# Feature Engineering in Keras.
Let's start off with the Python imports that we need.
```
import os, json, math, shutil
import numpy as np
import tensorflow as tf
print(tf.__version__)
# Note that this cell is special. It's got a tag (you can view tags by clicking on the wrench icon on the left menu in Jupyter)
# These are parameters that we will configure so that we can schedule this notebook
DATADIR = '../data'
OUTDIR = './trained_model'
EXPORT_DIR = os.path.join(OUTDIR,'export/savedmodel')
NBUCKETS = 10 # for feature crossing
TRAIN_BATCH_SIZE = 32
NUM_TRAIN_EXAMPLES = 10000 * 5 # remember the training dataset repeats, so this will wrap around
NUM_EVALS = 5 # evaluate this many times
NUM_EVAL_EXAMPLES = 10000 # enough to get a reasonable sample, but not so much that it slows things down
```
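Because the cell above is tagged as a parameters cell, its values can be overridden when the notebook is executed programmatically. As a minimal sketch (the tool and the file names here are assumptions, not part of this notebook), a parameterized run with papermill could look like this:
```
# Hypothetical parameterized execution of this notebook; the notebook file names
# are assumed, and papermill is just one possible scheduling/execution tool.
import papermill as pm

pm.execute_notebook(
    'feateng_keras.ipynb',         # input notebook (name assumed)
    'feateng_keras_output.ipynb',  # executed copy with outputs
    parameters={'DATADIR': 'gs://my-bucket/data', 'NUM_EVALS': 10},
)
```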
## Locating the CSV files
We will start with the CSV files that we wrote out in the [first notebook](../01_explore/taxifare.ipynb) of this sequence. So that you don't have to run that notebook, we saved a copy in ../data
```
if DATADIR[:5] == 'gs://':
!gsutil ls $DATADIR/*.csv
else:
!ls -l $DATADIR/*.csv
```
## Use tf.data to read the CSV files
We wrote these cells in the [third notebook](../03_tfdata/input_pipeline.ipynb) of this sequence.
```
CSV_COLUMNS = ['fare_amount', 'pickup_datetime',
'pickup_longitude', 'pickup_latitude',
'dropoff_longitude', 'dropoff_latitude',
'passenger_count', 'key']
LABEL_COLUMN = 'fare_amount'
DEFAULTS = [[0.0],['na'],[0.0],[0.0],[0.0],[0.0],[0.0],['na']]
def features_and_labels(row_data):
for unwanted_col in ['key']: # keep the pickup_datetime!
row_data.pop(unwanted_col)
label = row_data.pop(LABEL_COLUMN)
return row_data, label # features, label
# load the training data
def load_dataset(pattern, batch_size=1, mode=tf.estimator.ModeKeys.EVAL):
pattern = '{}/{}'.format(DATADIR, pattern)
dataset = (tf.data.experimental.make_csv_dataset(pattern, batch_size, CSV_COLUMNS, DEFAULTS)
.map(features_and_labels) # features, label
.cache())
if mode == tf.estimator.ModeKeys.TRAIN:
print("Repeating training dataset indefinitely")
dataset = dataset.shuffle(1000).repeat()
    dataset = dataset.prefetch(1) # prefetch the next batch while the current one is being consumed
return dataset
import datetime
# Python 3.5 doesn't handle timezones of the form 00:00, only 0000
s = '2012-07-05 14:18:00+00:00'
print(s)
ts = datetime.datetime.strptime(s.replace(':',''), "%Y-%m-%d %H%M%S%z")
print(ts.weekday())
DAYS = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
print(DAYS[ts.weekday()])
s = tf.constant('2012-07-05 14:18:00+00:00').numpy().decode('utf-8')
print(s)
ts = datetime.datetime.strptime(s.replace(':',''), "%Y-%m-%d %H%M%S%z")
print(ts.weekday())
DAYS = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
print(DAYS[ts.weekday()])
## Add transformations
def euclidean(params):
lon1, lat1, lon2, lat2 = params
londiff = lon2 - lon1
latdiff = lat2 - lat1
return tf.sqrt(londiff*londiff + latdiff*latdiff)
DAYS = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
def get_dayofweek(s):
# Python 3.5 doesn't handle timezones of the form 00:00, only 0000
s1 = s.numpy().decode('utf-8') # get Python string
ts = datetime.datetime.strptime(s1.replace(':',''), "%Y-%m-%d %H%M%S%z")
return DAYS[ts.weekday()]
def dayofweek(ts_in):
return tf.map_fn(
lambda s: tf.py_function(get_dayofweek, inp=[s], Tout=tf.string),
ts_in
)
def transform(inputs, NUMERIC_COLS, STRING_COLS):
transformed = inputs.copy()
print("BEFORE TRANSFORMATION")
print("INPUTS:", inputs.keys())
print(inputs['pickup_longitude'].shape)
feature_columns = {
colname: tf.feature_column.numeric_column(colname)
for colname in NUMERIC_COLS
}
# scale the lat, lon values to be in 0, 1
for lon_col in ['pickup_longitude', 'dropoff_longitude']: # in range -70 to -78
transformed[lon_col] = tf.keras.layers.Lambda(
lambda x: (x+78)/8.0,
name='scale_{}'.format(lon_col)
)(inputs[lon_col])
for lat_col in ['pickup_latitude', 'dropoff_latitude']: # in range 37 to 45
transformed[lat_col] = tf.keras.layers.Lambda(
lambda x: (x-37)/8.0,
name='scale_{}'.format(lat_col)
)(inputs[lat_col])
# add Euclidean distance. Doesn't have to be accurate calculation because NN will calibrate it
transformed['euclidean'] = tf.keras.layers.Lambda(euclidean, name='euclidean')([
inputs['pickup_longitude'],
inputs['pickup_latitude'],
inputs['dropoff_longitude'],
inputs['dropoff_latitude']
])
feature_columns['euclidean'] = tf.feature_column.numeric_column('euclidean')
# hour of day from timestamp of form '2010-02-08 09:17:00+00:00'
transformed['hourofday'] = tf.keras.layers.Lambda(
lambda x: tf.strings.to_number(tf.strings.substr(x, 11, 2), out_type=tf.dtypes.int32),
name='hourofday'
)(inputs['pickup_datetime'])
feature_columns['hourofday'] = tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_identity('hourofday', num_buckets=24))
# day of week is hard because there is no TensorFlow function for date handling
transformed['dayofweek'] = tf.keras.layers.Lambda(
lambda x: dayofweek(x),
name='dayofweek_pyfun'
)(inputs['pickup_datetime'])
transformed['dayofweek'] = tf.keras.layers.Reshape((), name='dayofweek')(transformed['dayofweek'])
feature_columns['dayofweek'] = tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(
'dayofweek', vocabulary_list = DAYS))
# featurecross lat, lon into nxn buckets, then embed
# b/135479527
#nbuckets = NBUCKETS
#latbuckets = np.linspace(0, 1, nbuckets).tolist()
#lonbuckets = np.linspace(0, 1, nbuckets).tolist()
#b_plat = tf.feature_column.bucketized_column(feature_columns['pickup_latitude'], latbuckets)
#b_dlat = tf.feature_column.bucketized_column(feature_columns['dropoff_latitude'], latbuckets)
#b_plon = tf.feature_column.bucketized_column(feature_columns['pickup_longitude'], lonbuckets)
#b_dlon = tf.feature_column.bucketized_column(feature_columns['dropoff_longitude'], lonbuckets)
#ploc = tf.feature_column.crossed_column([b_plat, b_plon], nbuckets * nbuckets)
#dloc = tf.feature_column.crossed_column([b_dlat, b_dlon], nbuckets * nbuckets)
#pd_pair = tf.feature_column.crossed_column([ploc, dloc], nbuckets ** 4 )
#feature_columns['pickup_and_dropoff'] = tf.feature_column.embedding_column(pd_pair, 100)
print("AFTER TRANSFORMATION")
print("TRANSFORMED:", transformed.keys())
print("FEATURES", feature_columns.keys())
return transformed, feature_columns
def rmse(y_true, y_pred):
return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))
def build_dnn_model():
# input layer is all float except for pickup_datetime which is a string
STRING_COLS = ['pickup_datetime']
NUMERIC_COLS = set(CSV_COLUMNS) - set([LABEL_COLUMN, 'key']) - set(STRING_COLS)
print(STRING_COLS)
print(NUMERIC_COLS)
inputs = {
colname : tf.keras.layers.Input(name=colname, shape=(), dtype='float32')
for colname in NUMERIC_COLS
}
inputs.update({
colname : tf.keras.layers.Input(name=colname, shape=(), dtype='string')
for colname in STRING_COLS
})
# transforms
transformed, feature_columns = transform(inputs, NUMERIC_COLS, STRING_COLS)
dnn_inputs = tf.keras.layers.DenseFeatures(feature_columns.values())(transformed)
    # two hidden layers of [32, 8] just like the BQML DNN
h1 = tf.keras.layers.Dense(32, activation='relu', name='h1')(dnn_inputs)
h2 = tf.keras.layers.Dense(8, activation='relu', name='h2')(h1)
# final output would normally have a linear activation because this is regression
# However, we know something about the taxi problem (fares are +ve and tend to be below $60).
# Use that here. (You can verify by running this query):
# SELECT APPROX_QUANTILES(fare_amount, 100) FROM serverlessml.cleaned_training_data
# b/136476088
#fare_thresh = lambda x: 60 * tf.keras.activations.relu(x)
#output = tf.keras.layers.Dense(1, activation=fare_thresh, name='fare')(h2)
output = tf.keras.layers.Dense(1, name='fare')(h2)
model = tf.keras.models.Model(inputs, output)
model.compile(optimizer='adam', loss='mse', metrics=[rmse, 'mse'])
return model
model = build_dnn_model()
print(model.summary())
tf.keras.utils.plot_model(model, 'dnn_model.png', show_shapes=False, rankdir='LR')
```
## Train model
To train the model, call model.fit()
```
trainds = load_dataset('taxi-train*', TRAIN_BATCH_SIZE, tf.estimator.ModeKeys.TRAIN)
evalds = load_dataset('taxi-valid*', 1000, tf.estimator.ModeKeys.EVAL).take(NUM_EVAL_EXAMPLES//10000) # evaluate on 1/10 final evaluation set
steps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_EVALS)
shutil.rmtree('{}/checkpoints/'.format(OUTDIR), ignore_errors=True)
checkpoint_path = '{}/checkpoints/taxi'.format(OUTDIR)
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
save_weights_only=True,
verbose=1)
history = model.fit(trainds,
validation_data=evalds,
epochs=NUM_EVALS,
steps_per_epoch=steps_per_epoch,
callbacks=[cp_callback])
# plot
import matplotlib.pyplot as plt
nrows = 1
ncols = 2
fig = plt.figure(figsize=(10, 5))
for idx, key in enumerate(['loss', 'rmse']):
ax = fig.add_subplot(nrows, ncols, idx+1)
plt.plot(history.history[key])
plt.plot(history.history['val_{}'.format(key)])
plt.title('model {}'.format(key))
plt.ylabel(key)
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left');
```
## Evaluate over full validation dataset
Let's evaluate over the full validation dataset (provided the validation dataset is large enough).
```
evalds = load_dataset('taxi-valid*', 1000, tf.estimator.ModeKeys.EVAL).take(NUM_EVAL_EXAMPLES//1000)
model.evaluate(evalds)
```
Yippee! We are now at under 4 dollars RMSE!
## Predict with model
This is how to predict with this model:
```
model.predict({
'pickup_longitude': tf.convert_to_tensor([-73.982683]),
'pickup_latitude': tf.convert_to_tensor([40.742104]),
'dropoff_longitude': tf.convert_to_tensor([-73.983766]),
'dropoff_latitude': tf.convert_to_tensor([40.755174]),
'passenger_count': tf.convert_to_tensor([3.0]),
'pickup_datetime': tf.convert_to_tensor(['2010-02-08 09:17:00+00:00'], dtype=tf.string),
})
```
However, this is not realistic, because we can't expect client code to have a model object in memory. We'll have to export our model to a file, and expect client code to instantiate the model from that exported file.
## Export model
Let's export the model to a TensorFlow SavedModel format. Once we have a model in this format, we have lots of ways to "serve" the model, from a web application, from JavaScript, from mobile applications, etc.
```
export_dir = os.path.join(EXPORT_DIR, datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
tf.keras.experimental.export_saved_model(model, export_dir)
print(export_dir)
# Recreate the exact same model
new_model = tf.keras.experimental.load_from_saved_model(export_dir)
# try predicting with this model
new_model.predict({
'pickup_longitude': tf.convert_to_tensor([-73.982683]),
'pickup_latitude': tf.convert_to_tensor([40.742104]),
'dropoff_longitude': tf.convert_to_tensor([-73.983766]),
'dropoff_latitude': tf.convert_to_tensor([40.755174]),
'passenger_count': tf.convert_to_tensor([3.0]),
'pickup_datetime': tf.convert_to_tensor(['2010-02-08 09:17:00+00:00'], dtype=tf.string),
})
```
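The SavedModel can also be served over HTTP, for example with TensorFlow Serving. The snippet below is only a sketch of what a client request could look like; it assumes a TensorFlow Serving instance is already running on localhost:8501 with the exported model loaded under the name `taxifare` (both the host and the model name are assumptions, not part of this notebook):
```
# Hypothetical client call against a TensorFlow Serving REST endpoint.
# Assumes the SavedModel exported above is served under the model name "taxifare".
import requests

instance = {
    'pickup_longitude': -73.982683,
    'pickup_latitude': 40.742104,
    'dropoff_longitude': -73.983766,
    'dropoff_latitude': 40.755174,
    'passenger_count': 3.0,
    'pickup_datetime': '2010-02-08 09:17:00+00:00',
}
resp = requests.post(
    'http://localhost:8501/v1/models/taxifare:predict',
    json={'instances': [instance]},
)
print(resp.json())
```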
In this notebook, we have looked at how to implement a custom Keras model using feature columns.
Copyright 2019 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
# Transporter statistics and taxonomic profiles
## Overview
In this notebook some overview statistics of the datasets are computed and taxonomic profiles investigated. The notebook uses data produced by running the [01.process_data](01.process_data.ipynb) notebook.
```
import numpy as np
import pandas as pd
import seaborn as sns
import glob
import os
import matplotlib.pyplot as plt, matplotlib
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
plt.style.use('ggplot')
def make_tax_table(df,name="",rank="superkingdom"):
df_t = df.groupby(rank).sum()
df_tp = df_t.div(df_t.sum())*100
df_tp_mean = df_tp.mean(axis=1)
df_tp_max = df_tp.max(axis=1)
df_tp_min = df_tp.min(axis=1)
df_tp_sd = df_tp.std(axis=1)
table = pd.concat([df_tp_mean,df_tp_max,df_tp_min,df_tp_sd],axis=1)
table.columns = [name+" mean(%)",name+" max(%)",name+" min(%)",name+" std"]
table.rename(index=lambda x: x.split("_")[0], inplace=True)
return table
```
## Load the data
```
transinfo = pd.read_csv("selected_transporters_classified.tab", header=0, sep="\t", index_col=0)
transinfo.head()
```
Read gene abundance values with taxonomic annotations.
```
mg_cov = pd.read_table("data/mg/all_genes.tpm.taxonomy.tsv.gz", header=0, sep="\t", index_col=0)
mt_cov = pd.read_table("data/mt/all_genes.tpm.taxonomy.tsv.gz", header=0, sep="\t", index_col=0)
```
Read orf level transporter data.
```
mg_transcov = pd.read_table("results/mg/all_transporters.tpm.taxonomy.tsv.gz", header=0, sep="\t", index_col=0)
mt_transcov = pd.read_table("results/mt/all_transporters.tpm.taxonomy.tsv.gz", header=0, sep="\t", index_col=0)
mg_select_transcov = pd.read_table("results/mg/select_trans_genes.tpm.tsv", header=0, sep="\t", index_col=0)
mt_select_transcov = pd.read_table("results/mt/select_trans_genes.tpm.tsv", header=0, sep="\t", index_col=0)
```
Read transporter abundances.
```
mg_trans = pd.read_csv("results/mg/all_trans.tpm.tsv", header=0, sep="\t", index_col=0)
mt_trans = pd.read_csv("results/mt/all_trans.tpm.tsv", header=0, sep="\t", index_col=0)
```
## Generate taxonomic overview table
```
mg_tax_table = make_tax_table(mg_cov,name="MG ")
mg_tax_table_cyano = make_tax_table(mg_cov,name="MG ",rank="phylum").loc["Cyanobacteria"]
mg_tax_table = pd.concat([mg_tax_table,pd.DataFrame(mg_tax_table_cyano).T])
mg_tax_table
mt_tax_table = make_tax_table(mt_cov,name="MT ")
mt_tax_table_cyano = make_tax_table(mt_cov,name="MT ",rank="phylum").loc["Cyanobacteria"]
mt_tax_table = pd.concat([mt_tax_table,pd.DataFrame(mt_tax_table_cyano).T])
mt_tax_table
```
Concatenate overview tables. This is **Table 2** in the paper.
```
tax_table = pd.concat([mg_tax_table,mt_tax_table],axis=1).round(2)
tax_table.to_csv("results/Table2.tsv",sep="\t")
```
## Generate general overview of transporters
Make a table with the number of ORFs, the number of ORFs classified as transporters, and the min, mean and max coverage for transporter ORFs.
```
num_genes = len(mg_cov)
gene_lengths = pd.read_table("data/mg/all_genes.tpm.tsv.gz", usecols=[1])
gene_lengths = np.round(gene_lengths.mean())
def generate_transporter_stats(df):
# Number of transporter genes (genes with sum > 0)
num_trans_genes = len(df.loc[df.groupby(level=0).sum().sum(axis=1)>0])
# Percent of transporter genes
num_trans_genes_p = np.round((num_trans_genes / float(num_genes))*100,2)
# Mean total coverage for transporter genes across the samples
transcov_mean = np.round(((df.groupby(level=0).sum().sum().mean()) / 1e6)*100,2)
# Minimum total coverage for transporter genes across the samples
transcov_min = np.round(((df.groupby(level=0).sum().sum().min()) / 1e6)*100,2)
# Maximum ...
transcov_max = np.round(((df.groupby(level=0).sum().sum().max()) / 1e6)*100,2)
# Standard dev
transcov_std = np.round(((df.groupby(level=0).sum().sum() / 1e6)*100).std(),2)
return num_trans_genes, num_trans_genes_p, transcov_mean, transcov_min, transcov_max, transcov_std
mg_num_trans_genes, mg_num_trans_genes_p, mg_transcov_mean, mg_transcov_min, mg_transcov_max, mg_transcov_std = generate_transporter_stats(mg_transcov)
mt_num_trans_genes, mt_num_trans_genes_p, mt_transcov_mean, mt_transcov_min, mt_transcov_max, mt_transcov_std = generate_transporter_stats(mt_transcov)
```
Create table with transporter statistics for MG and MT datasets (**Table 3** in the paper).
```
stats_df = pd.DataFrame(data={
"Transporter genes": ["{} ({}%)".format(mg_num_trans_genes,mg_num_trans_genes_p),"{} ({}%)".format(mt_num_trans_genes,mt_num_trans_genes_p)],
"Transporter mean": ["{}%".format(mg_transcov_mean),"{}%".format(mt_transcov_mean)],
"Transporter min": ["{}%".format(mg_transcov_min),"{}%".format(mt_transcov_min)],
"Transporter max": ["{}%".format(mg_transcov_max),"{}%".format(mt_transcov_max)],
"Transporter std": ["{}%".format(mg_transcov_std),"{}%".format(mt_transcov_std)]},index=["MG","MT"]).T
stats_df.to_csv("results/Table3.tsv",sep="\t")
stats_df
```
Do the same with the selected transporters.
```
mg_select_num_trans_genes, mg_select_num_trans_genes_p, mg_select_transcov_mean, mg_select_transcov_min, mg_select_transcov_max, mg_select_transcov_std = generate_transporter_stats(mg_select_transcov)
mt_select_num_trans_genes, mt_select_num_trans_genes_p, mt_select_transcov_mean, mt_select_transcov_min, mt_select_transcov_max, mt_select_transcov_std = generate_transporter_stats(mt_select_transcov)
select_stats_df = pd.DataFrame(data={
"Selected transporter genes": ["{} ({}%)".format(mg_select_num_trans_genes,mg_select_num_trans_genes_p),"{} ({}%)".format(mt_select_num_trans_genes,mt_select_num_trans_genes_p)],
"Selected transporter mean": ["{}%".format(mg_select_transcov_mean),"{}%".format(mt_select_transcov_mean)],
"Selected transporter min": ["{}%".format(mg_select_transcov_min),"{}%".format(mt_select_transcov_min)],
"Selected transporter max": ["{}%".format(mg_select_transcov_max),"{}%".format(mt_select_transcov_max)],
"Selected transporter std": ["{}%".format(mg_select_transcov_std),"{}%".format(mt_select_transcov_std)]},index=["mg_select","mt_select"]).T
select_stats_df.to_csv("results/selected_transporter_stats.tab",sep="\t")
select_stats_df
```
## Generate kingdom/phylum level taxonomic plots
```
def get_euk_taxa(taxa, df, rank):
euk_taxa = []
for t in taxa:
k = df.loc[df[rank]==t, "superkingdom"].unique()[0]
if k=="Eukaryota":
euk_taxa.append(t)
return euk_taxa
def set_euk_hatches(ax):
for patch in ax.patches:
t = color2taxmap[patch.properties()['facecolor'][0:-1]]
if t in euk_taxa:
patch.set_hatch("////")
```
Generate profiles for metagenomes.
```
# Get sum of abundances at superkingdom level
mg_k = mg_cov.groupby("superkingdom").sum()
# Normalize to %
mg_kn = mg_k.div(mg_k.sum())*100
mg_kn = mg_kn.loc[["Archaea","Bacteria","Eukaryota","Viruses","Unclassified.sequences","other sequences"]]
mg_kn = mg_kn.loc[mg_kn.sum(axis=1).sort_values(ascending=False).index]
# Switch Proteobacterial classes to phylum
mg_cov.loc[mg_cov.phylum=="Proteobacteria","phylum"] = mg_cov.loc[mg_cov.phylum=="Proteobacteria","class"]
# Normalize at phylum level
mg_p = mg_cov.groupby("phylum").sum()
mg_pn = mg_p.div(mg_p.sum())*100
_ = mg_pn.mean(axis=1).sort_values(ascending=False)
_.loc[~_.index.str.contains("Unclassified")].head(8)
```
Create the taxonomic overview of the 7 most abundant phyla in the metagenomic dataset. This is **Figure 1** in the paper.
```
select_taxa = ["Verrucomicrobia","Actinobacteria","Alphaproteobacteria","Gammaproteobacteria","Cyanobacteria","Bacteroidetes","Betaproteobacteria"]
# Sort taxa by mean abundance
taxa_order = mg_pn.loc[select_taxa].mean(axis=1).sort_values(ascending=False).index
ax = mg_pn.loc[taxa_order].T.plot(kind="area",stacked=True)
ax.legend(bbox_to_anchor=(1,1))
ax.set_ylabel("% normalized abundance");
xticks = list(range(0,33))
ax.set_xticks(xticks);
ax.set_xticklabels(mg_pn.columns, rotation=90);
plt.savefig("results/Figure1.svg", bbox_inches="tight")
```
Generate profiles for metatranscriptomes.
```
# Get sum of abundances at superkingdom level
mt_k = mt_cov.groupby("superkingdom").sum()
# Normalize to %
mt_kn = mt_k.div(mt_k.sum())*100
mt_kn = mt_kn.loc[["Archaea","Bacteria","Eukaryota","Viruses","Unclassified.sequences","other sequences"]]
mt_kn = mt_kn.loc[mt_kn.sum(axis=1).sort_values(ascending=False).index]
# Switch Proteobacterial classes to phylum
mt_cov.loc[mt_cov.phylum=="Proteobacteria","phylum"] = mt_cov.loc[mt_cov.phylum=="Proteobacteria","class"]
# Normalize at phylum level
mt_p = mt_cov.groupby("phylum").sum()
mt_pn = mt_p.div(mt_p.sum())*100
```
Get common taxa for both datasets by taking the union of the top 15 most abundant taxa in each.
```
mg_taxa = mg_pn.mean(axis=1).sort_values(ascending=False).head(15).index
mt_taxa = mt_pn.mean(axis=1).sort_values(ascending=False).head(15).index
taxa = set(mg_taxa).union(set(mt_taxa))
```
Single out eukaryotic taxa
```
euk_taxa = get_euk_taxa(taxa, mg_cov, rank="phylum")
```
Sort the taxa by their mean abundance in the mg data
```
taxa_sort = mg_pn.loc[taxa].mean(axis=1).sort_values(ascending=False).index
taxa_colors = dict(zip(taxa_sort,(sns.color_palette("Set1",7)+sns.color_palette("Set2",7)+sns.color_palette("Dark2",5))))
color2taxmap = {}
for t, c in taxa_colors.items():
color2taxmap[c] = t
```
Plot metagenome profiles
```
fig,axes = plt.subplots(ncols=2,nrows=1, figsize=(12,4))
# Plot the kingdoms
ax1 = mg_kn.T.plot(kind="bar",stacked=True,ax=axes[0])
ax1.legend(loc="lower right",fontsize="small")
ax1.set_ylabel("%")
# Plot the phyla
ax2 = mg_pn.loc[taxa_sort].T.plot(kind="bar",stacked=True, color=[taxa_colors[tax] for tax in taxa_sort], legend=None,ax=axes[1])
set_euk_hatches(ax2)
ax2.set_ylabel("%")
ax2.legend(bbox_to_anchor=(1,1),fontsize="small");
```
Plot metatranscriptome profiles
```
fig,axes = plt.subplots(ncols=2,nrows=1, figsize=(12,4))
# Plot the kingdoms
ax1 = mt_kn.T.plot(kind="bar",stacked=True,ax=axes[0])
ax1.legend(loc="lower center",fontsize="small")
ax1.set_ylabel("%")
# Plot the phyla
ax2 = mt_pn.loc[taxa_sort].T.plot(kind="bar",stacked=True, color=[taxa_colors[tax] for tax in taxa_sort], legend=None,ax=axes[1])
set_euk_hatches(ax2)
ax2.set_ylabel("%")
ax2.legend(bbox_to_anchor=(1,1),fontsize="small");
```
Calculate total number of orders.
```
mg_ordersum = mg_cov.groupby("order").sum()
mg_total_orders = len(mg_ordersum.loc[mg_ordersum.sum(axis=1)>0])
print("{} orders in the entire mg dataset".format(mg_total_orders))
mg_trans_ordersum = mg_select_transcov.groupby("order").sum()
mg_trans_total_orders = len(mg_trans_ordersum.loc[mg_trans_ordersum.sum(axis=1)>0])
print("{} orders in the transporter mg dataset".format(mg_trans_total_orders))
mt_ordersum = mt_cov.groupby("order").sum()
mt_total_orders = len(mt_ordersum.loc[mt_ordersum.sum(axis=1)>0])
print("{} orders in the entire mt dataset".format(mt_total_orders))
mt_trans_ordersum = mt_select_transcov.groupby("order").sum()
mt_trans_total_orders = len(mt_trans_ordersum.loc[mt_trans_ordersum.sum(axis=1)>0])
print("{} orders in the transporter mt dataset".format(mt_trans_total_orders))
```
## Calculate and plot distributions per taxonomic subset
Extract ORFs belonging to each subset.
```
cya_orfs = mg_transcov.loc[mg_transcov.phylum=="Cyanobacteria"].index
bac_orfs = mg_transcov.loc[(mg_transcov.phylum!="Cyanobacteria")&(mg_transcov.superkingdom=="Bacteria")].index
euk_orfs = mg_transcov.loc[mg_transcov.superkingdom=="Eukaryota"].index
```
Calculate contribution of taxonomic subsets to the identified transporters.
```
taxgroup_df = pd.DataFrame(columns=["MG","MT"],index=["Bacteria","Cyanobacteria","Eukaryota"])
mg_all_transcov_info = pd.merge(transinfo,mg_transcov,left_index=True,right_on="transporter")
mg_bac_transcov_info = pd.merge(transinfo,mg_transcov.loc[bac_orfs],left_index=True,right_on="transporter")
mg_euk_transcov_info = pd.merge(transinfo,mg_transcov.loc[euk_orfs],left_index=True,right_on="transporter")
mg_cya_transcov_info = pd.merge(transinfo,mg_transcov.loc[cya_orfs],left_index=True,right_on="transporter")
mt_all_transcov_info = pd.merge(transinfo,mt_transcov,left_index=True,right_on="transporter")
mt_bac_transcov_info = pd.merge(transinfo,mt_transcov.loc[bac_orfs],left_index=True,right_on="transporter")
mt_euk_transcov_info = pd.merge(transinfo,mt_transcov.loc[euk_orfs],left_index=True,right_on="transporter")
mt_cya_transcov_info = pd.merge(transinfo,mt_transcov.loc[cya_orfs],left_index=True,right_on="transporter")
mg_cya_part = mg_cya_transcov_info.groupby("transporter").sum().sum().div(mg_all_transcov_info.groupby("transporter").sum().sum())*100
mi,ma,me = mg_cya_part.min(),mg_cya_part.max(),mg_cya_part.mean()
taxgroup_df.loc["Cyanobacteria","MG"] = "{}% ({}-{}%)".format(round(me,2),round(mi,2),round(ma,2))
mg_euk_part = mg_euk_transcov_info.groupby("transporter").sum().sum().div(mg_all_transcov_info.groupby("transporter").sum().sum())*100
mi,ma,me = mg_euk_part.min(),mg_euk_part.max(),mg_euk_part.mean()
taxgroup_df.loc["Eukaryota","MG"] = "{}% ({}-{}%)".format(round(me,2),round(mi,2),round(ma,2))
mg_bac_part = mg_bac_transcov_info.groupby("transporter").sum().sum().div(mg_all_transcov_info.groupby("transporter").sum().sum())*100
mi,ma,me = mg_bac_part.min(),mg_bac_part.max(),mg_bac_part.mean()
taxgroup_df.loc["Bacteria","MG"] = "{}% ({}-{}%)".format(round(me,2),round(mi,2),round(ma,2))
mt_cya_part = mt_cya_transcov_info.groupby("transporter").sum().sum().div(mt_all_transcov_info.groupby("transporter").sum().sum())*100
mi,ma,me = mt_cya_part.min(),mt_cya_part.max(),mt_cya_part.mean()
taxgroup_df.loc["Cyanobacteria","MT"] = "{}% ({}-{}%)".format(round(me,2),round(mi,2),round(ma,2))
mt_euk_part = mt_euk_transcov_info.groupby("transporter").sum().sum().div(mt_all_transcov_info.groupby("transporter").sum().sum())*100
mi,ma,me = mt_euk_part.min(),mt_euk_part.max(),mt_euk_part.mean()
taxgroup_df.loc["Eukaryota","MT"] = "{}% ({}-{}%)".format(round(me,2),round(mi,2),round(ma,2))
mt_bac_part = mt_bac_transcov_info.groupby("transporter").sum().sum().div(mt_all_transcov_info.groupby("transporter").sum().sum())*100
mi,ma,me = mt_bac_part.min(),mt_bac_part.max(),mt_bac_part.mean()
taxgroup_df.loc["Bacteria","MT"] = "{}% ({}-{}%)".format(round(me,2),round(mi,2),round(ma,2))
taxgroup_df
```
### Taxonomic subsets per substrate category
```
def calculate_mean_total_substrate_subset(df,df_sum,subset,var_name="Sample",value_name="%"):
cols = ["fam","transporter","substrate_category","name"]
# Sum to protein family
x = df.groupby(["fam","transporter","substrate_category","name"]).sum().reset_index()
cols.pop(cols.index("fam"))
# Calculate mean of transporters
x.groupby(cols).mean().reset_index()
xt = x.copy()
# Normalize to sum of all transporters
x.iloc[:,4:] = x.iloc[:,4:].div(df_sum)*100
# Sum percent to substrate category
x = x.groupby("substrate_category").sum()
# Melt dataframe and add subset column
x["substrate_category"] = x.index
xm = pd.melt(x,id_vars="substrate_category", var_name="Sample",value_name="%")
xm = xm.assign(Subset=pd.Series(data=subset,index=xm.index))
return xm,xt
# Get contribution of bacterial transporters to total for substrate category
mg_bac_cat_melt,mg_bac_cat = calculate_mean_total_substrate_subset(mg_bac_transcov_info,mg_trans.sum(),"Bacteria")
# Get contribution of eukaryotic transporters to total for substrate category
mg_euk_cat_melt,mg_euk_cat = calculate_mean_total_substrate_subset(mg_euk_transcov_info,mg_trans.sum(),"Eukaryota")
# Get contribution of cyanobacterial transporters to total for substrate category
mg_cya_cat_melt,mg_cya_cat = calculate_mean_total_substrate_subset(mg_cya_transcov_info,mg_trans.sum(),"Cyanobacteria")
# Get contribution of bacterial transporters to total for substrate category
mt_bac_cat_melt,mt_bac_cat = calculate_mean_total_substrate_subset(mt_bac_transcov_info,mt_trans.sum(),"Bacteria")
# Get contribution of eukaryotic transporters to total for substrate category
mt_euk_cat_melt,mt_euk_cat = calculate_mean_total_substrate_subset(mt_euk_transcov_info,mt_trans.sum(),"Eukaryota")
# Get contribution of cyanobacterial transporters to total for substrate category
mt_cya_cat_melt,mt_cya_cat = calculate_mean_total_substrate_subset(mt_cya_transcov_info,mt_trans.sum(),"Cyanobacteria")
# Concatenate dataframes for metagenomes
mg_subsets_cat = pd.concat([pd.concat([mg_bac_cat_melt,mg_euk_cat_melt]),mg_cya_cat_melt])
mg_subsets_cat = mg_subsets_cat.assign(dataset=pd.Series(data="MG",index=mg_subsets_cat.index))
# Concatenate dataframes for metagenomes
mt_subsets_cat = pd.concat([pd.concat([mt_bac_cat_melt,mt_euk_cat_melt]),mt_cya_cat_melt])
mt_subsets_cat = mt_subsets_cat.assign(dataset=pd.Series(data="MT",index=mt_subsets_cat.index))
```
**Concatenate MG and MT**
```
subsets_cat = pd.concat([mg_subsets_cat,mt_subsets_cat])
```
### Plot substrate category distributions
```
cats = transinfo.substrate_category.unique()
# Update Eukaryota subset label
subsets_cat.loc[subsets_cat.Subset=="Eukaryota","Subset"] = ["Picoeukaryota"]*len(subsets_cat.loc[subsets_cat.Subset=="Eukaryota","Subset"])
sns.set(font_scale=0.8)
ax = sns.catplot(kind="bar",data=subsets_cat.loc[subsets_cat.substrate_category.isin(cats)],hue="dataset",
y="substrate_category", x="%", col="Subset",
errwidth=1, height=3, palette="Set1", aspect=1)
ax.set_titles("{col_name}")
ax.set_axis_labels("% of normalized transporter abundance","Substrate category")
plt.savefig("results/Figure3A.svg", bbox_inches="tight")
_ = mg_transcov.groupby(["fam","transporter"]).sum().reset_index()
_ = _.groupby("transporter").mean()
_ = pd.merge(transinfo, _, left_index=True, right_index=True)
_ = _.loc[_.substrate_category=="Carbohydrate"].groupby("name").sum()
(_.div(_.sum())*100).mean(axis=1).sort_values(ascending=False).head(3).sum()
```
# Ridge Regression
## Goal
Given a dataset with continuous inputs and corresponding outputs, the objective is to find a function that matches the two as accurately as possible. This function is usually called the target function.
In the case of a ridge regression, the idea is to model the target function as a linear combination of base functions (which can be, and generally are, non-linear). Thus, with $f$ the target function, $\phi_i$ a base function and $w_i$ its weight in the linear sum, we suppose that:
$$f(x) = \sum w_i \phi_i(x)$$
The parameters that must be found are the weights $w_i$ for each base function $\phi_i$. This is done by minimizing the [root mean square error](https://en.wikipedia.org/wiki/Root-mean-square_deviation).
There is a closed-form solution to this problem, given by the equation $W = (\Phi^T \Phi)^{-1} \Phi^T Y$ with:
- $d$ the number of base functions
- $W = (w_0, ..., w_d)$ the weight vector
- $Y$ the output vector
- $\Phi(X) = (\phi_0(X)^T, \phi_1(X)^T, ..., \phi_d(X)^T)$, $\phi_0(X) = \mathbf{1}$ and $\phi_i(X) = (\phi_i(X_1), ... \phi_i(X_n))$.
If you want more details, I find that the best explanation is the one given in the book [Pattern Recognition and Machine Learning](http://research.microsoft.com/en-us/um/people/cmbishop/PRML/) by C. Bishop.
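Note that the formula above is the ordinary least-squares solution; adding the ridge penalty $\lambda$ turns it into $W = (\Phi^T \Phi + \lambda I)^{-1} \Phi^T Y$, and $\lambda = 0$ recovers the formula used in the implementation below. A minimal NumPy sketch of this closed form, assuming $\Phi$ and $Y$ are already built as arrays:
```
# Closed-form weights; lambda_=0 gives plain least squares, which is what the
# implementations below compute. Phi has shape (n, d+1), Y has shape (n, 1).
import numpy as np
from numpy.linalg import inv

def closed_form_weights(Phi, Y, lambda_=0.0):
    d = Phi.shape[1]
    return inv(Phi.T @ Phi + lambda_ * np.eye(d)) @ Phi.T @ Y
```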
## Implementation
The following implementation does exactly what is explained above and uses three different types of kernel:
- linear $f(x) = w_0 + w_1 x$
- polynomial $f(x) = \sum_{i=0}^d w_i x^i$ with $d$ the degree of the polynomial. Notice that $d = 1$ is the linear case.
- gaussian $f(x) = \sum w_i \exp\left(-\frac{(x - b_i)^2}{2 \sigma^2}\right)$ where $b_i$ defines the location of base function number $i$ (the $b_i$ are usually taken at random within the dataset) and $\sigma$ is a parameter tuning the width of the functions. Here the "width" is the same for all base functions, but it could be made different for each of them.
The steps are:
- normalization
- building the $\Phi$ matrix
- computing the weights $W$
- plotting the found function and the dataset
```
# to display plots within the notebook
%matplotlib inline
# to define the size of the plotted images
from pylab import rcParams
rcParams['figure.figsize'] = (15, 10)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from numpy.linalg import inv
from fct import normalize_pd
```
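Note that `normalize_pd` comes from a local `fct` module that is not shown in this notebook. As a stand-in (this is an assumption about its behaviour, not the original implementation), a simple standardization would look like this:
```
# Hypothetical stand-in for fct.normalize_pd: standardize a pandas Series to
# zero mean and unit variance. The real implementation may differ.
def normalize_pd(series):
    return (series - series.mean()) / series.std()
```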
The X matrix corresponds to the inputs and the Y matrix to the outputs to predict.
```
data = pd.read_csv('datasets/data_regression.csv')
X = data['X']
Y = data['Y']
# Normalization
X = np.asmatrix(normalize_pd(X)).T
Y = np.asmatrix(normalize_pd(Y)).T
```
## Linear regression
Here we have $\Phi(X) = (\mathbf{1}, X)$, so the function we look for has the form $f(x) = ax + b$.
```
def linear_regression(X, Y):
# Building the Phi matrix
Ones = np.ones((X.shape[0], 1))
phi_X = np.hstack((Ones, X))
# Calculating the weights
w = np.dot(np.dot(inv(np.dot(phi_X.T, phi_X)), phi_X.T), Y)
# Predicting the output values
Y_linear_reg = np.dot(phi_X, w)
return Y_linear_reg
Y_linear_reg = linear_regression(X, Y)
plt.plot(X, Y, '.')
plt.plot(X, Y_linear_reg, 'r')
plt.title('Linear Regression')
plt.legend(['Data', 'Linear Regression'])
```
The obtained solution does not represent the data very well. This is because the model's representational power is too low compared to the complexity of the target function. This is usually referred to as **underfitting**.
## Polynomial Regression
Now, we approximate the target function by a polynomial $f(x) = w_0 + w_1 x + w_2 x^2 + ... + w_d x^d$ with $d$ the degree of the polynomial.
We plot the results obtained with different degrees.
```
def polynomial_regression(X, Y, degree):
# Building the Phi matrix
Ones = np.ones((X.shape[0], 1))
# Add a column of ones
phi_X = np.hstack((Ones, X))
# add a column of X elevated to all the powers from 2 to degree
for i in range(2, degree + 1):
# calculate the vector X to the power i and add it to the Phi matrix
X_power = np.array(X) ** i
phi_X = np.hstack((phi_X, np.asmatrix(X_power)))
# Calculating the weights
w = np.dot(np.dot(inv(np.dot(phi_X.T, phi_X)), phi_X.T), Y)
# Predicting the output values
Y_poly_reg = np.dot(phi_X, w)
return Y_poly_reg
# Degrees to plot you can change these values to
# see how the degree of the polynom affects the
# predicted function
degrees = [1, 2, 20]
legend = ['Data']
plt.plot(X, Y, '.')
for degree in degrees:
Y_poly_reg = polynomial_regression(X, Y, degree)
plt.plot(X, Y_poly_reg)
legend.append('degree ' + str(degree))
plt.legend(legend)
plt.title('Polynomial regression results depending on the degree of the polynomial used')
```
The linear case is still underfitting, but now we see that the polynomial of degree 20 is too sensitive to the data, especially around $[-2.5, -1.5]$. This phenomenon is called **overfitting**: the model starts fitting the noise in the data as well and loses its capacity to generalize.
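To make the comparison a little more concrete (this check is an addition, not part of the original notebook), we can compute the training RMSE for each degree with the `polynomial_regression` function defined above. The training error keeps shrinking as the degree grows, even while the model overfits, which is why the degree is normally chosen on held-out data:
```
# Training RMSE per polynomial degree, reusing polynomial_regression from above.
# A low training error alone does not rule out overfitting.
for degree in [1, 2, 20]:
    Y_fit = polynomial_regression(X, Y, degree)
    rmse = float(np.sqrt(np.mean(np.square(np.array(Y - Y_fit)))))
    print('degree {:2d}: training RMSE = {:.4f}'.format(degree, rmse))
```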
## Regression with Gaussian base functions
Lastly, we look at functions of the type $f(x) = \sum w_i \phi_i(x)$ with $\phi_i(x) = \exp\left(-\frac{(x - b_i)^2}{2 \sigma^2}\right)$. $b_i$ is called the base and $\sigma$ is its width.
Usually, the $b_i$ are taken randomly within the dataset. That is what I did in the implementation, with b the number of bases.
The plot shows the base functions used to compute the regressed function, together with the regression result.
```
def gaussian_regression(X, Y, b, sigma, return_base=True):
"""b is the number of bases to use, sigma is the variance of the
base functions."""
# Building the Phi matrix
Ones = np.ones((X.shape[0], 1))
# Add a column of ones
phi_X = np.hstack((Ones, X))
# Choose randomly without replacement b values from X
# to be the center of the base functions
X_array = np.array(X).reshape(1, -1)[0]
bases = np.random.choice(X_array, b, replace=False)
bases_function = []
    for i in range(b):
        # phi_i(x) = exp(-(x - b_i)^2 / (2 sigma^2)), one column per base
        base_function = np.exp(-0.5 * (((X_array - bases[i]) / sigma) ** 2))
        bases_function.append(base_function)
        phi_X = np.hstack((phi_X, np.asmatrix(base_function).T))
w = np.dot(np.dot(inv(np.dot(phi_X.T, phi_X)), phi_X.T), Y)
if return_base:
return np.dot(phi_X, w), bases_function
else:
return np.dot(phi_X, w)
# By changing this value, you will change the width of the base functions
sigma = 0.2
# b is the number of base functions used
b = 5
Y_gauss_reg, bases_function = gaussian_regression(X, Y, b, sigma)
# Plotting the base functions and the dataset
plt.plot(X, Y, '.')
plt.plot(X, Y_gauss_reg)
legend = ['Data', 'Regression result']
for i, base_function in enumerate(bases_function):
plt.plot(X, base_function)
legend.append('Base function n°' + str(i))
plt.legend(legend)
plt.title('Regression with gaussian base functions')
```
We can observe that here the sigma is too small: some parts of the dataset are too far away from the bases to be taken into account.
If you change the <code>sigma</code> in the code to 0.5 and then to 1, you will notice how the output function gets closer to the data.
```
%matplotlib inline
```
GroupLasso for linear regression with dummy variables
=====================================================
A sample script for group lasso with dummy variables
Setup
-----
```
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.metrics import r2_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from group_lasso import GroupLasso
from group_lasso.utils import extract_ohe_groups
np.random.seed(42)
GroupLasso.LOG_LOSSES = True
```
Set dataset parameters
----------------------
```
num_categories = 30
min_options = 2
max_options = 10
num_datapoints = 10000
noise_std = 1
```
Generate data matrix
--------------------
```
X_cat = np.empty((num_datapoints, num_categories))
for i in range(num_categories):
X_cat[:, i] = np.random.randint(min_options, max_options, num_datapoints)
ohe = OneHotEncoder()
X = ohe.fit_transform(X_cat)
groups = extract_ohe_groups(ohe)
group_sizes = [np.sum(groups == g) for g in np.unique(groups)]
active_groups = [np.random.randint(0, 2) for _ in np.unique(groups)]
```
Generate coefficients
---------------------
```
w = np.concatenate(
[
np.random.standard_normal(group_size) * is_active
for group_size, is_active in zip(group_sizes, active_groups)
]
)
w = w.reshape(-1, 1)
true_coefficient_mask = w != 0
intercept = 2
```
Generate regression targets
---------------------------
```
y_true = X @ w + intercept
y = y_true + np.random.randn(*y_true.shape) * noise_std
```
View noisy data and compute maximum R^2
---------------------------------------
```
plt.figure()
plt.plot(y, y_true, ".")
plt.xlabel("Noisy targets")
plt.ylabel("Noise-free targets")
# Use noisy y as true because that is what we would have access
# to in a real-life setting.
R2_best = r2_score(y, y_true)
```
Generate pipeline and train it
------------------------------
```
pipe = Pipeline(
memory=None,
steps=[
(
"variable_selection",
GroupLasso(
groups=groups,
group_reg=0.1,
l1_reg=0,
scale_reg=None,
supress_warning=True,
n_iter=100000,
frobenius_lipschitz=False,
),
),
("regressor", Ridge(alpha=1)),
],
)
pipe.fit(X, y)
```
Extract results and compute performance metrics
-----------------------------------------------
```
# Extract from pipeline
yhat = pipe.predict(X)
sparsity_mask = pipe["variable_selection"].sparsity_mask_
coef = pipe["regressor"].coef_.T
# Construct full coefficient vector
w_hat = np.zeros_like(w)
w_hat[sparsity_mask] = coef
R2 = r2_score(y, yhat)
# Print performance metrics
print(f"Number variables: {len(sparsity_mask)}")
print(f"Number of chosen variables: {sparsity_mask.sum()}")
print(f"R^2: {R2}, best possible R^2 = {R2_best}")
```
Visualise regression coefficients
---------------------------------
```
for i in range(w.shape[1]):
plt.figure()
plt.plot(w[:, i], ".", label="True weights")
plt.plot(w_hat[:, i], ".", label="Estimated weights")
plt.figure()
plt.plot([w.min(), w.max()], [coef.min(), coef.max()], "gray")
plt.scatter(w, w_hat, s=10)
plt.ylabel("Learned coefficients")
plt.xlabel("True coefficients")
plt.show()
```
# Pair-wise Correlations
The purpose is to identify predictor variables that are strongly correlated with the sales price, and with each other, to get an idea of which variables could be good predictors and where collinearity issues might arise.
Furthermore, Box-Cox transformations and linear combinations of variables are added where applicable or useful.
## "Housekeeping"
```
import warnings
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import PowerTransformer
from tabulate import tabulate
from utils import (
ALL_VARIABLES,
CONTINUOUS_VARIABLES,
DISCRETE_VARIABLES,
NUMERIC_VARIABLES,
ORDINAL_VARIABLES,
TARGET_VARIABLES,
encode_ordinals,
load_clean_data,
print_column_list,
)
pd.set_option("display.max_columns", 100)
sns.set_style("white")
```
## Load the Data
Only a subset of the previously cleaned data is used in this analysis. In particular, it does not make sense to calculate correlations involving nominal variables.
Furthermore, ordinal variables are encoded as integers (with greater values indicating a higher sales price by "gut feeling"; refer to the [data documentation](https://www.amstat.org/publications/jse/v19n3/decock/DataDocumentation.txt) to see the un-encoded values) and take part in the analysis.
A `cleaned_df` DataFrame with the original data from the previous notebook is kept so as to restore the encoded ordinal labels again at the end of this notebook for correct storage.
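As an illustration of what this encoding looks like (a minimal sketch only; the real mapping lives in `encode_ordinals` from `utils` and covers all ordinal columns), a quality-style column could be mapped as follows:
```
# Hypothetical sketch of ordinal encoding for a single quality-style column.
# The actual notebook uses encode_ordinals() from utils; the labels follow the
# ordering in the Ames data documentation (Po < Fa < TA < Gd < Ex).
import pandas as pd

quality_scale = {"Po": 1, "Fa": 2, "TA": 3, "Gd": 4, "Ex": 5}
example = pd.Series(["TA", "Gd", "Ex", "Fa"], name="Kitchen Qual")
print(example.map(quality_scale))
```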
```
cleaned_df = load_clean_data()
df = cleaned_df[NUMERIC_VARIABLES + ORDINAL_VARIABLES + TARGET_VARIABLES]
df = encode_ordinals(df)
df[NUMERIC_VARIABLES].head()
df[ORDINAL_VARIABLES].head()
```
## Linearly "dependent" Features
The "above grade (ground) living area" (= *Gr Liv Area*) can be split into 1st and 2nd floor living area plus some undefined rest.
```
assert not (
df["Gr Liv Area"]
!= (df["1st Flr SF"] + df["2nd Flr SF"] + df["Low Qual Fin SF"])
).any()
```
The various basement areas also add up.
```
assert not (
df["Total Bsmt SF"]
!= (df["BsmtFin SF 1"] + df["BsmtFin SF 2"] + df["Bsmt Unf SF"])
).any()
```
Calculate a variable for the total living area *Total SF* as this is the number communicated most often in housing ads.
```
df["Total SF"] = df["Gr Liv Area"] + df["Total Bsmt SF"]
new_variables = ["Total SF"]
CONTINUOUS_VARIABLES.append("Total SF")
```
The different porch areas are unified into a new variable *Total Porch SF*. This potentially helps making the presence of a porch in general relevant in the prediction.
```
df["Total Porch SF"] = (
df["3Ssn Porch"] + df["Enclosed Porch"] + df["Open Porch SF"]
+ df["Screen Porch"] + df["Wood Deck SF"]
)
new_variables.append("Total Porch SF")
CONTINUOUS_VARIABLES.append("Total Porch SF")
```
The various types of rooms "above grade" (i.e., *TotRms AbvGrd*, *Bedroom AbvGr*, *Kitchen AbvGr*, and *Full Bath*) do not add up (only in 29% of the cases they do). Therefore, no single unified variable can be used as a predictor.
```
round(
100
* (
df["TotRms AbvGrd"]
== (df["Bedroom AbvGr"] + df["Kitchen AbvGr"] + df["Full Bath"])
).sum()
/ df.shape[0]
)
```
Unify the number of various types of bathrooms into a single variable. Note that "half" bathrooms are counted as such.
```
df["Total Bath"] = (
df["Full Bath"] + 0.5 * df["Half Bath"]
+ df["Bsmt Full Bath"] + 0.5 * df["Bsmt Half Bath"]
)
new_variables.append("Total Bath")
DISCRETE_VARIABLES.append("Total Bath")
```
## Box-Cox Transformations
Only numeric columns with strictly positive values are eligible for a Box-Cox transformation.
```
columns = CONTINUOUS_VARIABLES + TARGET_VARIABLES
transforms = df[columns].describe().T
transforms = list(transforms[transforms['min'] > 0].index)
print_column_list(transforms)
```
A common convention is to apply a Box-Cox transformation only if the lambda value found by Maximum Likelihood Estimation is in the range from -3 to +3.
Consequently, the only applicable transformations are for *SalePrice* and the new variable *Total SF*.
```
# Check the Box-Cox transformations for each column separately
# to decide if the optimal lambda value is in an acceptable range.
output = []
transformed_columns = []
for column in transforms:
X = df[[column]] # 2D array needed!
pt = PowerTransformer(method="box-cox", standardize=False)
# Suppress a weird but harmless warning from scipy
with warnings.catch_warnings():
warnings.simplefilter("ignore")
pt.fit(X)
# Check if the optimal lambda is ok.
lambda_ = pt.lambdas_[0].round(1)
if -3 <= lambda_ <= 3:
lambda_label = 0 if lambda_ <= 0.01 else lambda_ # to avoid -0.0
new_column = f"{column} (box-cox-{lambda_label})"
df[new_column] = (
np.log(X) if lambda_ <= 0.001 else (((X ** lambda_) - 1) / lambda_)
)
        # Track the new column in the appropriate list.
new_variables.append(new_column)
if column in TARGET_VARIABLES:
TARGET_VARIABLES.append(new_column)
else:
CONTINUOUS_VARIABLES.append(new_column)
# To show only the transformed columns below.
transformed_columns.append(column)
transformed_columns.append(new_column)
output.append((
f"{column}:",
f"use lambda of {lambda_}",
))
else:
output.append((
f"{column}:",
f"lambda of {lambda_} not in realistic range",
))
print(tabulate(sorted(output), tablefmt="plain"))
df[transformed_columns].head()
```
## Correlations
The pair-wise correlations are calculated based on the type of the variables:
- **continuous** variables are assumed to be linearly related with the target and each other or not: use **Pearson's correlation coefficient**
- **discrete** (because of the low number of distinct realizations as seen in the data cleaning notebook) and **ordinal** (low number of distinct realizations as well) variables are assumed to be related in a monotonic way with the target and each other or not: use **Spearman's rank correlation coefficient**
Furthermore, for a **naive feature selection** a "rule of thumb" classification in *weak* and *strong* correlation is applied to the predictor variables. The identified variables will be used in the prediction modelling part to speed up the feature selection. A correlation between 0.33 and 0.66 is considered *weak* while a correlation above 0.66 is considered *strong* (these thresholds refer to the absolute value of the correlation). Correlations are calculated for **each** target variable (i.e., raw "SalePrice" and Box-Cox transformation thereof). Correlations below 0.1 are considered "uncorrelated".
```
strong = 0.66
weak = 0.33
uncorrelated = 0.1
```
Two heatmaps below (implemented in the reusable `plot_correlation` function) help visualize the correlations.
Obviously, many variables are pair-wise correlated. This could make the regression coefficients *imprecise* and hard to use or interpret. At the same time, this does not lower the predictive power of a model as a whole. In contrast to the pair-wise correlations, *multi-collinearity* is not checked here.
```
def plot_correlation(data, title):
"""Visualize a correlation matrix in a nice heatmap."""
fig, ax = plt.subplots(figsize=(12, 12))
ax.set_title(title, fontsize=24)
# Blank out the upper triangular part of the matrix.
    mask = np.zeros_like(data, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# Use a diverging color map.
cmap = sns.diverging_palette(240, 0, as_cmap=True)
# Adjust the labels' font size.
labels = data.columns
ax.set_xticklabels(labels, fontsize=10)
ax.set_yticklabels(labels, fontsize=10)
# Plot it.
sns.heatmap(
data, vmin=-1, vmax=1, cmap=cmap, center=0, linewidths=.5,
cbar_kws={"shrink": .5}, square=True, mask=mask, ax=ax
)
```
### Pearson
Pearson's correlation coefficient shows a linear relationship between two variables.
```
columns = CONTINUOUS_VARIABLES + TARGET_VARIABLES
pearson = df[columns].corr(method="pearson")
plot_correlation(pearson, "Pearson's Correlation")
```
Predictors weakly or strongly correlated with a target variable are collected.
```
pearson_weakly_correlated = set()
pearson_strongly_correlated = set()
pearson_uncorrelated = set()
# Iterate over the raw and transformed target.
for target in TARGET_VARIABLES:
corrs = pearson.loc[target].drop(TARGET_VARIABLES).abs()
pearson_weakly_correlated |= set(corrs[(weak < corrs) & (corrs <= strong)].index)
pearson_strongly_correlated |= set(corrs[(strong < corrs)].index)
pearson_uncorrelated |= set(corrs[(corrs < uncorrelated)].index)
# Show that no contradiction exists between the classifications.
assert pearson_weakly_correlated & pearson_strongly_correlated == set()
assert pearson_weakly_correlated & pearson_uncorrelated == set()
```
Show the continuous variables that are weakly and strongly correlated with the sales price or uncorrelated.
```
print_column_list(pearson_uncorrelated)
print_column_list(pearson_weakly_correlated)
print_column_list(pearson_strongly_correlated)
```
### Spearman
Spearman's correlation coefficient shows an ordinal rank relationship between two variables.
```
columns = sorted(DISCRETE_VARIABLES + ORDINAL_VARIABLES) + TARGET_VARIABLES
spearman = df[columns].corr(method="spearman")
plot_correlation(spearman, "Spearman's Rank Correlation")
```
Predictors weakly or strongly correlated with a target variable are collected.
```
spearman_weakly_correlated = set()
spearman_strongly_correlated = set()
spearman_uncorrelated = set()
# Iterate over the raw and transformed target.
for target in TARGET_VARIABLES:
corrs = spearman.loc[target].drop(TARGET_VARIABLES).abs()
spearman_weakly_correlated |= set(corrs[(weak < corrs) & (corrs <= strong)].index)
spearman_strongly_correlated |= set(corrs[(strong < corrs)].index)
spearman_uncorrelated |= set(corrs[(corrs < uncorrelated)].index)
# Show that no contradiction exists between the classifications.
assert spearman_weakly_correlated & spearman_strongly_correlated == set()
assert spearman_weakly_correlated & spearman_uncorrelated == set()
```
Show the discrete and ordinal variables that are weakly and strongly correlated with the sales price or uncorrelated.
```
print_column_list(spearman_uncorrelated)
print_column_list(spearman_weakly_correlated)
print_column_list(spearman_strongly_correlated)
```
## Save the Results
### Save the weakly and strongly correlated Variables
The subsets of variables classified by their correlation with the house price are saved in a simple JSON file for easy re-use.
```
with open("data/correlated_variables.json", "w") as file:
file.write(json.dumps({
"uncorrelated": sorted(
list(pearson_uncorrelated) + list(spearman_uncorrelated)
),
"weakly_correlated": sorted(
list(pearson_weakly_correlated) + list(spearman_weakly_correlated)
),
"strongly_correlated": sorted(
list(pearson_strongly_correlated) + list(spearman_strongly_correlated)
),
}))
```
### Save the Data
Sort the new variables into the unprocessed `cleaned_df` DataFrame with the targets at the end. This "restores" the ordinal labels again for storage.
```
for column in new_variables:
cleaned_df[column] = df[column]
for target in set(TARGET_VARIABLES) & set(new_variables):
new_variables.remove(target)
cleaned_df = cleaned_df[sorted(ALL_VARIABLES + new_variables) + TARGET_VARIABLES]
```
In totality, this notebook added two new linear combinations and one Box-Cox transformation to the previous 78 columns.
```
cleaned_df.shape
cleaned_df.head()
cleaned_df.to_csv("data/data_clean_with_transformations.csv")
```
# Introduction to TensorFlow v2 : Basics
### Importing and printing the versions
```
import tensorflow as tf
print("TensorFlow version: {}".format(tf.__version__))
print("Eager execution is: {}".format(tf.executing_eagerly()))
print("Keras version: {}".format(tf.keras.__version__))
```
### TensorFlow Variables
[Tensors](https://www.tensorflow.org/guide/tensor) are multi-dimensional arrays in TensorFlow, but they are immutable. [Variables](https://www.tensorflow.org/guide/variable) are a way to store data that can be manipulated and changed easily. Variables are automatically placed on the fastest compatible device for their datatype: for example, if a GPU is available, Variables are placed on the GPU automatically.
```
var = 1
# Defining a Tensorflow Variables
ten = tf.Variable(7)
another_tensor = tf.Variable([[1, 2],[3, 4]])
var, ten, another_tensor
```
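To see the automatic placement mentioned above, every Variable exposes the device it lives on. The exact string depends on the machine (a GPU device will show up as something like `/device:GPU:0` only when one is available):
```
# Check where the Variables defined above were placed.
print(ten.device)
print(another_tensor.device)
```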
### Creating new Variables
```
f1 = tf.Variable(100.6)
print(f1)
```
### Assigning values to existing Variables
```
# Assign and print the Data-Type
print(f1.assign(25))
print(f1.dtype)
f2 = tf.Variable(7, dtype = tf.float64)
print(f2.dtype)
# Creating a TensorFlow constant - Value cannot be changed in future
constant_var = tf.constant(10)
print(constant_var)
```
### Extracting the value of a Tensor as a NumPy array using .numpy()
```
constant_var.numpy()
```
### Rank and Shape of Tensor
About [Rank and Shape](https://www.tensorflow.org/guide/tensor#about_shapes) in TensorFlow
```
tf.rank(another_tensor)
tf.shape(another_tensor)
new_tensor = tf.Variable([ [ [0., 1., 2.], [3., 4., 5.] ], [ [6., 7., 8.], [9., 10., 11.] ] ])
print(new_tensor.shape)
print(tf.rank(new_tensor))
```
### Reshaping Tensors
```
new_reshape = tf.reshape(new_tensor, [2, 6])
recent_reshape = tf.reshape(new_tensor, [1, 12])
print(new_reshape)
print(recent_reshape)
```
### Broadcasting Feature
```
new_tensor + 4
new_tensor - 4
new_tensor * 4
```
### Matrix Multiplication
```
new_tensor * new_tensor  # element-wise (Hadamard) product, not matrix multiplication
u = tf.constant([[5, 6, 7]])
v = tf.constant([[8, 9, 0]])
print('Matrix Multiplication - Transpose')
print(tf.matmul(u, tf.transpose(a=v)))
```
### Type Casting
```
int_tensor = tf.cast(ten, dtype=tf.float32)
print(int_tensor)
```
### Arithmetic Operations
```
a = tf.random.normal(shape=(2, 2))
b = tf.random.normal(shape=(2, 2))
c = a + b
d = tf.square(c)
e = tf.exp(d)
print('Addition - {}'.format(c))
print('Square - {}'.format(d))
print('Exponent - {}'.format(e))
```
# TensorFlow v2 Functions
### Squared Difference Function
```
#Squared Difference Function
x = [2, 4, 6, 8, 12]
y = 6
#(x-y)*(x-y)
result = tf.math.squared_difference(x, y)
result
```
### Reduce Mean
```
numbers = tf.constant([[6., 9.], [3., 5.]])
print(numbers)
tf.reduce_mean(input_tensor = numbers)
```
### Mean across columns
```
# Reduce rows -> Find mean across columns
#(6. + 3.)/2, (9. + 5.)/2
print(tf.reduce_mean(input_tensor = numbers, axis = 0))
# (6. + 3.)/2, (9. + 5.)/2
print(tf.reduce_mean(input_tensor = numbers, axis = 0, keepdims = True))
```
### Mean across rows
```
# Reduce columns -> Find mean across rows
#(6. + 9.)/2, (3. + 5.)/2
print(tf.reduce_mean(input_tensor = numbers, axis = 1))
# (6. + 9.)/2, (3. + 5.)/2
print(tf.reduce_mean(input_tensor = numbers, axis = 1, keepdims = True))
```
### Generating normal distribution in a tensor
```
print(tf.random.normal(shape = (3, 2), mean = 10, stddev = 2, dtype = tf.float32, seed = None, name = None))
```
### Generating uniform distribution in a tensor
```
tf.random.uniform(shape = (3, 2), minval = 0, maxval = 1, dtype = tf.float32, seed = None, name = None)
```
### Random Seed in Tensorflow
```
print('Random Seed - 11\n')
tf.random.set_seed(11)
random_1 = tf.random.uniform(shape = (2, 2), maxval = 7, dtype = tf.int32)
random_2 = tf.random.uniform(shape = (2, 2), maxval = 7, dtype = tf.int32)
print(random_1)
print(random_2)
print('\n')
print('Random Seed - 12\n')
tf.random.set_seed(12)
random_1 = tf.random.uniform(shape = (2, 2), maxval = 7, dtype = tf.int32)
random_2 = tf.random.uniform(shape = (2, 2), maxval = 7, dtype = tf.int32)
print(random_1)
print(random_2)
print('\n')
print('Random Seed - 11\n')
tf.random.set_seed(11)
random_1 = tf.random.uniform(shape = (2, 2), maxval = 7, dtype = tf.int32)
random_2 = tf.random.uniform(shape = (2, 2), maxval = 7, dtype = tf.int32)
print(random_1)
print(random_2)
```
### Max, Min and Indices
```
tensor_m = tf.constant([2, 20, 15, 32, 77, 29, -16, -51, 29])
print(tensor_m)
# Max argument
index = tf.argmax(input = tensor_m)
print('Index of max: {}\n'.format(index))
print('Max element: {}'.format(tensor_m[index].numpy()))
print(tensor_m)
# Min argument
index = tf.argmin(input = tensor_m)
print('Index of minumum element: {}\n'.format(index))
print('Minimum element: {}'.format(tensor_m[index].numpy()))
```
# TensorFlow v2 : Advanced
### Computing gradients with GradientTape - Automatic Differentiation
TensorFlow v2 provides this API for recording operations during the forward pass so that gradients with respect to the inputs can be computed afterwards. Since intermediate values need to be remembered during the forward pass, tf.GradientTape gives us a way to automatically differentiate a function with respect to the specified input variables. To read more on automatic differentiation in TensorFlow v2, click [here](https://www.tensorflow.org/guide/autodiff).
```
x = tf.random.normal(shape=(2, 2))
y = tf.random.normal(shape=(2, 2))
with tf.GradientTape() as tape:
# Start recording the history of operations applied to x
tape.watch(x)
# Do some math using x and y
z = tf.sqrt(tf.square(x) + tf.square(y))
# What's the gradient of z with respect to x
dz = tape.gradient(z, x)
print(dz)
```
The tf.GradientTape API automatically watches trainable `tf.Variable`s, so once `x` is wrapped in a `tf.Variable` there is no need to call `tape.watch()` explicitly.
```
x = tf.Variable(x)
with tf.GradientTape() as tape:
# Doing some calculations using x and y
z = tf.sqrt(tf.square(x) + tf.square(y))
# Getting the gradient of z wrt x
dz = tape.gradient(z, x)
print(dz)
```
We can also compute higher-order derivatives by nesting two tapes:
```
with tf.GradientTape() as outer_tape:
with tf.GradientTape() as tape:
# Computation using x and y
z = tf.sqrt(tf.square(x) + tf.square(y))
# First differentiation of z wrt x
dz = tape.gradient(z, x)
# Second differentiation of z wrt x
dz2 = outer_tape.gradient(dz, x)
print(dz2)
```
### TensorFlow v2 Graph Function
Read [here](https://www.tensorflow.org/guide/intro_to_graphs) for more information on computation graphs and `tf.function` in TensorFlow v2.
```
#Normal Python function
def f1(x, y):
return tf.reduce_mean(input_tensor=tf.multiply(x ** 2, 5) + y**2)
#Converting that into Tensorflow Graph function
f2 = tf.function(f1)
x = tf.constant([7., -2.])
y = tf.constant([8., 6.])
#Function 1 and function 2 return the same value, but function 2 executes as a TensorFlow graph
assert f1(x,y).numpy() == f2(x,y).numpy()
ans = f1(x,y)
print(ans)
ans = f2(x,y)
print(ans)
```
# TensorFlow v2 : Linear Regression and tf.function
### Let's see the importance of tf.function with a small linear regression example
```
input_dim = 2
output_dim = 1
learning_rate = 0.01
# This is our weight matrix
w = tf.Variable(tf.random.uniform(shape=(input_dim, output_dim)))
# This is our bias vector
b = tf.Variable(tf.zeros(shape=(output_dim,)))
def compute_predictions(features):
return tf.matmul(features, w) + b
def compute_loss(labels, predictions):
return tf.reduce_mean(tf.square(labels - predictions))
def train_on_batch(x, y):
with tf.GradientTape() as tape:
predictions = compute_predictions(x)
loss = compute_loss(y, predictions)
# Note that `tape.gradient` works with a list as well (w, b).
dloss_dw, dloss_db = tape.gradient(loss, [w, b])
w.assign_sub(learning_rate * dloss_dw)
b.assign_sub(learning_rate * dloss_db)
return loss
import numpy as np
import random
import matplotlib.pyplot as plt
%matplotlib inline
# Prepare a dataset.
num_samples = 10000
negative_samples = np.random.multivariate_normal(mean=[0, 3], cov=[[1, 0.5],[0.5, 1]], size=num_samples)
positive_samples = np.random.multivariate_normal(mean=[3, 0], cov=[[1, 0.5],[0.5, 1]], size=num_samples)
features = np.vstack((negative_samples, positive_samples)).astype(np.float32)
labels = np.vstack((np.zeros((num_samples, 1), dtype='float32'), np.ones((num_samples, 1), dtype='float32')))
plt.scatter(features[:, 0], features[:, 1], c=labels[:, 0])
# Shuffle the data.
indices = np.random.permutation(len(features))
features = features[indices]
labels = labels[indices]
# Create a tf.data.Dataset object for easy batched iteration
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
dataset = dataset.shuffle(buffer_size=1024).batch(256)
for epoch in range(10):
for step, (x, y) in enumerate(dataset):
loss = train_on_batch(x, y)
print('Epoch %d: last batch loss = %.4f' % (epoch, float(loss)))
predictions = compute_predictions(features)
plt.scatter(features[:, 0], features[:, 1], c=predictions[:, 0] > 0.5)
```
### Analyzing the code run time
TensorFlow v2 with Eager Execution
```
import time
t0 = time.time()
for epoch in range(20):
for step, (x, y) in enumerate(dataset):
loss = train_on_batch(x, y)
t_end = time.time() - t0
print('Time per epoch: %.3f s' % (t_end / 20,))
```
Adding the @tf.function decorator to compile the function into a static TensorFlow graph
```
@tf.function
def train_on_batch_tf(x, y):
with tf.GradientTape() as tape:
predictions = compute_predictions(x)
loss = compute_loss(y, predictions)
dloss_dw, dloss_db = tape.gradient(loss, [w, b])
w.assign_sub(learning_rate * dloss_dw)
b.assign_sub(learning_rate * dloss_db)
return loss
```
Running using the Static Graph method
```
t0 = time.time()
for epoch in range(20):
for step, (x, y) in enumerate(dataset):
loss = train_on_batch_tf(x, y)
t_end = time.time() - t0
print('Time per epoch: %.3f s' % (t_end / 20,))
```
## There is a huge decrease in the time taken per epoch!
## Eager execution is great for debugging and printing results line-by-line, but when it's time to scale, static graphs are a researcher's best friend.
```
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.datasets import mnist
from tensorflow.contrib.eager.python import tfe
# enable eager mode
tf.enable_eager_execution()
tf.set_random_seed(0)
np.random.seed(0)
if not os.path.exists('weights/'):
os.makedirs('weights/')
# constants
units = 64
batch_size = 256
epochs = 2
num_classes = 10
# dataset loading
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((-1, 28, 28)) # 28 timesteps, 28 inputs / timestep
x_test = x_test.reshape((-1, 28, 28)) # 28 timesteps, 28 inputs / timestep
# one hot encode the labels. convert back to numpy as we cannot use a combination of numpy
# and tensors as input to keras
y_train_ohe = tf.one_hot(y_train, depth=num_classes).numpy()
y_test_ohe = tf.one_hot(y_test, depth=num_classes).numpy()
print('x train', x_train.shape)
print('y train', y_train_ohe.shape)
print('x test', x_test.shape)
print('y test', y_test_ohe.shape)
```
# Bi-Directional LSTM
Writing a bi-directional LSTM in Keras is super simple with the Bidirectional wrapper. However, such a model is slower than you might expect.
Some fixes are to use the GPU implementation for all of the cells, and to unroll the entire RNN beforehand. In normal Keras and TensorFlow, unrolling the RNN yields significant speed improvements since the symbolic loop is replaced with the unrolled graph representation of the RNN.
In Eager, I don't believe it is doing much to help with the speed.
```
class BiRNN(tf.keras.Model):
def __init__(self, units, num_classes, merge_mode='concat', num_layers=1):
super(BiRNN, self).__init__()
self.impl = 1 if tfe.num_gpus() == 0 else 2
self.cells = [tf.keras.layers.LSTMCell(units, implementation=self.impl) for _ in range(num_layers)]
self.rnn = tf.keras.layers.RNN(self.cells, unroll=True) # slower if not unrolled - probably because it is using K.rnn() internally.
self.bidirectional = tf.keras.layers.Bidirectional(self.rnn, merge_mode=merge_mode)
self.classifier = tf.keras.layers.Dense(num_classes)
def call(self, inputs, training=None, mask=None):
x = self.bidirectional(inputs)
output = self.classifier(x)
# softmax op does not exist on the gpu, so always use cpu
with tf.device('/cpu:0'):
output = tf.nn.softmax(output)
return output
device = '/cpu:0' if tfe.num_gpus() == 0 else '/gpu:0'
with tf.device(device):
# build model and optimizer
model = BiRNN(units, num_classes, num_layers=2)
model.compile(optimizer=tf.train.AdamOptimizer(0.01), loss='categorical_crossentropy',
metrics=['accuracy'])
# TF Keras tries to use entire dataset to determine shape without this step when using .fit()
# Fix = Use exactly one sample from the provided input dataset to determine input/output shape/s for the model
dummy_x = tf.zeros((1, 28, 28))
model._set_inputs(dummy_x)
# train
model.fit(x_train, y_train_ohe, batch_size=batch_size, epochs=epochs,
validation_data=(x_test, y_test_ohe), verbose=1)
# evaluate on test set
scores = model.evaluate(x_test, y_test_ohe, batch_size, verbose=1)
print("Final test loss and accuracy :", scores)
saver = tfe.Saver(model.variables)
saver.save('weights/07_01_bi_rnn/weights.ckpt')
```
# ANCOM: WGS
```
library(tidyverse)
library(magrittr)
source("/Users/Cayla/ANCOM/scripts/ancom_v2.1.R")
```
## T2
```
t2 <- read_csv('https://github.com/bryansho/PCOS_WGS_16S_metabolome/raw/master/DESEQ2/WGS/T2/T2_filtered_greater_00001.csv')
head(t2,n=1)
t2.meta <- read_csv('https://github.com/bryansho/PCOS_WGS_16S_metabolome/raw/master/DESEQ2/WGS/T2/Deseq2_T2_mapping.csv')
head(t2.meta,n=1)
# subset data
t2.meta.PvL <- t2.meta %>% filter(Treatment == 'Placebo' | Treatment == 'Let')
t2.PvL <- t2 %>% select(X1, any_of(t2.meta.PvL$Sample)) %>% column_to_rownames('X1')
t2.meta.LvLCH <- t2.meta %>% filter(Treatment == 'Let' | Treatment == 'CoL')
t2.LvLCH <- t2 %>% select(X1, any_of(t2.meta.LvLCH$Sample)) %>% column_to_rownames('X1')
```
### Placebo vs. Let
```
# Data Preprocessing
# feature_table is a df/matrix with features as rownames and samples in columns
feature_table <- t2.PvL
# character vector/column containing sample IDs
sample_var <- "Sample"
# grouping variable to detect structural zeros and outliers
group_var <- "Treatment"
# 0 < fraction < 1. For each feature, observations with proportion of mixture
# distribution < out_cut will be detected as outlier zeros;
# > (1 - out_cut) will be detected as outlier values
out_cut <- 0.05
# 0 < fraction < 1. Features with proportion of zeros > zero_cut are removed.
zero_cut <- 0.90
# samples with library size < lib_cut will be excluded in the analysis
lib_cut <- 0
# TRUE indicates a taxon would be classified as a structural zero in the
# corresponding experimental group using its asymptotic lower bound. More
# specifically, ```neg_lb = TRUE``` indicates you are using both criteria
# stated in section 3.2 of [ANCOM-II]
# (https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5682008/) to detect structural
# zeros; Otherwise, ```neg_lb = FALSE``` will only use the equation 1 in
# section 3.2 of [ANCOM-II](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5682008/)
# for declaring structural zeros.
neg_lb <- TRUE
prepro <- feature_table_pre_process(feature_table, t2.meta.PvL, sample_var, group_var,
out_cut, zero_cut, lib_cut, neg_lb)
# Preprocessed feature table
feature_table1 <- prepro$feature_table
# Preprocessed metadata
meta_data1 <- prepro$meta_data
# Structural zero info
struc_zero1 <- prepro$structure_zeros
# Run ANCOM
# name of the main variable of interest (character)
main_var <- "Treatment"
p_adj_method <- "BH" # number of taxa > 10, therefore use Benjamini-Hochberg correction
alpha <- 0.05
# character string representing the formula for adjustment
adj_formula <- NULL
# character string representing the formula for random effects in lme
rand_formula <- NULL
t_start <- Sys.time()
res <- ANCOM(feature_table1, meta_data1, struc_zero1, main_var, p_adj_method,
alpha, adj_formula, rand_formula)
t_end <- Sys.time()
t_end - t_start
# write output to file
# output contains the "W" statistic for each taxa - a count of the number of times
# the null hypothesis is rejected for each taxa
# detected_x are logicals indicating detection at specified FDR cut-off
write_csv(res$out, "2021-07-25_WGS_T2_PvL_ANCOM_data.csv")
n_taxa <- ifelse(is.null(struc_zero1), nrow(feature_table1), sum(apply(struc_zero1, 1, sum) == 0))
res$fig + scale_y_continuous(sec.axis = sec_axis(~ . * 100 / n_taxa, name = 'W proportion'))
ggsave(filename = paste(lubridate::today(),'volcano_WGS_T2_PvL.pdf',sep='_'), bg = 'transparent', device = 'pdf', dpi = 'retina')
# to find most significant taxa, I will sort the data
# 1) y (W statistic)
# 2) according to the absolute value of CLR mean difference
sig <- res$fig$data %>%
mutate(taxa_id = str_split_fixed(res$fig$data$taxa_id, pattern='s_', n=2)[,2]) %>% # remove leading 's_'
arrange(desc(y), desc(abs(x))) %>%
filter(y >= (0.7*n_taxa), !is.na(taxa_id)) # keep significant taxa, remove unidentified taxa
write.csv(sig, paste(lubridate::today(),'SigFeatures_WGS_T2_PvL.csv',sep='_'))
# save features with W > 0
non.zero <- res$fig$data %>%
arrange(desc(y), desc(abs(x))) %>%
mutate(taxa_id = str_split_fixed(res$fig$data$taxa_id, pattern='s_', n=2)[,2], # remove leading 's_'
W.proportion = y/(n_taxa-1)) %>% # add W
filter(y > 0) %>%
rowid_to_column()
write.csv(non.zero, paste(lubridate::today(),'NonZeroW_Features_WGS_T2_PvL.csv',sep='_'))
# plot top 20 taxa
sig %>%
slice_head(n=20) %>%
ggplot(aes(x, taxa_id)) +
geom_point(aes(size = 1)) +
theme_bw(base_size = 16) +
guides(size = FALSE) +
labs(x = 'CLR Mean Difference', y = NULL)
ggsave(filename = paste(lubridate::today(),'Top20_WGS_T2_PvL.pdf',sep='_'), bg = 'transparent', device = 'pdf', dpi = 'retina', width = 10)
```
### Let v Let-co-housed
```
# Data Preprocessing
feature_table <- t2.LvLCH
sample_var <- "Sample"
group_var <- "Treatment"
out_cut <- 0.05
zero_cut <- 0.90
lib_cut <- 0
neg_lb <- TRUE
prepro <- feature_table_pre_process(feature_table, t2.meta.LvLCH, sample_var, group_var,
out_cut, zero_cut, lib_cut, neg_lb)
# Preprocessed feature table
feature_table2 <- prepro$feature_table
# Preprocessed metadata
meta_data2 <- prepro$meta_data
# Structural zero info
struc_zero2 <- prepro$structure_zeros
# Run ANCOM
# name of the main variable of interest (character)
main_var <- "Treatment"
p_adj_method <- "BH" # number of taxa > 10, therefore use Benjamini-Hochberg correction
alpha <- 0.05
# character string representing the formula for adjustment
adj_formula <- NULL
# character string representing the formula for random effects in lme
rand_formula <- NULL
t_start <- Sys.time()
res2 <- ANCOM(feature_table2, meta_data2, struc_zero2, main_var, p_adj_method,
alpha, adj_formula, rand_formula)
t_end <- Sys.time()
t_end - t_start
# write output to file
# output contains the "W" statistic for each taxa - a count of the number of times
# the null hypothesis is rejected for each taxa
# detected_x are logicals indicating detection at specified FDR cut-off
write_csv(res2$out, "2021-07-25_WGS_T2_LvLCH_ANCOM_data.csv")
n_taxa <- ifelse(is.null(struc_zero2), nrow(feature_table2), sum(apply(struc_zero2, 1, sum) == 0))
res2$fig + scale_y_continuous(sec.axis = sec_axis(~ . * 100 / n_taxa, name = 'W proportion'))
ggsave(filename = paste(lubridate::today(),'volcano_WGS_T2_LvLCH.pdf',sep='_'), bg = 'transparent', device = 'pdf', dpi = 'retina')
# save features with W > 0
non.zero <- res2$fig$data %>%
arrange(desc(y), desc(abs(x))) %>%
mutate(taxa_id = str_split_fixed(res2$fig$data$taxa_id, pattern='s_', n=2)[,2], # remove leading 's_'
W.proportion = y/(n_taxa-1)) %>% # add W
filter(y > 0) %>%
rowid_to_column()
write.csv(non.zero, paste(lubridate::today(),'NonZeroW_Features_WGS_T2_LvLCH.csv',sep='_'))
# to find most significant taxa, I will sort the data
# 1) y (W statistic)
# 2) according to the absolute value of CLR mean difference
sig <- res2$fig$data %>%
mutate(taxa_id = str_split_fixed(res2$fig$data$taxa_id, pattern='s_', n=2)[,2]) %>% # remove leading 's_'
arrange(desc(y), desc(abs(x))) %>%
filter(y >= (0.7*n_taxa), !is.na(taxa_id)) # keep significant taxa, remove unidentified taxa
write.csv(sig, paste(lubridate::today(),'SigFeatures_WGS_T2_LvLCH.csv',sep='_'))
# plot top 20 taxa
sig %>%
slice_head(n=20) %>%
ggplot(aes(x, taxa_id)) +
geom_point(aes(size = 1)) +
theme_bw(base_size = 16) +
guides(size = FALSE) +
labs(x = 'CLR Mean Difference', y = NULL)
ggsave(filename = paste(lubridate::today(),'Top20_WGS_T2_LvLCH.pdf',sep='_'), bg = 'transparent', device = 'pdf', dpi = 'retina', width = 10)
```
## T5
```
t5 <- read_csv('https://github.com/bryansho/PCOS_WGS_16S_metabolome/raw/master/DESEQ2/WGS/T5/T5_filtered_greater_00001.csv')
head(t5,n=1)
t5.meta <- read_csv('https://github.com/bryansho/PCOS_WGS_16S_metabolome/raw/master/DESEQ2/WGS/T5/Deseq2_T5_mapping.csv')
head(t5.meta,n=1)
# subset data
t5.meta.PvL <- t5.meta %>% filter(Treatment == 'Placebo' | Treatment == 'Let')
t5.PvL <- t5 %>% select(X1, any_of(t5.meta.PvL$SampleID)) %>% column_to_rownames('X1')
t5.meta.LvLCH <- t5.meta %>% filter(Treatment == 'Let' | Treatment == 'CoL')
t5.LvLCH <- t5 %>% select(X1, any_of(t5.meta.LvLCH$SampleID)) %>% column_to_rownames('X1')
```
### Placebo v Let
```
# Data Preprocessing
feature_table <- t5.PvL
sample_var <- "SampleID"
group_var <- "Treatment"
out_cut <- 0.05
zero_cut <- 0.90
lib_cut <- 0
neg_lb <- TRUE
prepro <- feature_table_pre_process(feature_table, t5.meta.PvL, sample_var, group_var,
out_cut, zero_cut, lib_cut, neg_lb)
# Preprocessed feature table
feature_table3 <- prepro$feature_table
# Preprocessed metadata
meta_data3 <- prepro$meta_data
# Structural zero info
struc_zero3 <- prepro$structure_zeros
# Run ANCOM
# name of the main variable of interest (character)
main_var <- "Treatment"
p_adj_method <- "BH" # number of taxa > 10, therefore use Benjamini-Hochberg correction
alpha <- 0.05
# character string representing the formula for adjustment
adj_formula <- NULL
# character string representing the formula for random effects in lme
rand_formula <- NULL
t_start <- Sys.time()
res3 <- ANCOM(feature_table3, meta_data3, struc_zero3, main_var, p_adj_method,
alpha, adj_formula, rand_formula)
t_end <- Sys.time()
t_end - t_start
# write output to file
# output contains the "W" statistic for each taxa - a count of the number of times
# the null hypothesis is rejected for each taxa
# detected_x are logicals indicating detection at specified FDR cut-off
write_csv(res3$out, "2021-07-25_WGS_T5_PvL_ANCOM_data.csv")
n_taxa <- ifelse(is.null(struc_zero3), nrow(feature_table3), sum(apply(struc_zero3, 1, sum) == 0))
res3$fig + scale_y_continuous(sec.axis = sec_axis(~ . * 100 / n_taxa, name = 'W proportion'))
ggsave(filename = paste(lubridate::today(),'volcano_WGS_T5_PvL.pdf',sep='_'), bg = 'transparent', device = 'pdf', dpi = 'retina')
# save features with W > 0
non.zero <- res3$fig$data %>%
arrange(desc(y), desc(abs(x))) %>%
mutate(taxa_id = str_split_fixed(res3$fig$data$taxa_id, pattern='s_', n=2)[,2], # remove leading 's_'
W.proportion = y/(n_taxa-1)) %>% # add W
filter(y > 0) %>%
rowid_to_column()
write.csv(non.zero, paste(lubridate::today(),'NonZeroW_Features_WGS_T5_PvL.csv',sep='_'))
# to find most significant taxa, I will sort the data
# 1) y (W statistic)
# 2) according to the absolute value of CLR mean difference
sig <- res3$fig$data %>%
mutate(taxa_id = str_split_fixed(res3$fig$data$taxa_id, pattern='s_', n=2)[,2]) %>% # remove leading 's_'
arrange(desc(y), desc(abs(x))) %>%
filter(y >= (0.7*n_taxa), !is.na(taxa_id)) # keep significant taxa, remove unidentified taxa
write.csv(sig, paste(lubridate::today(),'SigFeatures_WGS_T5_PvL.csv',sep='_'))
# plot top 20 taxa
sig %>%
slice_head(n=20) %>%
ggplot(aes(x, taxa_id)) +
geom_point(aes(size = 1)) +
theme_bw(base_size = 16) +
guides(size = FALSE) +
labs(x = 'CLR Mean Difference', y = NULL)
ggsave(filename = paste(lubridate::today(),'Top20_WGS_T5_PvL.pdf',sep='_'), bg = 'transparent', device = 'pdf', dpi = 'retina', width = 10)
```
### Let v Let-co-housed
```
# Data Preprocessing
feature_table <- t5.LvLCH
sample_var <- "SampleID"
group_var <- "Treatment"
out_cut <- 0.05
zero_cut <- 0.90
lib_cut <- 0
neg_lb <- TRUE
prepro <- feature_table_pre_process(feature_table, t5.meta.LvLCH, sample_var, group_var,
out_cut, zero_cut, lib_cut, neg_lb)
# Preprocessed feature table
feature_table4 <- prepro$feature_table
# Preprocessed metadata
meta_data4 <- prepro$meta_data
# Structural zero info
struc_zero4 <- prepro$structure_zeros
# Run ANCOM
# name of the main variable of interest (character)
main_var <- "Treatment"
p_adj_method <- "BH" # number of taxa > 10, therefore use Benjamini-Hochberg correction
alpha <- 0.05
# character string representing the formula for adjustment
adj_formula <- NULL
# character string representing the formula for random effects in lme
rand_formula <- NULL
t_start <- Sys.time()
res4 <- ANCOM(feature_table4, meta_data4, struc_zero4, main_var, p_adj_method,
alpha, adj_formula, rand_formula)
t_end <- Sys.time()
t_end - t_start
# write output to file
# output contains the "W" statistic for each taxa - a count of the number of times
# the null hypothesis is rejected for each taxa
# detected_x are logicals indicating detection at specified FDR cut-off
write_csv(res4$out, "2021-07-25_WGS_T5_LvLCH_ANCOM_data.csv")
n_taxa <- ifelse(is.null(struc_zero4), nrow(feature_table4), sum(apply(struc_zero4, 1, sum) == 0))
res4$fig + scale_y_continuous(sec.axis = sec_axis(~ . * 100 / n_taxa, name = 'W proportion'))
ggsave(filename = paste(lubridate::today(),'volcano_WGS_T5_LvLCH.pdf',sep='_'), bg = 'transparent', device = 'pdf', dpi = 'retina')
# save features with W > 0
non.zero <- res4$fig$data %>%
arrange(desc(y), desc(abs(x))) %>%
mutate(taxa_id = str_split_fixed(res4$fig$data$taxa_id, pattern='s_', n=2)[,2], # remove leading 's_'
W.proportion = y/(n_taxa-1)) %>% # add W
filter(y > 0) %>%
rowid_to_column()
write.csv(non.zero, paste(lubridate::today(),'NonZeroW_Features_WGS_T5_LvLCH.csv',sep='_'))
# to find most significant taxa, I will sort the data
# 1) y (W statistic)
# 2) according to the absolute value of CLR mean difference
sig <- res4$fig$data %>%
mutate(taxa_id = str_split_fixed(res4$fig$data$taxa_id, pattern='s_', n=2)[,2]) %>% # remove leading 's_'
arrange(desc(y), desc(abs(x))) %>%
filter(y >= (0.7*n_taxa), !is.na(taxa_id)) # keep significant taxa, remove unidentified taxa
write.csv(sig, paste(lubridate::today(),'SigFeatures_WGS_T5_LvLCH.csv',sep='_'))
# plot top 20 taxa
sig %>%
slice_head(n=20) %>%
ggplot(aes(x, taxa_id)) +
geom_point(aes(size = 1)) +
theme_bw(base_size = 16) +
guides(size = FALSE) +
labs(x = 'CLR Mean Difference', y = NULL)
ggsave(filename = paste(lubridate::today(),'Top20_WGS_T5_LvLCH.pdf',sep='_'), bg = 'transparent', device = 'pdf', dpi = 'retina', width=10)
```
[Table of Contents](./table_of_contents.ipynb)
# The Extended Kalman Filter
```
from __future__ import division, print_function
%matplotlib inline
#format the book
import book_format
book_format.set_style()
```
We have developed the theory for the linear Kalman filter. Then, in the last two chapters we broached the topic of using Kalman filters for nonlinear problems. In this chapter we will learn the Extended Kalman filter (EKF). The EKF handles nonlinearity by linearizing the system at the point of the current estimate, and then the linear Kalman filter is used to filter this linearized system. It was one of the very first techniques used for nonlinear problems, and it remains the most common technique.
The EKF provides significant mathematical challenges to the designer of the filter; this is the most challenging chapter of the book. I do everything I can to avoid the EKF in favor of other techniques that have been developed to filter nonlinear problems. However, the topic is unavoidable; all classic papers and a majority of current papers in the field use the EKF. Even if you do not use the EKF in your own work you will need to be familiar with the topic to be able to read the literature.
## Linearizing the Kalman Filter
The Kalman filter uses linear equations, so it does not work with nonlinear problems. Problems can be nonlinear in two ways. First, the process model might be nonlinear. An object falling through the atmosphere encounters drag which reduces its acceleration. The drag coefficient varies based on the velocity of the object. The resulting behavior is nonlinear - it cannot be modeled with linear equations. Second, the measurements could be nonlinear. For example, a radar gives a range and bearing to a target. We use trigonometry, which is nonlinear, to compute the position of the target.
For the linear filter we have these equations for the process and measurement models:
$$\begin{aligned}\dot{\mathbf x} &= \mathbf{Ax} + w_x\\
\mathbf z &= \mathbf{Hx} + w_z
\end{aligned}$$
Where $\mathbf A$ is the system's dynamics matrix. Using the state space methods covered in the **Kalman Filter Math** chapter these equations can be transformed into
$$\begin{aligned}\bar{\mathbf x} &= \mathbf{Fx} \\
\mathbf z &= \mathbf{Hx}
\end{aligned}$$
where $\mathbf F$ is the *fundamental matrix*. The noise $w_x$ and $w_z$ terms are incorporated into the matrices $\mathbf R$ and $\mathbf Q$. This form of the equations allows us to compute the state at step $k$ given a measurement at step $k$ and the state estimate at step $k-1$. In earlier chapters I built your intuition and minimized the math by using problems describable with Newton's equations. We know how to design $\mathbf F$ based on high school physics.
For the nonlinear model the linear expression $\mathbf{Fx} + \mathbf{Bu}$ is replaced by a nonlinear function $f(\mathbf x, \mathbf u)$, and the linear expression $\mathbf{Hx}$ is replaced by a nonlinear function $h(\mathbf x)$:
$$\begin{aligned}\dot{\mathbf x} &= f(\mathbf x, \mathbf u) + w_x\\
\mathbf z &= h(\mathbf x) + w_z
\end{aligned}$$
You might imagine that we could proceed by finding a new set of Kalman filter equations that optimally solve these equations. But if you remember the charts in the **Nonlinear Filtering** chapter you'll recall that passing a Gaussian through a nonlinear function results in a probability distribution that is no longer Gaussian. So this will not work.
The EKF does not alter the Kalman filter's linear equations. Instead, it *linearizes* the nonlinear equations at the point of the current estimate, and uses this linearization in the linear Kalman filter.
*Linearize* means what it sounds like. We find a line that most closely matches the curve at a defined point. The graph below linearizes the parabola $f(x)=x^2-2x$ at $x=1.5$.
```
import kf_book.ekf_internal as ekf_internal
ekf_internal.show_linearization()
```
If the curve above is the process model, then the dotted line shows the linearization of that curve for the estimate $x=1.5$.
We linearize systems by taking the derivative, which finds the slope of a curve:
$$\begin{aligned}
f(x) &= x^2 -2x \\
\frac{df}{dx} &= 2x - 2
\end{aligned}$$
and then evaluating it at $x$:
$$\begin{aligned}m &= f'(x=1.5) \\&= 2(1.5) - 2 \\&= 1\end{aligned}$$
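If you would like to check that slope symbolically, here is a minimal SymPy sketch (my own throwaway helper, not part of the book's support code):
```python
# Symbolic check of the linearization of f(x) = x^2 - 2x at x = 1.5
import sympy

x = sympy.symbols('x')
f = x**2 - 2*x
x0 = sympy.Rational(3, 2) # linearize at x = 1.5
slope = sympy.diff(f, x).subs(x, x0) # 2*x0 - 2 = 1
intercept = f.subs(x, x0) - slope*x0 # so the line touches the curve at x0
print('f_lin(x) = {}*x + ({})'.format(slope, intercept)) # 1*x + (-9/4)
```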
Linearizing systems of differential equations is similar. We linearize $f(\mathbf x, \mathbf u)$ and $h(\mathbf x)$ by taking the partial derivatives of each to evaluate $\mathbf F$ and $\mathbf H$ at the point $\mathbf x_t$ and $\mathbf u_t$. We call the matrix of partial derivatives the [*Jacobian*](https://en.wikipedia.org/wiki/Jacobian_matrix_and_determinant). This gives us the discrete state transition matrix and measurement model matrix:
$$
\begin{aligned}
\mathbf F
&= {\frac{\partial{f(\mathbf x_t, \mathbf u_t)}}{\partial{\mathbf x}}}\biggr|_{{\mathbf x_t},{\mathbf u_t}} \\
\mathbf H &= \frac{\partial{h(\bar{\mathbf x}_t)}}{\partial{\bar{\mathbf x}}}\biggr|_{\bar{\mathbf x}_t}
\end{aligned}
$$
This leads to the following equations for the EKF. I put boxes around the differences from the linear filter:
$$\begin{array}{l|l}
\text{linear Kalman filter} & \text{EKF} \\
\hline
& \boxed{\mathbf F = {\frac{\partial{f(\mathbf x_t, \mathbf u_t)}}{\partial{\mathbf x}}}\biggr|_{{\mathbf x_t},{\mathbf u_t}}} \\
\mathbf{\bar x} = \mathbf{Fx} + \mathbf{Bu} & \boxed{\mathbf{\bar x} = f(\mathbf x, \mathbf u)} \\
\mathbf{\bar P} = \mathbf{FPF}^\mathsf{T}+\mathbf Q & \mathbf{\bar P} = \mathbf{FPF}^\mathsf{T}+\mathbf Q \\
\hline
& \boxed{\mathbf H = \frac{\partial{h(\bar{\mathbf x}_t)}}{\partial{\bar{\mathbf x}}}\biggr|_{\bar{\mathbf x}_t}} \\
\textbf{y} = \mathbf z - \mathbf{H \bar{x}} & \textbf{y} = \mathbf z - \boxed{h(\bar{x})}\\
\mathbf{K} = \mathbf{\bar{P}H}^\mathsf{T} (\mathbf{H\bar{P}H}^\mathsf{T} + \mathbf R)^{-1} & \mathbf{K} = \mathbf{\bar{P}H}^\mathsf{T} (\mathbf{H\bar{P}H}^\mathsf{T} + \mathbf R)^{-1} \\
\mathbf x=\mathbf{\bar{x}} +\mathbf{K\textbf{y}} & \mathbf x=\mathbf{\bar{x}} +\mathbf{K\textbf{y}} \\
\mathbf P= (\mathbf{I}-\mathbf{KH})\mathbf{\bar{P}} & \mathbf P= (\mathbf{I}-\mathbf{KH})\mathbf{\bar{P}}
\end{array}$$
We don't normally use $\mathbf{Fx}$ to propagate the state for the EKF as the linearization causes inaccuracies. It is typical to compute $\bar{\mathbf x}$ using a suitable numerical integration technique such as Euler or Runge Kutta. Thus I wrote $\mathbf{\bar x} = f(\mathbf x, \mathbf u)$. For the same reasons we don't use $\mathbf{H\bar{x}}$ in the computation for the residual, opting for the more accurate $h(\bar{\mathbf x})$.
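As a sketch of what that propagation might look like in code (the model function below is just an illustrative stand-in), a simple Euler integrator is:
```python
# Minimal Euler propagation sketch; f is a placeholder continuous-time model
# that returns dx/dt. Illustrative only.
import numpy as np

def f(x, u):
    # example: constant velocity in the first state, everything else constant
    dx = np.zeros_like(x)
    dx[0] = x[1]
    return dx

def predict_state(x, u, dt, steps=10):
    """Propagate x over an interval dt using several small Euler steps."""
    h = dt / steps
    for _ in range(steps):
        x = x + h * f(x, u)
    return x

print(predict_state(np.array([0., 100., 1000.]), u=None, dt=0.05))
# -> approximately [5., 100., 1000.]
```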
I think the easiest way to understand the EKF is to start off with an example. Later you may want to come back and reread this section.
## Example: Tracking an Airplane
This example tracks an airplane using ground based radar. We implemented a UKF for this problem in the last chapter. Now we will implement an EKF for the same problem so we can compare both the filter performance and the level of effort required to implement the filter.
Radars work by emitting a beam of radio waves and scanning for a return bounce. Anything in the beam's path will reflect some of the signal back to the radar. By timing how long it takes for the reflected signal to get back to the radar the system can compute the *slant distance* - the straight line distance from the radar installation to the object.
The relationship between the radar's slant range distance $r$ and elevation angle $\epsilon$ with the horizontal position $x$ and altitude $y$ of the aircraft is illustrated in the figure below:
```
ekf_internal.show_radar_chart()
```
This gives us the equalities:
$$\begin{aligned}
\epsilon &= \tan^{-1} \frac y x\\
r^2 &= x^2 + y^2
\end{aligned}$$
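As a quick numeric sanity check of these relationships (the values below are arbitrary):
```python
# Arbitrary example: an aircraft 1000 m down range at 500 m altitude.
from math import atan2, sqrt, degrees

x, y = 1000., 500.
r = sqrt(x**2 + y**2)   # slant range
epsilon = atan2(y, x)   # elevation angle
print('slant range = {:.1f} m, elevation = {:.1f} degrees'.format(r, degrees(epsilon)))
# slant range = 1118.0 m, elevation = 26.6 degrees
```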
### Design the State Variables
We want to track the position of an aircraft assuming a constant velocity and altitude, and measurements of the slant distance to the aircraft. That means we need 3 state variables - horizontal distance, horizontal velocity, and altitude:
$$\mathbf x = \begin{bmatrix}\mathtt{distance} \\\mathtt{velocity}\\ \mathtt{altitude}\end{bmatrix}= \begin{bmatrix}x \\ \dot x\\ y\end{bmatrix}$$
### Design the Process Model
We assume a Newtonian, kinematic system for the aircraft. We've used this model in previous chapters, so by inspection you may recognize that we want
$$\mathbf F = \left[\begin{array}{cc|c} 1 & \Delta t & 0\\
0 & 1 & 0 \\ \hline
0 & 0 & 1\end{array}\right]$$
I've partitioned the matrix into blocks to show the upper left block is a constant velocity model for $x$, and the lower right block is a constant position model for $y$.
However, let's practice finding these matrices. We model systems with a set of differential equations. We need an equation in the form
$$\dot{\mathbf x} = \mathbf{Ax} + \mathbf{w}$$
where $\mathbf{w}$ is the system noise.
The variables $x$ and $y$ are independent so we can compute them separately. The differential equations for motion in one dimension are:
$$\begin{aligned}v &= \dot x \\
a &= \ddot{x} = 0\end{aligned}$$
Now we put the differential equations into state-space form. If this was a second or greater order differential system we would have to first reduce them to an equivalent set of first degree equations. The equations are first order, so we put them in state space matrix form as
$$\begin{aligned}\begin{bmatrix}\dot x \\ \ddot{x}\end{bmatrix} &= \begin{bmatrix}0&1\\0&0\end{bmatrix} \begin{bmatrix}x \\
\dot x\end{bmatrix} \\ \dot{\mathbf x} &= \mathbf{Ax}\end{aligned}$$
where $\mathbf A=\begin{bmatrix}0&1\\0&0\end{bmatrix}$.
Recall that $\mathbf A$ is the *system dynamics matrix*. It describes a set of linear differential equations. From it we must compute the state transition matrix $\mathbf F$. $\mathbf F$ describes a discrete set of linear equations which compute $\mathbf x$ for a discrete time step $\Delta t$.
A common way to compute $\mathbf F$ is to use the power series expansion of the matrix exponential:
$$\mathbf F(\Delta t) = e^{\mathbf A\Delta t} = \mathbf{I} + \mathbf A\Delta t + \frac{(\mathbf A\Delta t)^2}{2!} + \frac{(\mathbf A \Delta t)^3}{3!} + ... $$
$\mathbf A^2 = \begin{bmatrix}0&0\\0&0\end{bmatrix}$, so all higher powers of $\mathbf A$ are also $\mathbf{0}$. Thus the power series expansion is:
$$
\begin{aligned}
\mathbf F &=\mathbf{I} + \mathbf At + \mathbf{0} \\
&= \begin{bmatrix}1&0\\0&1\end{bmatrix} + \begin{bmatrix}0&1\\0&0\end{bmatrix}\Delta t\\
\mathbf F &= \begin{bmatrix}1&\Delta t\\0&1\end{bmatrix}
\end{aligned}$$
This is the same result used by the kinematic equations! This exercise was unnecessary other than to illustrate finding the state transition matrix from linear differential equations. We will conclude the chapter with an example that will require the use of this technique.
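We can double check the expansion above with SymPy's matrix exponential (a quick verification, not filter code):
```python
# Verify F = e^{A dt} for the constant velocity block.
import sympy

dt = sympy.symbols('dt')
A = sympy.Matrix([[0, 1],
                  [0, 0]])
F = (A * dt).exp()   # matrix exponential; the series terminates because A is nilpotent
F                    # Matrix([[1, dt], [0, 1]])
```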
### Design the Measurement Model
The measurement function takes the state estimate of the prior $\bar{\mathbf x}$ and turns it into a measurement of the slant range distance. We use the Pythagorean theorem to derive:
$$h(\bar{\mathbf x}) = \sqrt{x^2 + y^2}$$
The relationship between the slant distance and the position on the ground is nonlinear due to the square root. We linearize it by evaluating its partial derivative at $\mathbf x_t$:
$$
\mathbf H = \frac{\partial{h(\bar{\mathbf x})}}{\partial{\bar{\mathbf x}}}\biggr|_{\bar{\mathbf x}_t}
$$
This matrix of partial derivatives is called the Jacobian, and takes the form
$$\frac{\partial \mathbf H}{\partial \bar{\mathbf x}} =
\begin{bmatrix}
\frac{\partial h_1}{\partial x_1} & \frac{\partial h_1}{\partial x_2} &\dots \\
\frac{\partial h_2}{\partial x_1} & \frac{\partial h_2}{\partial x_2} &\dots \\
\vdots & \vdots
\end{bmatrix}
$$
In other words, each element in the matrix is the partial derivative of the function $h$ with respect to the $x$ variables. For our problem we have
$$\mathbf H = \begin{bmatrix}{\partial h}/{\partial x} & {\partial h}/{\partial \dot{x}} & {\partial h}/{\partial y}\end{bmatrix}$$
Solving each in turn:
$$\begin{aligned}
\frac{\partial h}{\partial x} &= \frac{\partial}{\partial x} \sqrt{x^2 + y^2} \\
&= \frac{x}{\sqrt{x^2 + y^2}}
\end{aligned}$$
and
$$\begin{aligned}
\frac{\partial h}{\partial \dot{x}} &=
\frac{\partial}{\partial \dot{x}} \sqrt{x^2 + y^2} \\
&= 0
\end{aligned}$$
and
$$\begin{aligned}
\frac{\partial h}{\partial y} &= \frac{\partial}{\partial y} \sqrt{x^2 + y^2} \\
&= \frac{y}{\sqrt{x^2 + y^2}}
\end{aligned}$$
giving us
$$\mathbf H =
\begin{bmatrix}
\frac{x}{\sqrt{x^2 + y^2}} &
0 &
\frac{y}{\sqrt{x^2 + y^2}}
\end{bmatrix}$$
This may seem daunting, so step back and recognize that all of this math is doing something very simple. We have an equation for the slant range to the airplane which is nonlinear. The Kalman filter only works with linear equations, so we need to find a linear equation that approximates $\mathbf H$. As we discussed above, finding the slope of a nonlinear equation at a given point is a good approximation. For the Kalman filter, the 'given point' is the state variable $\mathbf x$ so we need to take the derivative of the slant range with respect to $\mathbf x$. For the linear Kalman filter $\mathbf H$ was a constant that we computed prior to running the filter. For the EKF $\mathbf H$ is updated at each step as the evaluation point $\bar{\mathbf x}$ changes at each epoch.
To make this more concrete, let's now write a Python function that computes the Jacobian of $h$ for this problem.
```
from math import sqrt
from numpy import array

def HJacobian_at(x):
    """ compute Jacobian of H matrix at x """
    horiz_dist = x[0]
    altitude = x[2]
    denom = sqrt(horiz_dist**2 + altitude**2)
    return array([[horiz_dist/denom, 0., altitude/denom]])
```
Finally, let's provide the code for $h(\bar{\mathbf x})$:
```
def hx(x):
""" compute measurement for slant range that
would correspond to state x.
"""
return (x[0]**2 + x[2]**2) ** 0.5
```
Now let's write a simulation for our radar.
```
from numpy.random import randn
import math
class RadarSim:
""" Simulates the radar signal returns from an object
flying at a constant altitude and velocity in 1D.
"""
def __init__(self, dt, pos, vel, alt):
self.pos = pos
self.vel = vel
self.alt = alt
self.dt = dt
def get_range(self):
""" Returns slant range to the object. Call once
for each new measurement at dt time from last call.
"""
# add some process noise to the system
self.vel = self.vel + .1*randn()
self.alt = self.alt + .1*randn()
self.pos = self.pos + self.vel*self.dt
# add measurement noise
err = self.pos * 0.05*randn()
slant_dist = math.sqrt(self.pos**2 + self.alt**2)
return slant_dist + err
```
### Design Process and Measurement Noise
The radar measures the range to a target. We will use $\sigma_{range}= 5$ meters for the noise. This gives us
$$\mathbf R = \begin{bmatrix}\sigma_{range}^2\end{bmatrix} = \begin{bmatrix}25\end{bmatrix}$$
The design of $\mathbf Q$ requires some discussion. The state $\mathbf x= \begin{bmatrix}x & \dot x & y\end{bmatrix}^\mathtt{T}$. The first two elements are position (down range distance) and velocity, so we can use `Q_discrete_white_noise` to compute the values for the upper left hand side of $\mathbf Q$. The third element of $\mathbf x$ is altitude, which we are assuming is independent of the down range distance. That leads us to a block design of $\mathbf Q$ of:
$$\mathbf Q = \begin{bmatrix}\mathbf Q_\mathtt{x} & 0 \\ 0 & \mathbf Q_\mathtt{y}\end{bmatrix}$$
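One way to build that block matrix is sketched below, using FilterPy's `Q_discrete_white_noise` and SciPy's `block_diag`; the variance values are placeholders matching those used in the implementation that follows.
```python
# Sketch: block-diagonal Q with a kinematic block for (x, x_dot) and a scalar
# variance for altitude. The variance values here are placeholders.
from filterpy.common import Q_discrete_white_noise
from scipy.linalg import block_diag

dt = 0.05
q_x = Q_discrete_white_noise(dim=2, dt=dt, var=0.1)   # upper-left 2x2 block
Q = block_diag(q_x, 0.1)                              # altitude variance in the lower right
print(Q)
```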
### Implementation
`FilterPy` provides the class `ExtendedKalmanFilter`. It works similarly to the `KalmanFilter` class we have been using, except that it allows you to provide a function that computes the Jacobian of $\mathbf H$ and the function $h(\mathbf x)$.
We start by importing the filter and creating it. The dimension of `x` is 3 and `z` has dimension 1.
```python
from filterpy.kalman import ExtendedKalmanFilter
rk = ExtendedKalmanFilter(dim_x=3, dim_z=1)
```
We create the radar simulator:
```python
radar = RadarSim(dt, pos=0., vel=100., alt=1000.)
```
We will initialize the filter near the airplane's actual position:
```python
rk.x = array([radar.pos, radar.vel-10, radar.alt+100])
```
We assign the system matrix using the first two terms of the Taylor series expansion we computed above:
```python
dt = 0.05
rk.F = eye(3) + array([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])*dt
```
After assigning reasonable values to $\mathbf R$, $\mathbf Q$, and $\mathbf P$ we can run the filter with a simple loop. We pass the functions for computing the Jacobian of $\mathbf H$ and $h(x)$ into the `update` method.
```python
for i in range(int(20/dt)):
z = radar.get_range()
rk.update(array([z]), HJacobian_at, hx)
rk.predict()
```
Adding some boilerplate code to save and plot the results we get:
```
from filterpy.common import Q_discrete_white_noise
from filterpy.kalman import ExtendedKalmanFilter
from numpy import eye, array, asarray
import numpy as np
dt = 0.05
rk = ExtendedKalmanFilter(dim_x=3, dim_z=1)
radar = RadarSim(dt, pos=0., vel=100., alt=1000.)
# make an imperfect starting guess
rk.x = array([radar.pos-100, radar.vel+100, radar.alt+1000])
rk.F = eye(3) + array([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]) * dt
range_std = 5. # meters
rk.R = np.diag([range_std**2])
rk.Q[0:2, 0:2] = Q_discrete_white_noise(2, dt=dt, var=0.1)
rk.Q[2,2] = 0.1
rk.P *= 50
xs, track = [], []
for i in range(int(20/dt)):
z = radar.get_range()
track.append((radar.pos, radar.vel, radar.alt))
rk.update(array([z]), HJacobian_at, hx)
xs.append(rk.x)
rk.predict()
xs = asarray(xs)
track = asarray(track)
time = np.arange(0, len(xs)*dt, dt)
ekf_internal.plot_radar(xs, track, time)
```
## Using SymPy to compute Jacobians
Depending on your experience with derivatives you may have found the computation of the Jacobian difficult. Even if you found it easy, a slightly more difficult problem easily leads to very difficult computations.
As explained in Appendix A, we can use the SymPy package to compute the Jacobian for us.
```
import sympy
from IPython.display import display
sympy.init_printing(use_latex='mathjax')
x, x_vel, y = sympy.symbols('x, x_vel y')
H = sympy.Matrix([sympy.sqrt(x**2 + y**2)])
state = sympy.Matrix([x, x_vel, y])
J = H.jacobian(state)
display(state)
display(J)
```
This result is the same as the result we computed above, and with much less effort on our part!
## Robot Localization
It's time to try a real problem. I warn you that this section is difficult. However, most books choose simple, textbook problems with simple answers, and you are left wondering how to solve a real world problem.
We will consider the problem of robot localization. We already implemented this in the **Unscented Kalman Filter** chapter, and I recommend you read it now if you haven't already. In this scenario we have a robot that is moving through a landscape using a sensor to detect landmarks. This could be a self driving car using computer vision to identify trees, buildings, and other landmarks. It might be one of those small robots that vacuum your house, or a robot in a warehouse.
The robot has 4 wheels in the same configuration used by automobiles. It maneuvers by pivoting the front wheels. This causes the robot to pivot around the rear axle while moving forward. This is nonlinear behavior which we will have to model.
The robot has a sensor that measures the range and bearing to known targets in the landscape. This is nonlinear because computing a position from a range and bearing requires square roots and trigonometry.
Both the process model and measurement models are nonlinear. The EKF accommodates both, so we provisionally conclude that the EKF is a viable choice for this problem.
### Robot Motion Model
At a first approximation an automobile steers by pivoting the front tires while moving forward. The front of the car moves in the direction that the wheels are pointing while pivoting around the rear tires. This simple description is complicated by issues such as slippage due to friction, the differing behavior of the rubber tires at different speeds, and the need for the outside tire to travel a different radius than the inner tire. Accurately modeling steering requires a complicated set of differential equations.
For lower speed robotic applications a simpler *bicycle model* has been found to perform well. This is a depiction of the model:
```
ekf_internal.plot_bicycle()
```
In the **Unscented Kalman Filter** chapter we derived these equations:
$$\begin{aligned}
\beta &= \frac d w \tan(\alpha) \\
x &= x - R\sin(\theta) + R\sin(\theta + \beta) \\
y &= y + R\cos(\theta) - R\cos(\theta + \beta) \\
\theta &= \theta + \beta
\end{aligned}
$$
where $\theta$ is the robot's heading.
You do not need to understand this model in detail if you are not interested in steering models. The important thing to recognize is that our motion model is nonlinear, and we will need to deal with that with our Kalman filter.
### Design the State Variables
For our filter we will maintain the position $x,y$ and orientation $\theta$ of the robot:
$$\mathbf x = \begin{bmatrix}x \\ y \\ \theta\end{bmatrix}$$
Our control input $\mathbf u$ is the velocity $v$ and steering angle $\alpha$:
$$\mathbf u = \begin{bmatrix}v \\ \alpha\end{bmatrix}$$
### Design the System Model
We model our system as a nonlinear motion model plus noise.
$$\bar x = f(x, u) + \mathcal{N}(0, Q)$$
Using the motion model for a robot that we created above, we can expand this to
$$\bar{\begin{bmatrix}x\\y\\\theta\end{bmatrix}} = \begin{bmatrix}x\\y\\\theta\end{bmatrix} +
\begin{bmatrix}- R\sin(\theta) + R\sin(\theta + \beta) \\
R\cos(\theta) - R\cos(\theta + \beta) \\
\beta\end{bmatrix}$$
We find $\mathbf F$ by taking the Jacobian of $f(x, u)$.
$$\mathbf F = \frac{\partial f(x, u)}{\partial x} =\begin{bmatrix}
\frac{\partial f_1}{\partial x} &
\frac{\partial f_1}{\partial y} &
\frac{\partial f_1}{\partial \theta}\\
\frac{\partial f_2}{\partial x} &
\frac{\partial f_2}{\partial y} &
\frac{\partial f_2}{\partial \theta} \\
\frac{\partial f_3}{\partial x} &
\frac{\partial f_3}{\partial y} &
\frac{\partial f_3}{\partial \theta}
\end{bmatrix}
$$
When we calculate these we get
$$\mathbf F = \begin{bmatrix}
1 & 0 & -R\cos(\theta) + R\cos(\theta+\beta) \\
0 & 1 & -R\sin(\theta) + R\sin(\theta+\beta) \\
0 & 0 & 1
\end{bmatrix}$$
We can double check our work with SymPy.
```
import sympy
from sympy.abc import alpha, x, y, v, w, R, theta
from sympy import symbols, Matrix
sympy.init_printing(use_latex="mathjax", fontsize='16pt')
time = symbols('t')
d = v*time
beta = (d/w)*sympy.tan(alpha)
r = w/sympy.tan(alpha)
fxu = Matrix([[x-r*sympy.sin(theta) + r*sympy.sin(theta+beta)],
[y+r*sympy.cos(theta)- r*sympy.cos(theta+beta)],
[theta+beta]])
F = fxu.jacobian(Matrix([x, y, theta]))
F
```
That looks a bit complicated. We can use SymPy to substitute terms:
```
# reduce common expressions
B, R = symbols('beta, R')
F = F.subs((d/w)*sympy.tan(alpha), B)
F.subs(w/sympy.tan(alpha), R)
```
This form verifies that the computation of the Jacobian is correct.
Now we can turn our attention to the noise. Here, the noise is in our control input, so it is in *control space*. In other words, we command a specific velocity and steering angle, but we need to convert that into errors in $x, y, \theta$. In a real system this might vary depending on velocity, so it will need to be recomputed for every prediction. I will choose this as the noise model; for a real robot you will need to choose a model that accurately depicts the error in your system.
$$\mathbf{M} = \begin{bmatrix}\sigma_{vel}^2 & 0 \\ 0 & \sigma_\alpha^2\end{bmatrix}$$
If this was a linear problem we would convert from control space to state space using the by now familiar $\mathbf{FMF}^\mathsf T$ form. Since our motion model is nonlinear we do not try to find a closed form solution to this, but instead linearize it with a Jacobian which we will name $\mathbf{V}$.
$$\mathbf{V} = \frac{\partial f(x, u)}{\partial u} = \begin{bmatrix}
\frac{\partial f_1}{\partial v} & \frac{\partial f_1}{\partial \alpha} \\
\frac{\partial f_2}{\partial v} & \frac{\partial f_2}{\partial \alpha} \\
\frac{\partial f_3}{\partial v} & \frac{\partial f_3}{\partial \alpha}
\end{bmatrix}$$
These partial derivatives become very difficult to work with. Let's compute them with SymPy.
```
V = fxu.jacobian(Matrix([v, alpha]))
V = V.subs(sympy.tan(alpha)/w, 1/R)
V = V.subs(time*v/R, B)
V = V.subs(time*v, 'd')
V
```
This should give you an appreciation of how quickly the EKF becomes mathematically intractable.
This gives us the final form of our prediction equations:
$$\begin{aligned}
\mathbf{\bar x} &= \mathbf x +
\begin{bmatrix}- R\sin(\theta) + R\sin(\theta + \beta) \\
R\cos(\theta) - R\cos(\theta + \beta) \\
\beta\end{bmatrix}\\
\mathbf{\bar P} &=\mathbf{FPF}^{\mathsf T} + \mathbf{VMV}^{\mathsf T}
\end{aligned}$$
This form of linearization is not the only way to predict $\mathbf x$. For example, we could use a numerical integration technique such as *Runge Kutta* to compute the movement
of the robot. This will be required if the time step is relatively large. Things are not as cut and dried with the EKF as for the Kalman filter. For a real problem you have to carefully model your system with differential equations and then determine the most appropriate way to solve that system. The correct approach depends on the accuracy you require, how nonlinear the equations are, your processor budget, and numerical stability concerns.
### Design the Measurement Model
The robot's sensor provides a noisy bearing and range measurement to multiple known locations in the landscape. The measurement model must convert the state $\begin{bmatrix}x & y&\theta\end{bmatrix}^\mathsf T$ into a range and bearing to the landmark. If $\mathbf p$
is the position of a landmark, the range $r$ is
$$r = \sqrt{(p_x - x)^2 + (p_y - y)^2}$$
The sensor provides bearing relative to the orientation of the robot, so we must subtract the robot's orientation from the bearing to get the sensor reading, like so:
$$\phi = \arctan(\frac{p_y - y}{p_x - x}) - \theta$$
Thus our measurement model $h$ is
$$\begin{aligned}
\mathbf z& = h(\bar{\mathbf x}, \mathbf p) &+ \mathcal{N}(0, R)\\
&= \begin{bmatrix}
\sqrt{(p_x - x)^2 + (p_y - y)^2} \\
\arctan(\frac{p_y - y}{p_x - x}) - \theta
\end{bmatrix} &+ \mathcal{N}(0, R)
\end{aligned}$$
This is clearly nonlinear, so we need to linearize $h$ at $\mathbf x$ by taking its Jacobian. We compute that with SymPy below.
```
px, py = symbols('p_x, p_y')
z = Matrix([[sympy.sqrt((px-x)**2 + (py-y)**2)],
[sympy.atan2(py-y, px-x) - theta]])
z.jacobian(Matrix([x, y, theta]))
```
Now we need to write that as a Python function. For example we might write:
```
from math import sqrt
def H_of(x, landmark_pos):
""" compute Jacobian of H matrix where h(x) computes
the range and bearing to a landmark for state x """
px = landmark_pos[0]
py = landmark_pos[1]
hyp = (px - x[0, 0])**2 + (py - x[1, 0])**2
dist = sqrt(hyp)
H = array(
[[-(px - x[0, 0]) / dist, -(py - x[1, 0]) / dist, 0],
[ (py - x[1, 0]) / hyp, -(px - x[0, 0]) / hyp, -1]])
return H
```
We also need to define a function that converts the system state into a measurement.
```
from math import atan2
def Hx(x, landmark_pos):
""" takes a state variable and returns the measurement
that would correspond to that state.
"""
px = landmark_pos[0]
py = landmark_pos[1]
dist = sqrt((px - x[0, 0])**2 + (py - x[1, 0])**2)
Hx = array([[dist],
[atan2(py - x[1, 0], px - x[0, 0]) - x[2, 0]]])
return Hx
```
### Design Measurement Noise
It is reasonable to assume that the range and bearing measurement noises are independent of each other, hence
$$\mathbf R=\begin{bmatrix}\sigma_{range}^2 & 0 \\ 0 & \sigma_{bearing}^2\end{bmatrix}$$
### Implementation
We will use `FilterPy`'s `ExtendedKalmanFilter` class to implement the filter. Its `predict()` method uses the standard linear equations for the process model. Ours is nonlinear, so we will have to override `predict()` with our own implementation. I'll want to also use this class to simulate the robot, so I'll add a method `move()` that computes the position of the robot which both `predict()` and my simulation can call.
The matrices for the prediction step are quite large. While writing this code I made several errors before I finally got it working. I only found my errors by using SymPy's `evalf` function. `evalf` evaluates a SymPy `Matrix` with specific values for the variables. I decided to demonstrate this technique to you, and used `evalf` in the Kalman filter code. You'll need to understand a couple of points.
First, `evalf` uses a dictionary to specify the values. For example, if your matrix contains an `x` and `y`, you can write
```python
M.evalf(subs={x:3, y:17})
```
to evaluate the matrix for `x=3` and `y=17`.
Second, `evalf` returns a `sympy.Matrix` object. Use `numpy.array(M).astype(float)` to convert it to a NumPy array. `numpy.array(M)` creates an array of type `object`, which is not what you want.
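Here is a tiny illustration of that workflow (the matrix and symbols below are made up for the example):
```python
# evalf with a substitution dictionary, then conversion to a float NumPy array.
import numpy as np
import sympy

x, y = sympy.symbols('x y')
M = sympy.Matrix([[x, x*y],
                  [0, y]])
M_num = M.evalf(subs={x: 3, y: 17})
F = np.array(M_num).astype(float)   # dtype float, ready for NumPy linear algebra
print(F)
```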
Here is the code for the EKF:
```
from filterpy.kalman import ExtendedKalmanFilter as EKF
from numpy import array, sqrt
class RobotEKF(EKF):
def __init__(self, dt, wheelbase, std_vel, std_steer):
EKF.__init__(self, 3, 2, 2)
self.dt = dt
self.wheelbase = wheelbase
self.std_vel = std_vel
self.std_steer = std_steer
a, x, y, v, w, theta, time = symbols(
'a, x, y, v, w, theta, t')
d = v*time
beta = (d/w)*sympy.tan(a)
r = w/sympy.tan(a)
self.fxu = Matrix(
[[x-r*sympy.sin(theta)+r*sympy.sin(theta+beta)],
[y+r*sympy.cos(theta)-r*sympy.cos(theta+beta)],
[theta+beta]])
self.F_j = self.fxu.jacobian(Matrix([x, y, theta]))
self.V_j = self.fxu.jacobian(Matrix([v, a]))
# save dictionary and its variables for later use
self.subs = {x: 0, y: 0, v:0, a:0,
time:dt, w:wheelbase, theta:0}
self.x_x, self.x_y, = x, y
self.v, self.a, self.theta = v, a, theta
def predict(self, u):
self.x = self.move(self.x, u, self.dt)
self.subs[self.theta] = self.x[2, 0]
self.subs[self.v] = u[0]
self.subs[self.a] = u[1]
F = array(self.F_j.evalf(subs=self.subs)).astype(float)
V = array(self.V_j.evalf(subs=self.subs)).astype(float)
# covariance of motion noise in control space
M = array([[self.std_vel*u[0]**2, 0],
[0, self.std_steer**2]])
self.P = np.dot(F, self.P).dot(F.T) + np.dot(V, M).dot(V.T)
def move(self, x, u, dt):
hdg = x[2, 0]
vel = u[0]
steering_angle = u[1]
dist = vel * dt
if abs(steering_angle) > 0.001: # is robot turning?
beta = (dist / self.wheelbase) * tan(steering_angle)
r = self.wheelbase / tan(steering_angle) # radius
dx = np.array([[-r*sin(hdg) + r*sin(hdg + beta)],
[r*cos(hdg) - r*cos(hdg + beta)],
[beta]])
else: # moving in straight line
dx = np.array([[dist*cos(hdg)],
[dist*sin(hdg)],
[0]])
return x + dx
```
Now we have another issue to handle. The residual is notionally computed as $y = z - h(x)$ but this will not work because our measurement contains an angle in it. Suppose z has a bearing of $1^\circ$ and $h(x)$ has a bearing of $359^\circ$. Naively subtracting them would yield an angular difference of $-358^\circ$, whereas the correct value is $2^\circ$. We have to write code to correctly compute the bearing residual.
```
def residual(a, b):
""" compute residual (a-b) between measurements containing
[range, bearing]. Bearing is normalized to [-pi, pi)"""
y = a - b
y[1] = y[1] % (2 * np.pi) # force in range [0, 2 pi)
if y[1] > np.pi: # move to [-pi, pi)
y[1] -= 2 * np.pi
return y
```
The rest of the code runs the simulation and plots the results, and shouldn't need too much comment by now. I create a variable `landmarks` that contains the landmark coordinates. I update the simulated robot position 10 times a second, but run the EKF only once per second. This is for two reasons. First, we are not using Runge Kutta to integrate the differential equations of motion, so a narrow time step allows our simulation to be more accurate. Second, it is fairly normal in embedded systems to have limited processing speed. This forces you to run your Kalman filter only as frequently as absolutely needed.
```
from filterpy.stats import plot_covariance_ellipse
from math import sqrt, tan, cos, sin, atan2
import matplotlib.pyplot as plt
dt = 1.0
def z_landmark(lmark, sim_pos, std_rng, std_brg):
x, y = sim_pos[0, 0], sim_pos[1, 0]
d = np.sqrt((lmark[0] - x)**2 + (lmark[1] - y)**2)
a = atan2(lmark[1] - y, lmark[0] - x) - sim_pos[2, 0]
z = np.array([[d + randn()*std_rng],
[a + randn()*std_brg]])
return z
def ekf_update(ekf, z, landmark):
ekf.update(z, HJacobian=H_of, Hx=Hx,
residual=residual,
args=(landmark), hx_args=(landmark))
def run_localization(landmarks, std_vel, std_steer,
std_range, std_bearing,
step=10, ellipse_step=20, ylim=None):
ekf = RobotEKF(dt, wheelbase=0.5, std_vel=std_vel,
std_steer=std_steer)
ekf.x = array([[2, 6, .3]]).T # x, y, steer angle
ekf.P = np.diag([.1, .1, .1])
ekf.R = np.diag([std_range**2, std_bearing**2])
sim_pos = ekf.x.copy() # simulated position
# steering command (vel, steering angle radians)
u = array([1.1, .01])
plt.figure()
plt.scatter(landmarks[:, 0], landmarks[:, 1],
marker='s', s=60)
track = []
for i in range(200):
sim_pos = ekf.move(sim_pos, u, dt/10.) # simulate robot
track.append(sim_pos)
if i % step == 0:
ekf.predict(u=u)
if i % ellipse_step == 0:
plot_covariance_ellipse(
(ekf.x[0,0], ekf.x[1,0]), ekf.P[0:2, 0:2],
std=6, facecolor='k', alpha=0.3)
x, y = sim_pos[0, 0], sim_pos[1, 0]
for lmark in landmarks:
z = z_landmark(lmark, sim_pos,
std_range, std_bearing)
ekf_update(ekf, z, lmark)
if i % ellipse_step == 0:
plot_covariance_ellipse(
(ekf.x[0,0], ekf.x[1,0]), ekf.P[0:2, 0:2],
std=6, facecolor='g', alpha=0.8)
track = np.array(track)
plt.plot(track[:, 0], track[:,1], color='k', lw=2)
plt.axis('equal')
plt.title("EKF Robot localization")
if ylim is not None: plt.ylim(*ylim)
plt.show()
return ekf
landmarks = array([[5, 10], [10, 5], [15, 15]])
ekf = run_localization(
landmarks, std_vel=0.1, std_steer=np.radians(1),
std_range=0.3, std_bearing=0.1)
print('Final P:', ekf.P.diagonal())
```
I have plotted the landmarks as solid squares. The path of the robot is drawn with a black line. The covariance ellipses for the predict step are light gray, and the covariances of the update are shown in green. To make them visible at this scale I have set the ellipse boundary at 6$\sigma$.
We can see that there is a lot of uncertainty added by our motion model, and that most of the error is in the direction of motion. We determine that from the shape of the predict-step ellipses. After a few steps we can see that the filter incorporates the landmark measurements and the errors improve.
I used the same initial conditions and landmark locations as in the UKF chapter. The UKF achieves much better accuracy in terms of the error ellipse. Both perform roughly as well, as far as their estimate for $\mathbf x$ is concerned.
Now let's add another landmark.
```
landmarks = array([[5, 10], [10, 5], [15, 15], [20, 5]])
ekf = run_localization(
landmarks, std_vel=0.1, std_steer=np.radians(1),
std_range=0.3, std_bearing=0.1)
plt.show()
print('Final P:', ekf.P.diagonal())
```
The uncertainty in the estimates near the end of the track is smaller. We can see the effect that multiple landmarks have on our uncertainty by only using the first two landmarks.
```
ekf = run_localization(
landmarks[0:2], std_vel=1.e-10, std_steer=1.e-10,
std_range=1.4, std_bearing=.05)
print('Final P:', ekf.P.diagonal())
```
The estimate quickly diverges from the robot's path after passing the landmarks. The covariance also grows quickly. Let's see what happens with only one landmark:
```
ekf = run_localization(
landmarks[0:1], std_vel=1.e-10, std_steer=1.e-10,
std_range=1.4, std_bearing=.05)
print('Final P:', ekf.P.diagonal())
```
As you probably suspected, one landmark produces a very bad result. Conversely, a large number of landmarks allows us to make very accurate estimates.
```
landmarks = array([[5, 10], [10, 5], [15, 15], [20, 5], [15, 10],
[10,14], [23, 14], [25, 20], [10, 20]])
ekf = run_localization(
landmarks, std_vel=0.1, std_steer=np.radians(1),
std_range=0.3, std_bearing=0.1, ylim=(0, 21))
print('Final P:', ekf.P.diagonal())
```
### Discussion
I said that this was a real problem, and in some ways it is. I've seen alternative presentations that used robot motion models that led to simpler Jacobians. On the other hand, my model of the movement is also simplistic in several ways. First, it uses a bicycle model. A real car has two sets of tires, and each travels on a different radius. The wheels do not grip the surface perfectly. I also assumed that the robot responds instantaneously to the control input. Sebastian Thrun writes in *Probabilistic Robotics* that this simplified model is justified because the filters perform well when used to track real vehicles. The lesson here is that while you need a reasonably accurate nonlinear model, it does not need to be perfect to operate well. As a designer you will need to balance the fidelity of your model with the difficulty of the math and the CPU time required to perform the linear algebra.
Another way in which this problem was simplistic is that we assumed that we knew the correspondence between the landmarks and measurements. But suppose we are using radar - how would we know that a specific signal return corresponded to a specific building in the local scene? This question hints at SLAM algorithms - simultaneous localization and mapping. SLAM is not the point of this book, so I will not elaborate on this topic.
## UKF vs EKF
In the last chapter I used the UKF to solve this problem. The difference in implementation should be very clear. Computing the Jacobians for the state and measurement models was not trivial despite a rudimentary motion model. A different problem could result in a Jacobian which is difficult or impossible to derive analytically. In contrast, the UKF only requires you to provide a function that computes the system motion model and another for the measurement model.
There are many cases where the Jacobian cannot be found analytically. The details are beyond the scope of this book, but you will have to use numerical methods to compute the Jacobian. That undertaking is not trivial, and you will spend a significant portion of a master's degree at a STEM school learning techniques to handle such situations. Even then you'll likely only be able to solve problems related to your field - an aeronautical engineer learns a lot about the Navier-Stokes equations, but not much about modelling chemical reaction rates.
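When an analytic Jacobian is out of reach, one common fallback is a finite-difference approximation. Below is a minimal sketch of a central-difference Jacobian; `hx_example` is an illustrative range-bearing measurement function (a stand-in for demonstration, not the filter code above):
```
import numpy as np

def numerical_jacobian(f, x, eps=1e-7):
    """Central-difference approximation of the Jacobian of f at x."""
    x = np.asarray(x, dtype=float)
    fx = np.asarray(f(x))
    J = np.zeros((fx.size, x.size))
    for i in range(x.size):
        dx = np.zeros_like(x)
        dx[i] = eps
        J[:, i] = (np.asarray(f(x + dx)) - np.asarray(f(x - dx))) / (2 * eps)
    return J

# illustrative range-bearing measurement of a landmark at (5, 10) from state (x, y, heading)
def hx_example(state, landmark=(5., 10.)):
    px, py = landmark
    x, y, theta = state
    rng = np.sqrt((px - x)**2 + (py - y)**2)
    brg = np.arctan2(py - y, px - x) - theta
    return np.array([rng, brg])

print(numerical_jacobian(hx_example, np.array([2., 6., 0.3])))
```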
So, UKFs are easy. Are they accurate? In practice they often perform better than the EKF. You can find plenty of research papers that prove that the UKF outperforms the EKF in various problem domains. It's not hard to understand why this would be true. The EKF works by linearizing the system model and measurement model at a single point, and the UKF uses $2n+1$ points.
Let's look at a specific example. Take $f(x) = x^3$ and pass a Gaussian distribution through it. I will compute an accurate answer using a monte carlo simulation. I generate 50,000 points randomly distributed according to the Gaussian, pass each through $f(x)$, then compute the mean and variance of the result.
The EKF linearizes the function by taking the derivative to find the slope at the evaluation point $x$. This slope becomes the linear function that we use to transform the Gaussian. Here is a plot of that.
```
import kf_book.nonlinear_plots as nonlinear_plots
nonlinear_plots.plot_ekf_vs_mc()
```
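The numbers behind this comparison can be reproduced in a few lines without the plotting helper. The sketch below passes samples of an arbitrary Gaussian $\mathcal{N}(1, 0.5^2)$ through $f(x)=x^3$ and compares the Monte Carlo mean and variance with the EKF-style linearization $y \approx f(\mu) + f'(\mu)(x-\mu)$:
```
import numpy as np

mu, sigma = 1.0, 0.5               # arbitrary Gaussian to propagate
f = lambda x: x**3
fprime = lambda x: 3 * x**2        # slope used by the linearization

# Monte Carlo "truth"
samples = np.random.normal(mu, sigma, 50000)
y = f(samples)
print('MC  mean = {:.3f}, var = {:.3f}'.format(y.mean(), y.var()))

# EKF-style linearization about the mean
print('EKF mean = {:.3f}, var = {:.3f}'.format(f(mu), (fprime(mu)**2) * sigma**2))
```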
The EKF computation is rather inaccurate. In contrast, here is the performance of the UKF:
```
nonlinear_plots.plot_ukf_vs_mc(alpha=0.001, beta=3., kappa=1.)
```
Here we can see that the computation of the UKF's mean is accurate to 2 decimal places. The standard deviation is slightly off, but you can also fine tune how the UKF computes the distribution by using the $\alpha$, $\beta$, and $\kappa$ parameters for generating the sigma points. Here I used $\alpha=0.001$, $\beta=3$, and $\kappa=1$. Feel free to modify them to see the result. You should be able to get better results than I did. However, avoid over-tuning the UKF for a specific test. It may perform better for your test case, but worse in general.
Good morning! You have completed the math trail on car plate numbers in a somewhat (semi-)automated way.
Can you actually solve the same tasks with code? Read on and you will be amazed how empowering programming can be to help make mathematics learning more efficient and productive! :)
# Task
Given the incomplete car plate number `SLA9??2H`
Find the missing ?? numbers.
A valid Singapore car plate number typically starts with 3 letters, followed by 4 digits, and ends with a 'check' letter.
For example, consider the valid car plate number 'SDG6136T':
- The first letter is 'S' for Singapore.
- The next two letters and the digits are used to compute the check letter, using the following steps:
- Ignoring the first letter 'S', the letters are converted to their positions in the alphabet. For example, 'D' is 4, 'G' is 7 and 'M' is 13.
- The converted letters and the digits form a sequence of 6 numbers. For example, 'DG6136' will give (4, 7, 6, 1, 3, 6).
- The sequence of 6 numbers is multiplied term by term by the sequence of 6 weights (9, 4, 5, 4, 3, 2) respectively, summed up and then divided by 19 to obtain the remainder.
- For example, '476136' will give 4x9 + 7x4 + 6x5 + 1x4 + 3x3 + 6x2 = 119, and this leaves a remainder of 5 after dividing by 19.
- The 'check' letter is obtained by referring to the following table. Thus the check letter corresponding to remainder 5 is T.
| Remainder | 'check' letter | Remainder | 'check' letter | Remainder | 'check' letter |
|-----------|----------------|-----------|----------------|-----------|----------------|
| 0 | A | 7  | R | 13 | H |
| 1 | Z | 8  | P | 14 | G |
| 2 | Y | 9  | M | 15 | E |
| 3 | X | 10 | L | 16 | D |
| 4 | U | 11 | K | 17 | C |
| 5 | T | 12 | J | 18 | B |
| 6 | S |    |   |    |   |
Reference: https://sgwiki.com/wiki/Vehicle_Checksum_Formula
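As a quick check of the worked example above, the weighted sum and remainder can be computed directly with nothing beyond the standard library:
```
total = sum(w * v for w, v in zip([9, 4, 5, 4, 3, 2], [4, 7, 6, 1, 3, 6]))
print(total, total % 19)  # 119 and remainder 5, which maps to the check letter 'T'
```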
Pseudocode
```
FOR i = 0 to 99
  Car_Plate = 'SLA9' + str(i).zfill(2) + '2H'
IF Check_Letter(Car_Plate) is True
print (Car_Plate) on screen
ENDIF
NEXT
```
```
# we need to store the mapping from A to 1, B to 2, etc.
# for the letters part of the car plate number
# a dictionary is good for this purpose
letter_map = {}
for i in range(26): # 26 letters in the alphabet
char = chr(ord('A') + i)
letter_map[char] = i + 1
#print(letter_map) # this will output {'A':1, 'B':2, 'C':3, ..., 'Z':26}
# we also need to store the mapping from remainders to the check letter
# and we can also use a dictionary! :)
check_map = {0:'A', 1:'Z', 2:'Y', 3:'X', 4:'U', 5:'T', 6:'S', 7:'R', 8:'P', \
9:'M', 10:'L', 11:'K', 12:'J', 13:'H', 14:'G', 15:'E', 16:'D', \
17:'C', 18:'B'}
# we define a reusable Boolean function to generate the check letter and
# check if it matches the last letter of the car plate number
def check_letter(car_plate):
weights = [9, 4, 5, 4, 3, 2]
total = 0
for i in range(len(car_plate)-1):
if i < 2: # letters
num = letter_map[car_plate[i]]
else: # digits
num = int(car_plate[i])
total += num * weights[i]
remainder = total % 19
return check_map[remainder] == car_plate[-1]
#main
car_plate = 'DG6136T' # you can use this to verify the given example
if check_letter(car_plate):
print('S' + car_plate, car_plate[3:5])
print()
for i in range(100): # this loop repeats 100 times for you! :)
car_plate = 'LA9' + str(i).zfill(2) + '2H' # 'LA9002H', 'LA9012H', ...
if check_letter(car_plate):
print('S' + car_plate, car_plate[3:5])
#main
for i in range(100):
car_plate = 'LA' + str(i).zfill(2) + '68Y'
if check_letter(car_plate):
print('S' + car_plate, car_plate[2:4])
'0'.zfill(2)
```
# Challenge
- How many car_plate numbers start with SMV and end with D?
```
#main
count = 0
for i in range(10000):
car_plate = 'MV' + str(i).zfill(4) + 'D'
if check_letter(car_plate):
count += 1
print(count)
#main
wanted = []
for i in range(10000):
car_plate = 'MV' + str(i).zfill(4) + 'D'
if check_letter(car_plate):
print('S' + car_plate, end=' ')
wanted.append('S' + car_plate)
print(len(wanted))
```
# More challenges!
Suggest one or more variations of problems you can solve with car plate numbers using the power of Python programming. Some ideas include:
* Check if a given car plate number is valid
* Which valid car plate numbers have a special property (eg prime number, contains at least two '8' digits, does not contain the lucky number 13, etc.)
* Whether there are the same number of available car plate numbers in each series (e.g. SMV and SMW)
* (your idea here)
Submit a pull request with your ideas and/or code to contribute to learning Mathematics using programming to benefit the world! :)
```
```
# This is really more than car plate numbers!
You have just learned an application of mathematics called modulus arithmetic in generating check letters/digits. Do you know that actually the following are also applications of modulus arithmetic?
* Singapore NRIC numbers (http://www.ngiam.net/NRIC/NRIC_numbers.ppt)
* international ISBNs (https://en.wikipedia.org/wiki/International_Standard_Book_Number)
* credit card numbers (https://en.wikipedia.org/wiki/Luhn_algorithm)
* universal product codes (https://en.wikipedia.org/wiki/Universal_Product_Code)
Can you research other applications of modulus arithmetic? Better still, contribute by submitting Python code to unleash the power of automation!
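For instance, here is a sketch of the Luhn check used for credit card numbers (illustrative only; the number below is a standard test number, not a real card):
```
def luhn_valid(number_string):
    digits = [int(d) for d in number_string][::-1]
    total = 0
    for i, d in enumerate(digits):
        if i % 2 == 1:      # double every second digit from the right
            d *= 2
            if d > 9:
                d -= 9
        total += d
    return total % 10 == 0

print(luhn_valid('4111111111111111'))  # a common test number -> True
```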
You can submit a pull request by doing one of the following:
- suggesting a new application for modulus arithmetic
- creating a new .py file
- uploading an existing .py file
We look forward to your pull requests! :)
```
from dgpsi import dgp, kernel, combine, lgp, path, emulator, Poisson, Hetero, NegBin
import numpy as np
import matplotlib.pyplot as plt
```
# Example 1 on heteroskedastic Gaussian likelihood
```
n=12
X=np.linspace(0,1,n)[:,None]
#Create some replications of the input positions so that each input position will have six different outputs. Note that SI has linear complexity in the number of replications.
for i in range(5):
X=np.concatenate((X,np.linspace(0,1,n)[:,None]),axis=0)
f1= lambda x: -1. if x<0.5 else 1. #True mean function, which is a step function
f2= lambda x: np.exp(1.5*np.sin((x-0.3)*7.)-6.5) #True variance function, which has higher values around 0.5 but low values around boundaries
Y=np.array([np.random.normal(f1(x),np.sqrt(f2(x))) for x in X]) #Generate stochastic outputs according to f1 and f2
z=np.linspace(0,1.,200)[:,None]
Yz=np.array([f1(x) for x in z]).flatten()
plt.plot(z,Yz) #Plot true mean function
plt.scatter(X,Y,color='r')
#Create a 2-layered DGP + Hetero model
layer1=[kernel(length=np.array([0.5]),name='matern2.5')]
layer2=[kernel(length=np.array([0.2]),name='matern2.5',scale_est=1,connect=np.arange(1)),
kernel(length=np.array([0.2]),name='matern2.5',scale_est=1,connect=np.arange(1))]
layer3=[Hetero()]
#Construct the DGP + Hetero model
all_layer=combine(layer1,layer2,layer3)
m=dgp(X,[Y],all_layer)
#Train the model
m.train(N=500)
#Construct the emulator
final_layer_obj=m.estimate()
emu=emulator(final_layer_obj)
#Make predictions across all layers so we can extract predictions for the mean and variance functions
mu,var=emu.predict(z, method='mean_var',full_layer=True)
#Visualize the overall model prediction
s=np.sqrt(var[-1])
u=mu[-1]+2*s
l=mu[-1]-2*s
p=plt.plot(z,mu[-1],color='r',alpha=1,lw=1)
p1=plt.plot(z,u,'--',color='g',lw=1)
p1=plt.plot(z,l,'--',color='g',lw=1)
plt.scatter(X,Y,color='black')
plt.plot(z,Yz)
#Visualize the prediction for the mean function
mu_mean=mu[-2][:,0]
var_mean=var[-2][:,0]
s=np.sqrt(var_mean)
u=mu_mean+2*s
l=mu_mean-2*s
p=plt.plot(z,mu_mean,color='r',alpha=1,lw=1)
p1=plt.plot(z,u,'--',color='g',lw=1)
p1=plt.plot(z,l,'--',color='g',lw=1)
plt.plot(z,Yz,color='black',lw=1)
#Visualize the prediction for the log(variance) function
mu_var=mu[-2][:,1]
var_var=var[-2][:,1]
s=np.sqrt(var_var)
u=mu_var+2*s
l=mu_var-2*s
p=plt.plot(z,mu_var,color='r',alpha=1,lw=1)
p1=plt.plot(z,u,'--',color='g',lw=1)
p1=plt.plot(z,l,'--',color='g',lw=1)
plt.plot(z,np.array([np.log(f2(x)) for x in z]).reshape(-1,1),color='black',lw=1)
```
# Example 2 on heteroskedastic Gaussian likelihood
```
#Load and visualize the motorcycle dataset
X=np.loadtxt('./mc_input.txt').reshape(-1,1)
Y=np.loadtxt('./mc_output.txt').reshape(-1,1)
X=(X-np.min(X))/(np.max(X)-np.min(X))
Y=(Y-Y.mean())/Y.std()
plt.scatter(X,Y)
#Construct a 2-layered DGP + Hetero model
layer1=[kernel(length=np.array([0.5]),name='sexp')]
layer2=[]
for _ in range(2):
k=kernel(length=np.array([0.2]),name='sexp',scale_est=1,connect=np.arange(1))
layer2.append(k)
layer3=[Hetero()]
all_layer=combine(layer1,layer2,layer3)
m=dgp(X,[Y],all_layer)
#Train the model
m.train(N=500)
#Construct the emulator
final_layer_obj=m.estimate()
emu=emulator(final_layer_obj)
#Make predictions over [0,1]
z=np.linspace(0,1,100)[:,None]
mu,var=emu.predict(z, method='mean_var')
#Visualize the predictions
s=np.sqrt(var)
u=mu+2*s
l=mu-2*s
p=plt.plot(z,mu,color='r',alpha=1,lw=1)
p1=plt.plot(z,u,'--',color='g',lw=1)
p1=plt.plot(z,l,'--',color='g',lw=1)
plt.scatter(X,Y,color='black')
```
# Example 3 on Poisson likelihood
```
#Generate some data with replications
n=10
X=np.linspace(0,.3,n)[:,None]
for _ in range(4):
X=np.concatenate((X,np.linspace(0,.3,n)[:,None]),axis=0)
X=np.concatenate((X,np.linspace(0.35,1,n)[:,None]),axis=0)
f= lambda x: np.exp(np.exp(-1.5*np.sin(1/((0.7*0.8*(1.5*x+0.1)+0.3)**2))))
Y=np.array([np.random.poisson(f(x)) for x in X]).reshape(-1,1)
z=np.linspace(0,1.,200)[:,None]
Yz=np.array([f(x) for x in z]).flatten()
test_Yz=np.array([np.random.poisson(f(x)) for x in z]).reshape(-1,1) #generate some testing output data
plt.plot(z,Yz)
plt.scatter(X,Y,color='r')
#Train a GP + Poisson model
layer1=[kernel(length=np.array([0.5]),name='matern2.5',scale_est=1)]
layer2=[Poisson()]
all_layer=combine(layer1,layer2)
m=dgp(X,[Y],all_layer)
m.train(N=500)
#Visualize the results
final_layer_obj=m.estimate()
emu=emulator(final_layer_obj)
mu,var=emu.predict(z, method='mean_var',full_layer=True) #Make mean-variance prediction
samp=emu.predict(z, method='sampling') #Draw some samples to obtain the quantiles of the overall model
quant=np.quantile(np.squeeze(samp), [0.05,0.5,0.95],axis=1) #Compute sample-based quantiles
fig, (ax1, ax2) = plt.subplots(1,2, figsize=(15,4))
ax1.set_title('Predicted and True Poisson Mean')
ax1.plot(z,Yz,color='black')
ax1.plot(z,mu[-1],'--',color='red',alpha=0.8,lw=3)
ax1.plot(z,quant[0,:],'--',color='b',lw=1)
ax1.plot(z,quant[1,:],'--',color='b',lw=1)
ax1.plot(z,quant[2,:],'--',color='b',lw=1)
mu_gp, var_gp=mu[-2], var[-2]
s=np.sqrt(var_gp)
u,l =mu_gp+2*s, mu_gp-2*s
ax2.set_title('Predicted and True logged Poisson Mean')
ax2.plot(z,mu_gp,color='r',alpha=1,lw=1)
ax2.plot(z,u,'--',color='g',lw=1)
ax2.plot(z,l,'--',color='g',lw=1)
ax2.plot(z,np.log(Yz),color='black',lw=1)
print('The negative log-likelihood of predictions is', emu.nllik(z,test_Yz)[0])
#Train a 2-layered DGP + Poisson model
layer1=[kernel(length=np.array([0.5]),name='matern2.5')]
layer2=[kernel(length=np.array([0.1]),name='matern2.5',scale_est=1,connect=np.arange(1))]
layer3=[Poisson()]
all_layer=combine(layer1,layer2,layer3)
m=dgp(X,[Y],all_layer)
m.train(N=500)
#Visualize the results
final_layer_obj=m.estimate()
emu=emulator(final_layer_obj)
mu,var=emu.predict(z, method='mean_var',full_layer=True) #Make mean-variance prediction
samp=emu.predict(z, method='sampling') #Draw some samples to obtain the quantiles of the overall model
quant=np.quantile(np.squeeze(samp), [0.05,0.5,0.95],axis=1) #Compute sample-based quantiles
fig, (ax1, ax2) = plt.subplots(1,2, figsize=(15,4))
ax1.set_title('Predicted and True Poisson Mean')
ax1.plot(z,Yz,color='black')
ax1.plot(z,mu[-1],'--',color='red',alpha=0.8,lw=3)
ax1.plot(z,quant[0,:],'--',color='b',lw=1)
ax1.plot(z,quant[1,:],'--',color='b',lw=1)
ax1.plot(z,quant[2,:],'--',color='b',lw=1)
mu_gp, var_gp=mu[-2], var[-2]
s=np.sqrt(var_gp)
u,l =mu_gp+2*s, mu_gp-2*s
ax2.set_title('Predicted and True logged Poisson Mean')
ax2.plot(z,mu_gp,color='r',alpha=1,lw=1)
ax2.plot(z,u,'--',color='g',lw=1)
ax2.plot(z,l,'--',color='g',lw=1)
ax2.plot(z,np.log(Yz),color='black',lw=1)
print('The negative log-likelihood of predictions is', emu.nllik(z,test_Yz)[0])
```
# Example 4 on Negative Binomial likelihood
The Negative Binomial pmf in dgpsi is defined by
$$p_Y(y;\mu,\sigma)=\frac{\Gamma(y+\frac{1}{\sigma})}{\Gamma(1/{\sigma})\Gamma(y+1)}\left(\frac{\sigma\mu}{1+\sigma\mu}\right)^y\left(\frac{1}{1+\sigma\mu}\right)^{1/{\sigma}}$$
with mean $0<\mu<\infty$ and dispersion $0<\sigma<\infty$, which correspond to numpy's negative binomial parameters $n$ and $p$ via $n=1/\sigma$ and $p=1/(1+\mu\sigma)$.
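As a quick sanity check of this parameterisation (a hedged sketch with arbitrary $\mu$ and $\sigma$; the sample mean should be close to $\mu$ and the sample variance close to $\mu(1+\sigma\mu)$):
```
import numpy as np

mu, sigma = 4.0, 0.5                       # arbitrary mean and dispersion
n, p = 1/sigma, 1/(1 + mu*sigma)
samples = np.random.negative_binomial(n, p, size=200000)
print(samples.mean(), samples.var())       # empirical moments
print(mu, mu*(1 + sigma*mu))               # theoretical moments
```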
```
#Generate some data from the Negative Binomial distribution.
n=30
X=np.linspace(0,1,n)[:,None]
for _ in range(5):
X=np.concatenate((X,np.linspace(0,1,n)[:,None]),axis=0)
f1= lambda x: 1/np.exp(2) if x<0.5 else np.exp(2) #True mean function
f2= lambda x: np.exp(6*x**2-3) #True dispersion function
Y=np.array([np.random.negative_binomial(1/f2(x),1/(1+f1(x)*f2(x))) for x in X]).reshape(-1,1)
Xt=np.linspace(0,1.,200)[:,None]
Yt=np.array([f1(x) for x in Xt]).flatten()
plt.plot(Xt,Yt)
plt.scatter(X,Y,color='r')
#Train a 2-layered DGP (one GP in the first layer and two in the second corresponding to the mean and dispersion parameters) + NegBin model
layer1=[kernel(length=np.array([0.5]),name='matern2.5')]
layer2=[kernel(length=np.array([0.02]),name='matern2.5',scale_est=1,connect=np.arange(1)),
kernel(length=np.array([0.02]),name='matern2.5',scale_est=1,connect=np.arange(1))]
layer3=[NegBin()]
all_layer=combine(layer1,layer2,layer3)
m=dgp(X,[Y],all_layer)
m.train(N=500)
#Visualize the results
final_layer_obj=m.estimate()
emu=emulator(final_layer_obj)
mu,var=emu.predict(Xt, method='mean_var',full_layer=True) #Make mean-variance prediction
samp=emu.predict(Xt, method='sampling') #Draw some samples to obtain the quantiles of the overall model
quant=np.quantile(np.squeeze(samp), [0.05,0.5,0.95],axis=1) #Compute sample-based quantiles
fig, (ax1, ax2, ax3) = plt.subplots(1,3, figsize=(15,4))
ax1.set_title('Predicted and True NegBin Mean')
ax1.plot(Xt,Yt,color='black')
ax1.plot(Xt,mu[-1],'--',color='red',alpha=0.8,lw=3)
ax1.plot(Xt,quant[0,:],'--',color='b',lw=1)
ax1.plot(Xt,quant[1,:],'--',color='b',lw=1)
ax1.plot(Xt,quant[2,:],'--',color='b',lw=1)
mu_gp, var_gp=mu[-2][:,0], var[-2][:,0]
s=np.sqrt(var_gp)
u,l =mu_gp+2*s, mu_gp-2*s
ax2.set_title('Predicted and True logged NegBin Mean')
ax2.plot(Xt,mu_gp,color='r',alpha=1,lw=1)
ax2.plot(Xt,u,'--',color='g',lw=1)
ax2.plot(Xt,l,'--',color='g',lw=1)
ax2.plot(Xt,np.log(Yt),color='black',lw=1)
mu_gp, var_gp=mu[-2][:,1], var[-2][:,1]
s=np.sqrt(var_gp)
u,l =mu_gp+2*s, mu_gp-2*s
ax3.set_title('Predicted and True logged NegBin Dispersion')
ax3.plot(Xt,mu_gp,color='r',alpha=1,lw=1)
ax3.plot(Xt,u,'--',color='g',lw=1)
ax3.plot(Xt,l,'--',color='g',lw=1)
ax3.plot(Xt,np.array([np.log(f2(x)) for x in Xt]).reshape(-1,1),color='black',lw=1)
```
# Properties of ELGs in DR7 Imaging
The purpose of this notebook is to quantify the observed properties (particularly size and ellipticity) of ELGs using DR7 catalogs of the COSMOS region. We use the HST/ACS imaging of objects in this region as "truth."
J. Moustakas
2018 Aug 15
```
import os, warnings, pdb
import numpy as np
import fitsio
from astropy.table import Table
import matplotlib.pyplot as plt
import seaborn as sns
rc = {'font.family': 'serif'}#, 'text.usetex': True}
sns.set(style='ticks', font_scale=1.5, palette='Set2', rc=rc)
%matplotlib inline
```
#### Read the HST/ACS parent (truth) catalog.
```
acsfile = os.path.join(os.getenv('DESI_ROOT'), 'target', 'analysis', 'truth', 'parent', 'cosmos-acs.fits.gz')
allacs = Table(fitsio.read(acsfile, ext=1, upper=True))
print('Read {} objects from {}'.format(len(allacs), acsfile))
```
#### Assemble all the functions we'll need.
```
def read_tractor(subset='0'):
"""Read the Tractor catalogs for a given cosmos subsest and cross-match
with the ACS catalog.
"""
from glob import glob
from astropy.table import vstack
from astrometry.libkd.spherematch import match_radec
tractordir = '/global/cscratch1/sd/dstn/cosmos-dr7-7{}/tractor'.format(subset)
tractorfiles = glob('{}/???/tractor-*.fits'.format(tractordir))
alldr7 = []
for ii, tractorfile in enumerate(tractorfiles):
#if (ii % 10) == 0:
# print('Read {:02d} / {:02d} Tractor catalogs from subset {}.'.format(ii, len(tractorfiles), subset))
alldr7.append(Table(fitsio.read(tractorfile, ext=1, upper=True)))
alldr7 = vstack(alldr7)
alldr7 = alldr7[alldr7['BRICK_PRIMARY']]
# Cross-match
m1, m2, d12 = match_radec(allacs['RA'], allacs['DEC'], alldr7['RA'],
alldr7['DEC'], 1./3600.0, nearest=True)
print('Read {} objects with HST/ACS and DR7 photometry'.format(len(m1)))
return allacs[m1], alldr7[m2]
def select_ELGs(acs, dr7):
from desitarget.cuts import isELG_south
def unextinct_fluxes(cat):
"""We need to unextinct the fluxes ourselves rather than using desitarget.cuts.unextinct_fluxes
because the Tractor catalogs don't have forced WISE photometry.
"""
res = np.zeros(len(cat), dtype=[('GFLUX', 'f4'), ('RFLUX', 'f4'), ('ZFLUX', 'f4')])
for band in ('G', 'R', 'Z'):
res['{}FLUX'.format(band)] = ( cat['FLUX_{}'.format(band)] /
cat['MW_TRANSMISSION_{}'.format(band)] )
return Table(res)
fluxes = unextinct_fluxes(dr7)
gflux, rflux, zflux = fluxes['GFLUX'], fluxes['RFLUX'], fluxes['ZFLUX']
ielg = isELG_south(gflux=fluxes['GFLUX'], rflux=fluxes['RFLUX'],
zflux=fluxes['ZFLUX'])#, gallmask=alltarg['ALLMASK_G'],
#rallmask=alltarg['ALLMASK_R'], zallmask=alltarg['ALLMASK_Z'])
print('Selected {} / {} ELGs'.format(np.sum(ielg), len(acs)))
return acs[ielg], dr7[ielg]
def get_mag(cat, band='R'):
return 22.5 - 2.5 * np.log10(cat['FLUX_{}'.format(band)])
def get_reff_acs(cat):
"""Convert SExtractor's flux_radius to half-light radius
using the relation (derived from simulations) in Sec 4.2
of Griffith et al. 2012.
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
reff = np.log10(0.03 * 0.162 * cat['FLUX_RADIUS']**1.87)
return reff
def get_reff_tractor(cat):
fracdev = cat['FRACDEV']
reff = np.log10(fracdev * cat['SHAPEDEV_R'] + (1 - fracdev) * cat['SHAPEEXP_R'])
return reff
def get_ell_acs(cat):
ell = 1 - cat['B_IMAGE'] / cat['A_IMAGE']
return ell
def get_ell_tractor(cat):
fracdev = cat['FRACDEV']
ell_exp = np.hypot(cat['SHAPEEXP_E1'], cat['SHAPEEXP_E2'])
ell_dev = np.hypot(cat['SHAPEDEV_E1'], cat['SHAPEDEV_E2'])
ell = fracdev * ell_dev + (1 - fracdev) * ell_exp
return ell
def qa_true_properties(acs, dr7, subsetlabel='0', noplots=False,
pngsize=None, pngellipticity=None):
"""Use HST to characterize the *true* ELG size and ellipticity
distributions.
"""
istar = acs['CLASS_STAR'] > 0.9
igal = ~istar
nstar, ngal, nobj = np.sum(istar), np.sum(igal), len(acs)
print('True galaxies, N={} ({:.2f}%):'.format(ngal, 100*ngal/nobj))
for tt in ('PSF ', 'REX ', 'EXP ', 'DEV ', 'COMP'):
nn = np.sum(dr7['TYPE'][igal] == tt)
frac = 100 * nn / ngal
print(' {}: {} ({:.2f}%)'.format(tt, nn, frac))
print('True stars, N={} ({:.2f}%):'.format(nstar, 100*nstar/nobj))
for tt in ('PSF ', 'REX ', 'EXP ', 'DEV ', 'COMP'):
nn = np.sum(dr7['TYPE'][istar] == tt)
frac = 100 * nn / nstar
print(' {}: {} ({:.2f}%)'.format(tt, nn, frac))
if noplots:
return
rmag = get_mag(dr7)
reff = get_reff_acs(acs)
ell = get_ell_acs(acs)
# Size
j = sns.jointplot(rmag[igal], reff[igal], kind='hex', space=0, alpha=0.7,
stat_func=None, cmap='viridis', mincnt=3)
j.set_axis_labels('DECaLS $r$ (AB mag)', r'$\log_{10}$ (HST/ACS Half-light radius) (arcsec)')
j.fig.set_figwidth(10)
j.fig.set_figheight(7)
j.ax_joint.axhline(y=np.log10(0.45), color='k', ls='--')
j.ax_joint.scatter(rmag[istar], reff[istar], marker='s', color='orange', s=10)
j.ax_joint.text(20.8, np.log10(0.45)+0.1, r'$r_{eff}=0.45$ arcsec', ha='left', va='center',
fontsize=14)
j.ax_joint.text(0.15, 0.2, 'HST Stars', ha='left', va='center',
fontsize=14, transform=j.ax_joint.transAxes)
j.ax_joint.text(0.05, 0.9, '{}'.format(subsetlabel), ha='left', va='center',
fontsize=16, transform=j.ax_joint.transAxes)
if pngsize:
plt.savefig(pngsize)
# Ellipticity
j = sns.jointplot(rmag[igal], ell[igal], kind='hex', space=0, alpha=0.7,
stat_func=None, cmap='viridis', mincnt=3)
j.set_axis_labels('DECaLS $r$ (AB mag)', 'HST/ACS Ellipticity')
j.fig.set_figwidth(10)
j.fig.set_figheight(7)
j.ax_joint.scatter(rmag[istar], ell[istar], marker='s', color='orange', s=10)
j.ax_joint.text(0.15, 0.2, 'HST Stars', ha='left', va='center',
fontsize=14, transform=j.ax_joint.transAxes)
j.ax_joint.text(0.05, 0.9, '{}'.format(subsetlabel), ha='left', va='center',
fontsize=16, transform=j.ax_joint.transAxes)
if pngellipticity:
plt.savefig(pngellipticity)
def qa_compare_radii(acs, dr7, subsetlabel='0', seeing=None, png=None):
"""Compare the HST and Tractor sizes."""
igal = dr7['TYPE'] != 'PSF '
reff_acs = get_reff_acs(acs[igal])
reff_tractor = get_reff_tractor(dr7[igal])
sizelim = (-1.5, 1)
j = sns.jointplot(reff_acs, reff_tractor, kind='hex', space=0, alpha=0.7,
stat_func=None, cmap='viridis', mincnt=3,
xlim=sizelim, ylim=sizelim)
j.set_axis_labels(r'$\log_{10}$ (HST/ACS Half-light radius) (arcsec)',
r'$\log_{10}$ (Tractor/DR7 Half-light radius) (arcsec)')
j.fig.set_figwidth(10)
j.fig.set_figheight(7)
j.ax_joint.plot([-2, 2], [-2, 2], color='k')
if seeing:
j.ax_joint.axhline(y=np.log10(seeing), ls='--', color='k')
j.ax_joint.text(0.05, 0.9, '{}'.format(subsetlabel), ha='left', va='center',
fontsize=16, transform=j.ax_joint.transAxes)
if png:
plt.savefig(png)
```
### Use subset 0 to characterize the "true" ELG properties.
```
subset = '0'
allacs, alldr7 = read_tractor(subset=subset)
acs, dr7 = select_ELGs(allacs, alldr7)
subsetlabel = 'Subset {}\n{:.3f}" seeing'.format(subset, np.median(alldr7['PSFSIZE_R']))
qa_true_properties(acs, dr7, subsetlabel=subsetlabel, pngsize='truesize.png', pngellipticity='trueell.png')
```
### Compare radii measured in three subsets of increasingly poor seeing (but same nominal depth).
```
for subset in ('0', '4', '9'):
allacs, alldr7 = read_tractor(subset=subset)
acs, dr7 = select_ELGs(allacs, alldr7)
medseeing = np.median(alldr7['PSFSIZE_R'])
subsetlabel = 'Subset {}\n{:.3f}" seeing'.format(subset, medseeing)
qa_compare_radii(acs, dr7, subsetlabel=subsetlabel, png='size_compare_subset{}.png'.format(subset))
subset = '9'
allacs, alldr7 = read_tractor(subset=subset)
acs, dr7 = select_ELGs(allacs, alldr7)
subsetlabel = 'Subset {}\n{:.3f}" seeing'.format(subset, np.median(alldr7['PSFSIZE_R']))
qa_true_properties(acs, dr7, subsetlabel=subsetlabel, noplots=True)
```
# Simple Test between NumPy and Numba
$$
\Gamma = \sqrt{\frac{\eta_H}{\eta_V} \kappa^2 + \eta_H \zeta_H}
$$
```
import numba
import cython
import numexpr
import numpy as np
%load_ext cython
# The number of cores used by numba can be shown with (by default all cores are used):
#print(numba.config.NUMBA_DEFAULT_NUM_THREADS)
# This can be changed with the following line
#numba.config.NUMBA_NUM_THREADS = 4
from empymod import filters
from scipy.constants import mu_0 # Magn. permeability of free space [H/m]
from scipy.constants import epsilon_0 # Elec. permittivity of free space [F/m]
res = np.array([2e14, 0.3, 1, 50, 1]) # nlay
freq = np.arange(1, 201)/20. # nfre
off = np.arange(1, 101)*1000 # noff
lambd = filters.key_201_2009().base/off[:, None] # nwav
aniso = np.array([1, 1, 1.5, 2, 1])
epermH = np.array([1, 80, 9, 20, 1])
epermV = np.array([1, 40, 9, 10, 1])
mpermH = np.array([1, 1, 3, 5, 1])
etaH = 1/res + np.outer(2j*np.pi*freq, epermH*epsilon_0)
etaV = 1/(res*aniso*aniso) + np.outer(2j*np.pi*freq, epermV*epsilon_0)
zetaH = np.outer(2j*np.pi*freq, mpermH*mu_0)
```
## NumPy
Numpy version to check result and compare times
```
def test_numpy(eH, eV, zH, l):
return np.sqrt((eH/eV) * (l*l) + (zH*eH))
```
## Numba @vectorize
This is exactly the same function as with NumPy, just with the @vectorize decorator added.
```
@numba.vectorize('c16(c16, c16, c16, f8)')
def test_numba_vnp(eH, eV, zH, l):
return np.sqrt((eH/eV) * (l*l) + (zH*eH))
@numba.vectorize('c16(c16, c16, c16, f8)', target='parallel')
def test_numba_v(eH, eV, zH, l):
return np.sqrt((eH/eV) * (l*l) + (zH*eH))
```
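As a side note, the explicit signature is optional: if you omit it, numba builds a dynamic ufunc and infers the types when the function is first called. A variant for illustration only (it is not included in the timings below):
```
@numba.vectorize
def test_numba_lazy(eH, eV, zH, l):
    # types are inferred lazily on the first call
    return np.sqrt((eH/eV) * (l*l) + (zH*eH))
```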
## Numba @njit
```
@numba.njit
def test_numba_nnp(eH, eV, zH, l):
o1, o3 = eH.shape
o2, o4 = l.shape
out = np.empty((o1, o2, o3, o4), dtype=numba.complex128)
for nf in numba.prange(o1):
for nl in numba.prange(o3):
ieH = eH[nf, nl]
ieV = eV[nf, nl]
izH = zH[nf, nl]
for no in numba.prange(o2):
for ni in numba.prange(o4):
il = l[no, ni]
out[nf, no, nl, ni] = np.sqrt(ieH/ieV * il*il + izH*ieH)
return out
@numba.njit(nogil=True, parallel=True)
def test_numba_n(eH, eV, zH, l):
o1, o3 = eH.shape
o2, o4 = l.shape
out = np.empty((o1, o2, o3, o4), dtype=numba.complex128)
for nf in numba.prange(o1):
for nl in numba.prange(o3):
ieH = eH[nf, nl]
ieV = eV[nf, nl]
izH = zH[nf, nl]
for no in numba.prange(o2):
for ni in numba.prange(o4):
il = l[no, ni]
out[nf, no, nl, ni] = np.sqrt(ieH/ieV * il*il + izH*ieH)
return out
```
## Run comparison for a small and a big matrix
```
eH = etaH[:, None, :, None]
eV = etaV[:, None, :, None]
zH = zetaH[:, None, :, None]
l = lambd[None, :, None, :]
# Output shape
out_shape = (freq.size, off.size, res.size, filters.key_201_2009().base.size)
print(' Shape Test Matrix ::', out_shape, '; total # elements:: '+str(freq.size*off.size*res.size*filters.key_201_2009().base.size))
print('------------------------------------------------------------------------------------------')
print(' NumPy :: ', end='')
# Get NumPy result for comparison
numpy_result = test_numpy(eH, eV, zH, l)
# Get runtime
%timeit test_numpy(eH, eV, zH, l)
print(' Numba @vectorize :: ', end='')
# Ensure it agrees with NumPy
numba_vnp_result = test_numba_vnp(eH, eV, zH, l)
if not np.allclose(numpy_result, numba_vnp_result, atol=0, rtol=1e-10):
print(' * FAIL, DOES NOT AGREE WITH NumPy RESULT!')
# Get runtime
%timeit test_numba_vnp(eH, eV, zH, l)
print(' Numba @vectorize par :: ', end='')
# Ensure it agrees with NumPy
numba_v_result = test_numba_v(eH, eV, zH, l)
if not np.allclose(numpy_result, numba_v_result, atol=0, rtol=1e-10):
print(' * FAIL, DOES NOT AGREE WITH NumPy RESULT!')
# Get runtime
%timeit test_numba_v(eH, eV, zH, l)
print(' Numba @njit :: ', end='')
# Ensure it agrees with NumPy
numba_nnp_result = test_numba_nnp(etaH, etaV, zetaH, lambd)
if not np.allclose(numpy_result, numba_nnp_result, atol=0, rtol=1e-10):
print(' * FAIL, DOES NOT AGREE WITH NumPy RESULT!')
# Get runtime
%timeit test_numba_nnp(etaH, etaV, zetaH, lambd)
print(' Numba @njit par :: ', end='')
# Ensure it agrees with NumPy
numba_n_result = test_numba_n(etaH, etaV, zetaH, lambd)
if not np.allclose(numpy_result, numba_n_result, atol=0, rtol=1e-10):
print(' * FAIL, DOES NOT AGREE WITH NumPy RESULT!')
# Get runtime
%timeit test_numba_n(etaH, etaV, zetaH, lambd)
from empymod import versions
versions('HTML', add_pckg=[cython, numba], ncol=5)
```
<center><em>Copyright by Pierian Data Inc.</em></center>
<center><em>For more information, visit us at <a href='http://www.pieriandata.com'>www.pieriandata.com</a></em></center>
# KNN Project Exercise
Due to the simplicity of KNN for Classification, let's focus on using a PipeLine and a GridSearchCV tool, since these skills can be generalized for any model.
## The Sonar Data
### Detecting a Rock or a Mine
Sonar (sound navigation ranging) is a technique that uses sound propagation (usually underwater, as in submarine navigation) to navigate, communicate with or detect objects on or under the surface of the water, such as other vessels.
<img src="sonar.jpg" style="max-height: 500px; max-width: 500px;">
The data set contains the response metrics for 60 separate sonar frequencies sent out against a known mine field (and known rocks). These frequencies are then labeled with the known object they were beaming the sound at (either a rock or a mine).
<img src="mine.jpg" style="max-height: 500px; max-width: 500px;">
Our main goal is to create a machine learning model capable of detecting the difference between a rock or a mine based on the response of the 60 separate sonar frequencies.
Data Source: https://archive.ics.uci.edu/ml/datasets/Connectionist+Bench+(Sonar,+Mines+vs.+Rocks)
### Complete the Tasks in bold
**TASK: Run the cells below to load the data.**
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv('../Data/sonar.all-data.csv')
df.head()
```
## Data Exploration
```
df.info()
df.describe()
```
**TASK: Create a heatmap of the correlation between the difference frequency responses.**
```
plt.figure(figsize=(8,6))
sns.heatmap(df.corr(), cmap='coolwarm');
```
-----
**TASK: What are the top 5 correlated frequencies with the target/label?**
*Note: You may need to map the label to 0s and 1s.*
*Additional Note: We're looking for **absolute** correlation values.*
```
df['Label'].value_counts()
# As we can't find the correlation between numbers and label string, we need to map the label (Rock / Mine) to 0s and 1s
df['Target'] = df['Label'].map({'M': 1, 'R': 0})
df.head(1)
df.corr()['Target']
# get the 5 frequencies with the highest absolute correlation
# (Target's self-correlation of 1.0 is included, hence taking 6 values)
df.corr()['Target'].abs().sort_values(ascending=False)[:6]
#option 2
df.corr()['Target'].abs().sort_values().tail(6)
```
-------
## Train | Test Split
Our approach here will be one of using Cross Validation on 90% of the dataset, and then judging our results on a final test set of 10% to evaluate our model.
**TASK: Split the data into features and labels, and then split into a training set and test set, with 90% for Cross-Validation training, and 10% for a final test set.**
*Note: The solution uses a random_state=42*
```
from sklearn.model_selection import train_test_split
X = df.drop(['Label', 'Target'], axis=1)
y = df['Label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1, random_state=42)
```
----
**TASK: Create a PipeLine that contains both a StandardScaler and a KNN model**
```
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
scaler = StandardScaler()
knn = KNeighborsClassifier()
operations = [('scaler', scaler), ('knn', knn)]
from sklearn.pipeline import Pipeline
pipe = Pipeline(operations)
```
-----
**TASK: Perform a grid-search with the pipeline to test various values of k and report back the best performing parameters.**
```
from sklearn.model_selection import GridSearchCV
k_values = list(range(1, 30))
parameters = {'knn__n_neighbors': k_values}
full_cv_classifier = GridSearchCV(pipe, parameters, cv=5, scoring='accuracy')
full_cv_classifier.fit(X_train, y_train)
# check best estimator
full_cv_classifier.best_estimator_.get_params()
```
----
**(HARD) TASK: Using the .cv_results_ dictionary, see if you can create a plot of the mean test scores per K value.**
```
pd.DataFrame(full_cv_classifier.cv_results_).head()
mean_test_scores = full_cv_classifier.cv_results_['mean_test_score']
mean_test_scores
# plt.plot(k_values, mean_test_scores, marker='.', markersize=10)
plt.plot(k_values, mean_test_scores, 'o-')
plt.xlabel('K')
plt.ylabel('Mean Test Score / Accuracy');
```
----
### Final Model Evaluation
**TASK: Using the grid classifier object from the previous step, get a final performance classification report and confusion matrix.**
```
full_pred = full_cv_classifier.predict(X_test)
from sklearn.metrics import confusion_matrix, plot_confusion_matrix, classification_report
confusion_matrix(y_test, full_pred)
plot_confusion_matrix(full_cv_classifier, X_test, y_test);
```
**IMPORTANT:**
- As we can see from the confusion matrix, there is 1 False Positive and 1 False Negative.
- Although the False Positive case (classifying a Rock as a Mine) may not be dangerous, the False Negative case (classifying a Mine as a Rock) is extremely dangerous.
- So we may need to revisit the modelling to make sure there is no False Negative.
```
print(classification_report(y_test, full_pred))
```
### Great Job!
Copyright 2019 Google LLC.
SPDX-License-Identifier: Apache-2.0
**Notebook Version** - 1.0.0
```
# Install datacommons
!pip install --upgrade --quiet git+https://github.com/datacommonsorg/[email protected]
```
# Analyzing Income Distribution
The American Community Survey (published by the US Census) annually reports the number of individuals in a given income bracket at the State level. We can use this information, stored in Data Commons, to visualize disparity in income for each State in the US. Our goal for this tutorial will be to generate a plot that visualizes the total number of individuals across a given set of income brackets for a given state.
Before we begin, we'll setup our notebook
```
# Import the Data Commons library
import datacommons as dc
# Import other libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import json
from google.colab import drive
```
We will also need to provide the API with an API key. See the [Analyzing Statistics in Data Commons Using the Python Client API](https://colab.research.google.com/drive/1ZNXTHu3J0W3vo9Mg3kNUpk0hnD6Ce1u6#scrollTo=ijxoBhFHjo3Z) to see how to set this up for a Colab Notebook.
```
# Mount the Drive
drive.mount('/content/drive', force_remount=True)
# REPLACE THIS with the path to your key.
key_path = '/content/drive/My Drive/DataCommons/secret.json'
# Read the key in and provide it to the Data Commons API
with open(key_path, 'r') as f:
secrets = json.load(f)
dc.set_api_key(secrets['dc_api_key'])
```
## Preparing the Data
We'll begin by creating a dataframe with states and their total population. We can use **`get_places_in`** to get all States within the United States. We can then call **`get_populations`** and **`get_observations`** to get the population of all persons in each State.
```
# Initialize a DataFrame holding the USA.
data = pd.DataFrame({'country': ['country/USA']})
# Add a column for states and get their names
data['state'] = dc.get_places_in(data['country'], 'State')
data = dc.flatten_frame(data)
# Get all state names and store it in a column "name"
data['name'] = dc.get_property_values(data['state'], 'name')
data = dc.flatten_frame(data)
# Get StatisticalPopulations representing all persons in each state.
data['all_pop'] = dc.get_populations(data['state'], 'Person')
# Get the total count of all persons in each population
data['all'] = dc.get_observations(data['all_pop'],
'count',
'measuredValue',
'2017',
measurement_method='CenusACS5yrSurvey')
# Display the first five rows of the table.
data.head(5)
```
### Querying for Income Brackets
Next, let's get the population level for each income bracket. The datacommons graph identifies 16 different income brackets. For each bracket and state, we can get the population level. Remember that we first get the StatisticalPopulation, and then a corresponding observation. We'll filter to observations published in 2017 by the American Community Survey.
```
# A list of income brackets
income_brackets = [
"USDollarUpto10000",
"USDollar10000To14999",
"USDollar15000To19999",
"USDollar20000To24999",
"USDollar25000To29999",
"USDollar30000To34999",
"USDollar35000To39999",
"USDollar40000To44999",
"USDollar45000To49999",
"USDollar50000To59999",
"USDollar60000To74999",
"USDollar75000To99999",
"USDollar100000To124999",
"USDollar125000To149999",
"USDollar150000To199999",
"USDollar200000Onwards",
]
# Add a column containing the population count for each income bracket
for bracket in income_brackets:
# Get the new column names
pop_col = '{}_pop'.format(bracket)
obs_col = bracket
# Create the constraining properties map
pvs = {'income': bracket}
# Get the StatisticalPopulation and Observation
data[pop_col] = dc.get_populations(data['state'], 'Household',
constraining_properties=pvs)
data[obs_col] = dc.get_observations(data[pop_col],
'count',
'measuredValue',
'2017',
measurement_method='CenusACS5yrSurvey')
# Display the table
data.head(5)
```
Let's limit the size of this DataFrame by selecting columns with only the State name and Observations.
```
# Select columns that will be used for plotting
data = data[['name', 'all'] + income_brackets]
# Display the table
data.head(5)
```
## Analyzing the Data
Let's plot our data as a histogram. Notice that the income ranges as tabulated by the US Census are not equal. At the low end, the range is 0-9999, whereas, towards the top, the range 150,000-199,999 is five times as broad! We will make the width of each column correspond to its range, which will give us an idea of the total earnings, not just the number of people in that group.
First we provide code for generating the plot.
```
# Histogram bins
label_to_range = {
"USDollarUpto10000": [0, 9999],
"USDollar10000To14999": [10000, 14999],
"USDollar15000To19999": [15000, 19999],
"USDollar20000To24999": [20000, 24999],
"USDollar25000To29999": [25000, 29999],
"USDollar30000To34999": [30000, 34999],
"USDollar35000To39999": [35000, 39999],
"USDollar40000To44999": [40000, 44999],
"USDollar45000To49999": [45000, 49999],
"USDollar50000To59999": [50000, 59999],
"USDollar60000To74999": [60000, 74999],
"USDollar75000To99999": [75000, 99999],
"USDollar100000To124999": [100000, 124999],
"USDollar125000To149999": [125000, 149999],
"USDollar150000To199999": [150000, 199999],
"USDollar200000Onwards": [250000, 300000],
}
bins = [
0, 10000, 15000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 60000,
75000, 100000, 125000, 150000, 250000
]
def plot_income(data, state_name):
# Assert that "state_name" is a valid state name
frame_search = data.loc[data['name'] == state_name].squeeze()
if frame_search.shape[0] == 0:
print('{} does not have sufficient income data to generate the plot!'.format(state_name))
return
  # Keep only the income bracket counts (drop the 'name' and 'all' entries)
data = frame_search[2:]
# Calculate the bar lengths
lengths = []
for bracket in income_brackets:
r = label_to_range[bracket]
lengths.append(int((r[1] - r[0]) / 18))
# Calculate the x-axis positions
pos, total = [], 0
for l in lengths:
pos.append(total + (l // 2))
total += l
# Plot the histogram
plt.figure(figsize=(12, 10))
plt.xticks(pos, income_brackets, rotation=90)
plt.grid(True)
plt.bar(pos, data.values, lengths, color='b', alpha=0.3)
# Return the resulting frame.
return frame_search
```
We can then call this code with a state to plot the income bracket sizes.
```
#@title Enter State to plot { run: "auto" }
state_name = "Tennessee" #@param ["Missouri", "Arkansas", "Arizona", "Ohio", "Connecticut", "Vermont", "Illinois", "South Dakota", "Iowa", "Oklahoma", "Kansas", "Washington", "Oregon", "Hawaii", "Minnesota", "Idaho", "Alaska", "Colorado", "Delaware", "Alabama", "North Dakota", "Michigan", "California", "Indiana", "Kentucky", "Nebraska", "Louisiana", "New Jersey", "Rhode Island", "Utah", "Nevada", "South Carolina", "Wisconsin", "New York", "North Carolina", "New Hampshire", "Georgia", "Pennsylvania", "West Virginia", "Maine", "Mississippi", "Montana", "Tennessee", "New Mexico", "Massachusetts", "Wyoming", "Maryland", "Florida", "Texas", "Virginia"]
result = plot_income(data, state_name)
# Show the plot
plt.show()
```
and we can display the raw table of values.
```
# Additionally print the table of income bracket sizes
result
```
This is only the beginning! What else can you analyze? For example, you could try computing a measure of income disparity in each state (see [Gini Coefficient](https://en.wikipedia.org/wiki/Gini_coefficient)).
You could then expand the dataframe to include more information and analyze how attributes like education level, crime, or even weather affect income disparity.
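As a starting point, here is a rough sketch of how a Gini coefficient could be estimated from the bracket counts in `result` above. It assigns every household the midpoint of its bracket (reusing the plotting ranges in `label_to_range`, so the midpoint chosen for the open-ended top bracket is an arbitrary assumption) and integrates the Lorenz curve with the trapezoid rule:
```
import numpy as np

def gini_from_brackets(counts, midpoints):
    """Approximate Gini coefficient from grouped data: every household
    in bracket i is assumed to earn exactly midpoints[i]."""
    counts = np.asarray(counts, dtype=float)
    midpoints = np.asarray(midpoints, dtype=float)
    order = np.argsort(midpoints)
    counts, midpoints = counts[order], midpoints[order]
    pop_share = counts / counts.sum()
    income_share = counts * midpoints / np.sum(counts * midpoints)
    cum_income = np.cumsum(income_share)
    lorenz_prev = np.concatenate(([0.0], cum_income[:-1]))
    # area under the Lorenz curve via the trapezoid rule
    area = np.sum(pop_share * (cum_income + lorenz_prev) / 2.0)
    return 1.0 - 2.0 * area

midpoints = [(label_to_range[b][0] + label_to_range[b][1]) / 2 for b in income_brackets]
print(gini_from_brackets(result[income_brackets].values, midpoints))
```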
## Importing dependencies and loading the data
```
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_boston
dataset=load_boston()
dataset
```
### The data contains a set of features and the target prices of houses in Boston, so let's first transform it into a dataframe
```
df=pd.DataFrame(dataset.data,columns=dataset.feature_names)
df.head()
```
Let's add the target variable to the dataframe
```
df['Target']=dataset.target
df.head()
```
## Now that we have transformed the data into a dataframe, let's do some Exploratory Data Analysis (EDA)
```
df.describe()
```
Let's see if there is any missing data or not
```
df.isnull().sum()
```
### Since there is no missing data, let's look at the correlation between the features
```
df.corr()
```
### Let's visualize the data on the heatmap
```
plt.figure(figsize=(10,10)) #this increase the dimension of the figure
sns.heatmap(df.corr(),annot=True) #this plots the data into the heatmap
```
### Let's look at the distribution of each feature, since all the features are continuous
```
cont=[feature for feature in df.columns]
cont
for feature in cont:
sns.distplot(df[feature])
plt.show()
```
#### Let's draw a regplot between each feature and the target
```
for feature in cont:
if feature!='Target':
sns.regplot(x=feature,y='Target',data=df)
plt.show()
cont
plt.figure(figsize=(10,10)) #this increase the dimension of the figure
sns.heatmap(df.corr(),annot=True)
```
### Let's do some feature engineering and drop some features which have low correlation with the target
```
'''Let's start with a subset of the features and fit a model; after
seeing the result we can add or drop features to check whether the model improves.'''
x=df.loc[:,[
'ZN',
'INDUS',
'NOX',
'RM',
'AGE',
'DIS',
'TAX',
'PTRATIO',
'B',
'LSTAT']]
y=df.Target
x.head()
# Now let's split the data into train and test data using train_test_split
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=5)
# fitting the model
from sklearn.linear_model import LinearRegression
model=LinearRegression()
model.fit(x_train,y_train)
# Predicting the values
y_pre=model.predict(x_test)
y_pre
# Let's see how our model is performing
from sklearn.metrics import r2_score
score=r2_score(y_test,y_pre)
score
sns.scatterplot(y_test,y_pre)
sns.distplot(y_test-y_pre)
```
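Beyond R², it is often useful to report errors in the target's own units and to penalise the score for the number of predictors. A short sketch reusing `y_test`, `y_pre`, `x_test` and `score` from above:
```
from sklearn.metrics import mean_absolute_error, mean_squared_error

mae = mean_absolute_error(y_test, y_pre)
rmse = np.sqrt(mean_squared_error(y_test, y_pre))

# adjusted R^2 accounts for the number of predictors p relative to the sample size n
n, p = x_test.shape
adj_r2 = 1 - (1 - score) * (n - 1) / (n - p - 1)
print('MAE: {:.2f} RMSE: {:.2f} Adjusted R2: {:.3f}'.format(mae, rmse, adj_r2))
```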
# Using Variational Autoencoder to Generate Faces
In this example, we are going to use VAE to generate faces. The dataset we are going to use is [CelebA](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html). The dataset consists of more than 200K celebrity face images. You have to download the Align&Cropped Images from the above website to run this example.
```
from bigdl.nn.layer import *
from bigdl.nn.criterion import *
from bigdl.optim.optimizer import *
from bigdl.dataset import mnist
import datetime as dt
from glob import glob
import os
import numpy as np
from utils import *
import imageio
image_size = 148
Z_DIM = 128
ENCODER_FILTER_NUM = 32
# download the CelebA data; you may replace this with your own data path
DATA_PATH = os.getenv("ANALYTICS_ZOO_HOME") + "/apps/variational-autoencoder/img_align_celeba"
from zoo.common.nncontext import *
sc = init_nncontext("Variational Autoencoder Example")
sc.addFile(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/variational-autoencoder/utils.py")
```
## Define the Model
Here we define slightly more complicated CNN networks using convolution, batch normalization, and LeakyReLU.
```
def conv_bn_lrelu(in_channels, out_channles, kw=4, kh=4, sw=2, sh=2, pw=-1, ph=-1):
model = Sequential()
model.add(SpatialConvolution(in_channels, out_channles, kw, kh, sw, sh, pw, ph))
model.add(SpatialBatchNormalization(out_channles))
model.add(LeakyReLU(0.2))
return model
def upsample_conv_bn_lrelu(in_channels, out_channles, out_width, out_height, kw=3, kh=3, sw=1, sh=1, pw=-1, ph=-1):
model = Sequential()
model.add(ResizeBilinear(out_width, out_height))
model.add(SpatialConvolution(in_channels, out_channles, kw, kh, sw, sh, pw, ph))
model.add(SpatialBatchNormalization(out_channles))
model.add(LeakyReLU(0.2))
return model
def get_encoder_cnn():
input0 = Input()
#CONV
conv1 = conv_bn_lrelu(3, ENCODER_FILTER_NUM)(input0) # 32 * 32 * 32
conv2 = conv_bn_lrelu(ENCODER_FILTER_NUM, ENCODER_FILTER_NUM * 2)(conv1) # 16 * 16 * 64
conv3 = conv_bn_lrelu(ENCODER_FILTER_NUM * 2, ENCODER_FILTER_NUM * 4)(conv2) # 8 * 8 * 128
conv4 = conv_bn_lrelu(ENCODER_FILTER_NUM * 4, ENCODER_FILTER_NUM * 8)(conv3) # 4 * 4 * 256
view = View([4*4*ENCODER_FILTER_NUM*8])(conv4)
inter = Linear(4*4*ENCODER_FILTER_NUM*8, 2048)(view)
inter = BatchNormalization(2048)(inter)
inter = ReLU()(inter)
# fully connected to generate mean and log-variance
mean = Linear(2048, Z_DIM)(inter)
log_variance = Linear(2048, Z_DIM)(inter)
model = Model([input0], [mean, log_variance])
return model
def get_decoder_cnn():
input0 = Input()
linear = Linear(Z_DIM, 2048)(input0)
linear = Linear(2048, 4*4*ENCODER_FILTER_NUM * 8)(linear)
reshape = Reshape([ENCODER_FILTER_NUM * 8, 4, 4])(linear)
bn = SpatialBatchNormalization(ENCODER_FILTER_NUM * 8)(reshape)
# upsampling
up1 = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM*8, ENCODER_FILTER_NUM*4, 8, 8)(bn) # 8 * 8 * 128
up2 = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM*4, ENCODER_FILTER_NUM*2, 16, 16)(up1) # 16 * 16 * 64
up3 = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM*2, ENCODER_FILTER_NUM, 32, 32)(up2) # 32 * 32 * 32
up4 = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM, 3, 64, 64)(up3) # 64 * 64 * 3
output = Sigmoid()(up4)
model = Model([input0], [output])
return model
def get_autoencoder_cnn():
input0 = Input()
encoder = get_encoder_cnn()(input0)
sampler = GaussianSampler()(encoder)
decoder_model = get_decoder_cnn()
decoder = decoder_model(sampler)
model = Model([input0], [encoder, decoder])
return model, decoder_model
model, decoder = get_autoencoder_cnn()
```
## Load the Dataset
```
def get_data():
data_files = glob(os.path.join(DATA_PATH, "*.jpg"))
rdd_train_images = sc.parallelize(data_files[:100000]) \
.map(lambda path: inverse_transform(get_image(path, image_size)).transpose(2, 0, 1))
rdd_train_sample = rdd_train_images.map(lambda img: Sample.from_ndarray(img, [np.array(0.0), img]))
return rdd_train_sample
train_data = get_data()
```
## Define the Training Objective
```
criterion = ParallelCriterion()
criterion.add(KLDCriterion(), 1.0) # You may want to tweak this weight
criterion.add(BCECriterion(size_average=False), 1.0 / 64)
```
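These two criteria correspond to the two terms of the standard VAE objective (the negative evidence lower bound): a KL term that keeps the approximate posterior $q_\phi(z\mid x)$ close to the prior $p(z)$, plus a reconstruction term, here a binary cross-entropy that the code weights by $1/64$:
$$
\text{loss} \;=\; D_{KL}\big(q_\phi(z\mid x)\,\|\,p(z)\big) \;+\; \frac{1}{64}\Big(-\sum_i \big[x_i\log \hat{x}_i + (1-x_i)\log(1-\hat{x}_i)\big]\Big)
$$
where $\hat{x}$ is the decoder's reconstruction of the input image $x$.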
## Define the Optimizer
```
batch_size = 100
# Create an Optimizer
optimizer = Optimizer(
model=model,
training_rdd=train_data,
criterion=criterion,
optim_method=Adam(0.001, beta1=0.5),
end_trigger=MaxEpoch(1),
batch_size=batch_size)
app_name='vea-'+dt.datetime.now().strftime("%Y%m%d-%H%M%S")
train_summary = TrainSummary(log_dir='/tmp/vae',
app_name=app_name)
train_summary.set_summary_trigger("LearningRate", SeveralIteration(10))
train_summary.set_summary_trigger("Parameters", EveryEpoch())
optimizer.set_train_summary(train_summary)
print ("saving logs to ",app_name)
```
## Spin Up the Training
This could take a while. It took about 2 hours on a desktop with an Intel i7-6700 CPU and 40GB of Java heap memory. You can reduce the training time by using less data (with some changes in the "Load the Dataset" section), but the performance may not be as good.
```
redire_spark_logs()
show_bigdl_info_logs()
def gen_image_row():
decoder.evaluate()
return np.column_stack([decoder.forward(np.random.randn(1, Z_DIM)).reshape(3, 64,64).transpose(1, 2, 0) for s in range(8)])
def gen_image():
return np.row_stack([gen_image_row() for i in range(8)])
for i in range(1, 6):
optimizer.set_end_when(MaxEpoch(i))
trained_model = optimizer.optimize()
image = gen_image()
if not os.path.exists("./images"):
os.makedirs("./images")
if not os.path.exists("./models"):
os.makedirs("./models")
# you may change the following directory accordingly and make sure the directory
# you are writing to exists
imageio.imwrite("./images/image_%s.png" % i , image)
decoder.saveModel("./models/decoder_%s.model" % i, over_write = True)
import matplotlib
matplotlib.use('Agg')
%pylab inline
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
loss = np.array(train_summary.read_scalar("Loss"))
plt.figure(figsize = (12,12))
plt.plot(loss[:,0],loss[:,1],label='loss')
plt.xlim(0,loss.shape[0]+10)
plt.grid(True)
plt.title("loss")
```
## Random Sample Some Images
```
from matplotlib.pyplot import imshow
img = gen_image()
imshow(img)
```
<img src="../Pics/MLSb-T.png" width="160">
<br><br>
<center><u><H1>LSTM and GRU on Sentiment Analysis</H1></u></center>
```
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.log_device_placement = True
sess = tf.Session(config=config)
set_session(sess)
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.models import Sequential
from keras.layers import Dense, Embedding, GRU, LSTM, CuDNNLSTM, CuDNNGRU, Dropout
from keras.datasets import imdb
from keras.callbacks import EarlyStopping
from keras.optimizers import Adam
num_words = 20000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=num_words)
print(len(X_train), 'train_data')
print(len(X_test), 'test_data')
print(X_train[0])
len(X_train[0])
```
## Hyperparameters:
```
max_len = 256
embedding_size = 10
batch_size = 128
n_epochs = 10
```
## Creating Sequences
```
pad = 'pre' #'post'
X_train_pad = pad_sequences(X_train, maxlen=max_len, padding=pad, truncating=pad)
X_test_pad = pad_sequences(X_test, maxlen=max_len, padding=pad, truncating=pad)
X_train_pad[0]
```
## Creating the model:
```
model = Sequential()
#The input is a 2D tensor: (samples, sequence_length)
# this layer will return 3D tensor: (samples, sequence_length, embedding_dim)
model.add(Embedding(input_dim=num_words,
output_dim=embedding_size,
input_length=max_len,
name='layer_embedding'))
model.add(Dropout(0.2))
#model.add(LSTM(128,dropout=0.2, recurrent_dropout=0.2))
model.add(CuDNNLSTM(128, return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid', name='classification'))
model.summary()
```
## Compiling the model:
```
#optimizer = Adam(lr=0.001, decay=1e-6)
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
```
## Callbacks:
```
callback_early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1)
```
## Training the model:
```
%%time
model.fit(X_train_pad, y_train,
epochs=n_epochs,
batch_size=batch_size,
validation_split=0.05,
callbacks=[callback_early_stopping]
)
```
## Testing the model:
```
%%time
eval_ = model.evaluate(X_test_pad, y_test)
print("Loss: {0:.5}".format(eval_[0]))
print("Accuracy: {0:.2%}".format(eval_[1]))
```
## Saving the model:
```
model.save("../data/models/{}".format('Sentiment-LSTM-GRU'))
```
## GRU model:
```
model_GRU = Sequential()
model_GRU.add(Embedding(input_dim=num_words,
output_dim=embedding_size,
input_length=max_len,
name='layer_embedding'))
model_GRU.add(CuDNNGRU(units=16, return_sequences=True))
model_GRU.add(CuDNNGRU(units=8, return_sequences=True))
model_GRU.add(CuDNNGRU(units=4, return_sequences=False))
model_GRU.add(Dense(1, activation='sigmoid'))
model_GRU.summary()
model_GRU.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
%%time
model_GRU.fit(X_train_pad, y_train, validation_split=0.05, epochs=n_epochs, batch_size=batch_size)
%%time
eval_GRU = model_GRU.evaluate(X_test_pad, y_test)
print("Loss: {0:.5}".format(eval_GRU[0]))
print("Accuracy: {0:.2%}".format(eval_GRU[1]))
```
## Examples of Mis-Classified Text
```
#making predictions for the first 1000 test samples
y_pred = model.predict(X_test_pad[:1000])
y_pred = y_pred.T[0]
labels_pred = np.array([1.0 if p > 0.5 else 0.0 for p in y_pred])
true_labels = np.array(y_test[:1000])
incorrect = np.where(labels_pred != true_labels)
incorrect = incorrect[0]
print(incorrect)
len(incorrect)
idx = incorrect[1]
idx
text = X_test[idx]
print(text)
y_pred[idx]
true_labels[idx]
```
## Converting integers in Text
```
# A dictionary mapping words to an integer index
word_index = imdb.get_word_index()
word_index.items()
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
print(reverse_word_index)
def decode_index(text):
return ' '.join([reverse_word_index.get(i) for i in text])
decode_index(X_train[0])
text_data = []
for i in range(len(X_train)):
text_data.append(decode_index(X_train[i]))
text_data[0]
```
## Embeddings
```
layer_embedding = model.get_layer('layer_embedding')
weights_embedding = layer_embedding.get_weights()[0]
weights_embedding.shape
weights_embedding[word_index.get('good')]
```
## Similar Words
```
from scipy.spatial.distance import cdist
def print_similar_words(word, metric='cosine'):
token = word_index.get(word)
embedding = weights_embedding[token]
distances = cdist(weights_embedding, [embedding],
metric=metric).T[0]
sorted_index = np.argsort(distances)
sorted_distances = distances[sorted_index]
sorted_words = [reverse_word_index[token] for token in sorted_index
if token != 0]
def print_words(words, distances):
for word, distance in zip(words, distances):
print("{0:.3f} - {1}".format(distance, word))
N = 10
print("Distance from '{0}':".format(word))
print_words(sorted_words[0:N], sorted_distances[0:N])
print("-------")
print_words(sorted_words[-N:], sorted_distances[-N:])
print_similar_words('good', metric='cosine')
```
## Reference:
https://keras.io/layers/recurrent/
# Monte Carlo Methods
In this notebook, you will write your own implementations of many Monte Carlo (MC) algorithms.
While we have provided some starter code, you are welcome to erase these hints and write your code from scratch.
### Part 0: Explore BlackjackEnv
We begin by importing the necessary packages.
```
import sys
import gym
import numpy as np
from collections import defaultdict
from plot_utils import plot_blackjack_values, plot_policy
```
Use the code cell below to create an instance of the [Blackjack](https://github.com/openai/gym/blob/master/gym/envs/toy_text/blackjack.py) environment.
```
env = gym.make('Blackjack-v1')
```
Each state is a 3-tuple of:
- the player's current sum $\in \{0, 1, \ldots, 31\}$,
- the dealer's face up card $\in \{1, \ldots, 10\}$, and
- whether or not the player has a usable ace (`no` $=0$, `yes` $=1$).
The agent has two potential actions:
```
STICK = 0
HIT = 1
```
Verify this by running the code cell below.
```
print(env.observation_space)
print(env.action_space)
```
Execute the code cell below to play Blackjack with a random policy.
(_The code currently plays Blackjack three times - feel free to change this number, or to run the cell multiple times. The cell is designed for you to get some experience with the output that is returned as the agent interacts with the environment._)
```
for i_episode in range(3):
state = env.reset()
while True:
print(state)
action = env.action_space.sample()
state, reward, done, info = env.step(action)
if done:
print('End game! Reward: ', reward)
print('You won :)\n') if reward > 0 else print('You lost :(\n')
break
```
### Part 1: MC Prediction
In this section, you will write your own implementation of MC prediction (for estimating the action-value function).
We will begin by investigating a policy where the player _almost_ always sticks if the sum of her cards exceeds 18. In particular, she selects action `STICK` with 80% probability if the sum is greater than 18; and, if the sum is 18 or below, she selects action `HIT` with 80% probability. The function `generate_episode_from_limit_stochastic` samples an episode using this policy.
The function accepts as **input**:
- `bj_env`: This is an instance of OpenAI Gym's Blackjack environment.
It returns as **output**:
- `episode`: This is a list of (state, action, reward) tuples (of tuples) and corresponds to $(S_0, A_0, R_1, \ldots, S_{T-1}, A_{T-1}, R_{T})$, where $T$ is the final time step. In particular, `episode[i]` returns $(S_i, A_i, R_{i+1})$, and `episode[i][0]`, `episode[i][1]`, and `episode[i][2]` return $S_i$, $A_i$, and $R_{i+1}$, respectively.
```
def generate_episode_from_limit_stochastic(bj_env):
episode = []
state = bj_env.reset()
while True:
probs = [0.8, 0.2] if state[0] > 18 else [0.2, 0.8]
action = np.random.choice(np.arange(2), p=probs)
next_state, reward, done, info = bj_env.step(action)
episode.append((state, action, reward))
state = next_state
if done:
break
return episode
```
Execute the code cell below to play Blackjack with the policy.
(*The code currently plays Blackjack five times - feel free to change this number, or to run the cell multiple times. The cell is designed for you to gain some familiarity with the output of the `generate_episode_from_limit_stochastic` function.*)
```
for i in range(5):
print(generate_episode_from_limit_stochastic(env))
```
Now, you are ready to write your own implementation of MC prediction. Feel free to implement either first-visit or every-visit MC prediction; in the case of the Blackjack environment, the techniques are equivalent, since the same state is never revisited within a single episode.
Your algorithm has three arguments:
- `env`: This is an instance of an OpenAI Gym environment.
- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.
- `generate_episode`: This is a function that returns an episode of interaction.
- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).
The algorithm returns as output:
- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.
```
def mc_prediction_q(env, num_episodes, generate_episode, gamma=1.0):
# initialize empty dictionaries of arrays
returns_sum = defaultdict(lambda: np.zeros(env.action_space.n))
N = defaultdict(lambda: np.zeros(env.action_space.n))
Q = defaultdict(lambda: np.zeros(env.action_space.n))
# loop over episodes
for i_episode in range(1, num_episodes+1):
# monitor progress
if i_episode % 1000 == 0:
print("\rEpisode {}/{}.".format(i_episode, num_episodes), end="")
sys.stdout.flush()
# generate an episode
episode = generate_episode(env)
# obtain the states, actions, and rewards
states, actions, rewards = zip(*episode)
# prepare for discounting
discounts = np.array([gamma**i for i in range(len(rewards)+1)])
# update the sum of the returns, number of visits, and action-value
# function estimates for each state-action pair in the episode
for i, state in enumerate(states):
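            # return from (S_i, A_i): G_i = R_{i+1} + gamma*R_{i+2} + ...; rewards[i:] has len(rewards)-i entries, so the discounts array is trimmed to match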
returns_sum[state][actions[i]] += sum(rewards[i:]*discounts[:-(1+i)])
N[state][actions[i]] += 1.0
Q[state][actions[i]] = returns_sum[state][actions[i]] / N[state][actions[i]]
return Q
```
Use the cell below to obtain the action-value function estimate $Q$. We have also plotted the corresponding state-value function.
To check the accuracy of your implementation, compare the plot below to the corresponding plot in the solutions notebook **Monte_Carlo_Solution.ipynb**.
```
# obtain the action-value function
Q = mc_prediction_q(env, 500000, generate_episode_from_limit_stochastic)
# obtain the corresponding state-value function
V_to_plot = dict((k,(k[0]>18)*(np.dot([0.8, 0.2],v)) + (k[0]<=18)*(np.dot([0.2, 0.8],v))) \
for k, v in Q.items())
# plot the state-value function
plot_blackjack_values(V_to_plot)
```
### Part 2: MC Control
In this section, you will write your own implementation of constant-$\alpha$ MC control.
Your algorithm has four arguments:
- `env`: This is an instance of an OpenAI Gym environment.
- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.
- `alpha`: This is the step-size parameter for the update step.
- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).
The algorithm returns as output:
- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.
- `policy`: This is a dictionary where `policy[s]` returns the action that the agent chooses after observing state `s`.
(_Feel free to define additional functions to help you to organize your code._)
```
def generate_episode_from_Q(env, Q, epsilon, nA):
""" generates an episode from following the epsilon-greedy policy """
episode = []
state = env.reset()
while True:
action = np.random.choice(np.arange(nA), p=get_probs(Q[state], epsilon, nA)) \
if state in Q else env.action_space.sample()
next_state, reward, done, info = env.step(action)
episode.append((state, action, reward))
state = next_state
if done:
break
return episode
def get_probs(Q_s, epsilon, nA):
""" obtains the action probabilities corresponding to epsilon-greedy policy """
policy_s = np.ones(nA) * epsilon / nA
best_a = np.argmax(Q_s)
policy_s[best_a] = 1 - epsilon + (epsilon / nA)
return policy_s
def update_Q(env, episode, Q, alpha, gamma):
""" updates the action-value function estimate using the most recent episode """
states, actions, rewards = zip(*episode)
# prepare for discounting
discounts = np.array([gamma**i for i in range(len(rewards)+1)])
for i, state in enumerate(states):
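        # constant-alpha update: move the old estimate toward the observed discounted return for (S_i, A_i)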
old_Q = Q[state][actions[i]]
Q[state][actions[i]] = old_Q + alpha*(sum(rewards[i:]*discounts[:-(1+i)]) - old_Q)
return Q
def mc_control(env, num_episodes, alpha, gamma=1.0, eps_start=1.0, eps_decay=.99999, eps_min=0.05):
nA = env.action_space.n
# initialize empty dictionary of arrays
Q = defaultdict(lambda: np.zeros(nA))
epsilon = eps_start
# loop over episodes
for i_episode in range(1, num_episodes+1):
# monitor progress
if i_episode % 1000 == 0:
print("\rEpisode {}/{}.".format(i_episode, num_episodes), end="")
sys.stdout.flush()
# set the value of epsilon
epsilon = max(epsilon*eps_decay, eps_min)
# generate an episode by following epsilon-greedy policy
episode = generate_episode_from_Q(env, Q, epsilon, nA)
# update the action-value function estimate using the episode
Q = update_Q(env, episode, Q, alpha, gamma)
# determine the policy corresponding to the final action-value function estimate
policy = dict((k,np.argmax(v)) for k, v in Q.items())
return policy, Q
```
Use the cell below to obtain the estimated optimal policy and action-value function. Note that you should fill in your own values for the `num_episodes` and `alpha` parameters.
```
# obtain the estimated optimal policy and action-value function
policy, Q = mc_control(env, 500000, 0.02)
```
Next, we plot the corresponding state-value function.
```
# obtain the corresponding state-value function
V = dict((k,np.max(v)) for k, v in Q.items())
# plot the state-value function
plot_blackjack_values(V)
```
Finally, we visualize the policy that is estimated to be optimal.
```
# plot the policy
plot_policy(policy)
```
The **true** optimal policy $\pi_*$ can be found in Figure 5.2 of the [textbook](http://go.udacity.com/rl-textbook) (and appears below). Compare your final estimate to the optimal policy - how close are you able to get? If you are not happy with the performance of your algorithm, take the time to tweak the decay rate of $\epsilon$, change the value of $\alpha$, and/or run the algorithm for more episodes to attain better results.

## Goes over modeling, starting from modeling tables.
### We're using modeling tables which were prepared based on 12 hours' worth of vital sign data from each patient, as well as medication history during the stay and patient characteristics.
### The model predicts the probability of having a rapid response team (RRT) event one hour after the time of prediction. An RRT event is called after personnel identify that a patient has an urgent need for medical service.
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy as sp
# import datetime as datetime
import cPickle as pickle
%matplotlib inline
plt.style.use('ggplot')
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split, KFold
from sklearn.metrics import confusion_matrix, roc_auc_score, precision_score, recall_score, classification_report
from sklearn.ensemble import GradientBoostingClassifier #, RandomForestClassifier,
from sklearn.ensemble.partial_dependence import plot_partial_dependence, partial_dependence
from sklearn.grid_search import GridSearchCV
```
### function definitions
```
def score_printout(X_test, y_test, fittedModel):
print "AUC-ROC Score of model: ", roc_auc_score(y_test, fittedModel.predict_proba(X_test)[:,1])
print "Precision Score of model: ", precision_score(y_test, fittedModel.predict(X_test))
print "Recall Score of model: ", recall_score(y_test, fittedModel.predict(X_test))
def make_feature_importance_plot(featuresAndImportances, numFeatures):
topN = featuresAndImportances[:numFeatures]
labels = [pair[0] for pair in topN]
values = [pair[1] for pair in topN]
ind = np.arange(len(values)+2)
width = 0.35
plt.barh(range(numFeatures),values)
ax = plt.subplot(111)
ax.set_yticks(ind+width)
ax.set_yticklabels(labels, rotation=0, size=12)
plt.ylabel('Feature', size=20)
plt.xlabel('Importance', size=20)
plt.show()
```
### Read in data
We did not share our modeling data, so you will have to create your own. The pipeline tool can help you do this. If you save the results to a csv, `masterdf_rrt` and `masterdf_nonrrt` are dataframes with the modeling data for each of the positive and negative classes, respectively.
```
masterdf_rrt = pd.read_csv('RRT_modeling_table_13hr_raw.csv')
masterdf_nonrrt = pd.read_csv('NonRRT_modeling_table_13hr_raw.csv')
```
### Look at summary statistics for numeric columns for rrt & non-rrt tables (35 cols)
```
masterdf_rrt.columns
masterdf_rrt.describe().T
masterdf_nonrrt.describe().T
```
### We have a good amount of nan values in some columns. Let's plot the nan values to get a sense of how many there are
```
def show_df_nans(masterdf, collist=None):
'''
Create a data frame for features which may be nan.
Make nan values be 1, numeric values be 0
A heat map where dark squares/lines show where data is missing.
'''
if not collist:
plot_cols = ['obese','DBP_mean', 'DBP_recent', 'SBP_mean', 'SBP_recent', 'HR_mean', 'HR_recent',
'MAP_mean', 'MAP_recent', 'temp_mean', 'temp_recent', 'SPO2_mean',
'SPO2_recent', 'RR_mean', 'RR_recent', 'pulse_mean', 'pulse_recent',
'CO2_mean', 'CO2_recent', 'GCS_mean', 'GCS_recent']
else:
plot_cols = collist
df_viznan = pd.DataFrame(data = 1,index=masterdf.index,columns=plot_cols)
df_viznan[~pd.isnull(masterdf[plot_cols])] = 0
plt.figure(figsize=(10,8))
plt.title('Dark values are nans')
return sns.heatmap(df_viznan.astype(float))
# subset of numeric columns we'll use in modeling (sufficient data available)
plot_cols_good = ['obese','DBP_mean', 'DBP_recent', 'SBP_mean', 'SBP_recent',
'MAP_mean', 'MAP_recent', 'temp_mean', 'temp_recent', 'SPO2_mean',
'SPO2_recent', 'RR_mean', 'RR_recent', 'pulse_mean', 'pulse_recent']
show_df_nans(masterdf_nonrrt) # show all columns that may have nans
# show_df_nans(masterdf_nonrrt, plot_cols_good) # show the columns whch we plan to use for modeling
show_df_nans(masterdf_rrt)
# show_df_nans(masterdf_rrt, plot_cols_good)
```
### Let's not use those columns where there are significant nans: drop HR (heart rate; we have pulse rate instead), CO2, and GCS, which leaves us with 28 features.
```
col_use = ['age', 'sex', 'obese', 'smoker', 'prev_rrt', 'on_iv', 'bu-nal', 'DBP_mean',
'DBP_recent', 'SBP_mean', 'SBP_recent',
'MAP_mean', 'MAP_recent', 'temp_mean', 'temp_recent', 'SPO2_mean',
'SPO2_recent', 'RR_mean', 'RR_recent', 'pulse_mean', 'pulse_recent',
'anticoagulants', 'narcotics', 'narc-ans', 'antipsychotics',
'chemo', 'dialysis', 'race']
X_rrt = masterdf_rrt[col_use]
X_notrrt = masterdf_nonrrt[col_use]
```
### We need to deal with these nans before we can start modeling. (There should not be any nans in the modeling table)
```
# let's look at getting rid of the data rows where vitals signs are all nans
vitals_cols = ['DBP_mean', 'DBP_recent', # take the mean of all the measurements & the most recently observed point
'SBP_mean', 'SBP_recent',
'MAP_mean', 'MAP_recent', # mean arterial pressure
'temp_mean', 'temp_recent',# temperature
'SPO2_mean', 'SPO2_recent',
'RR_mean', 'RR_recent', # respiratory rate
'pulse_mean', 'pulse_recent']
# Write out rows that are not all 0/NaNs across. (if all nans, remove this sample)
X_rrt = X_rrt.loc[np.where(X_rrt.ix[:, vitals_cols].sum(axis=1, skipna=True)!=0)[0]]
X_rrt = X_rrt.reset_index(drop=True)
X_notrrt = X_notrrt.loc[np.where(X_notrrt.ix[:, vitals_cols].sum(axis=1, skipna=True)!=0)[0]]
X_notrrt = X_notrrt.reset_index(drop=True)
# if 'obese' is Nan, then set the patient to be not obese.
X_rrt.loc[np.where(pd.isnull(X_rrt['obese']))[0], 'obese'] = 0
X_notrrt.loc[np.where(pd.isnull(X_notrrt['obese']))[0], 'obese'] = 0
```
### Let's see how X_rrt & X_notrrt look
```
show_df_nans(X_rrt, vitals_cols)
show_df_nans(X_notrrt, vitals_cols)
```
### Some columns have significant missing values.
```
print X_rrt[['pulse_mean', 'pulse_recent']].describe().T
print "size of X_rrt: "+str(len(X_rrt))
print
print X_notrrt[['pulse_mean', 'pulse_recent']].describe().T
print "size of X_notrrt: " + str(len(X_notrrt))
```
### We have plenty of samples for the non-RRT case. We can drop rows with missing values without concern that we'll lose negative examples for RRT events for modeling.
```
# DROP THE ROWS WHERE PULSE IS NAN
X_notrrt = X_notrrt.ix[np.where(pd.isnull(X_notrrt['pulse_mean'])!=True)[0]]
X_notrrt = X_notrrt.reset_index(drop=True)
# And similarly for all rows with significant nans:
X_notrrt = X_notrrt.ix[np.where(pd.isnull(X_notrrt['RR_mean'])!=True)[0]]
X_notrrt = X_notrrt.reset_index(drop=True)
X_notrrt = X_notrrt.ix[np.where(pd.isnull(X_notrrt['MAP_mean'])!=True)[0]]
X_notrrt = X_notrrt.reset_index(drop=True)
X_notrrt = X_notrrt.ix[np.where(pd.isnull(X_notrrt['temp_mean'])!=True)[0]]
X_notrrt = X_notrrt.reset_index(drop=True)
X_notrrt = X_notrrt.ix[np.where(pd.isnull(X_notrrt['SPO2_mean'])!=True)[0]]
X_notrrt = X_notrrt.reset_index(drop=True)
all_cols = ['age', 'sex', 'obese', 'smoker', 'prev_rrt', 'on_iv', 'bu-nal',
'DBP_mean', 'DBP_recent', 'SBP_mean', 'SBP_recent', 'MAP_mean',
'MAP_recent', 'temp_mean', 'temp_recent', 'SPO2_mean',
'SPO2_recent', 'RR_mean', 'RR_recent', 'pulse_mean', 'pulse_recent',
'anticoagulants', 'narcotics', 'narc-ans', 'antipsychotics',
'chemo', 'dialysis', 'race']
show_df_nans(X_notrrt, all_cols)
```
### Still need to deal with nans in X_rrt. Temp & pulse are of most concern
```
X_rrt[['temp_mean', 'pulse_mean']].describe().T
```
### We'll impute missing values in X_rrt after combining that data with X_notrrt, and use the mean from each column after merging to fill the values.
```
# add labels to indicate positive or negative class
X_rrt['label'] = 1
X_notrrt['label'] = 0
# Combine the tables
XY = pd.concat([X_rrt, X_notrrt])
XY = XY.reset_index(drop=True)
y = XY.pop('label')
X = XY
# Fill nans with mean of columns
X = X.fillna(X.mean())
# map genders to 1/0
X['is_male'] = X['sex'].map({'M': 1, 'F': 0})
X.pop('sex')
X.race.value_counts()
# we won't use race in modeling
X.pop('race')
show_df_nans(X, vitals_cols)
X.columns
X.describe().T
```
# Modeling
```
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
print len(y_train)
print len(y_train[y_train==1])
len(y_test[y_test==1])
Xscaled = StandardScaler().fit_transform(X)
Xs_train, Xs_test, ys_train, ys_test = train_test_split(Xscaled, y, test_size=0.3)
```
## Gradient Boosting Classifier - Unscaled (with partial dependence plots below)
```
paramGrid = {'n_estimators': [100, 200, 300],
'learning_rate': [0.1, 0.05, 0.01, 0.2],
'max_depth': [3, 4, 5, 6],
'min_samples_leaf': [1, 2],
'subsample': [0.75, 1.0, 0.85],
'loss': ['deviance'],
'max_features': [None, 'auto']
}
gs = GridSearchCV(GradientBoostingClassifier(),
param_grid=paramGrid,
scoring='roc_auc',
n_jobs=-1,
cv=5,
verbose=10)
gs.fit(X_train, y_train)
# Result:
# GradientBoostingClassifier(init=None, learning_rate=0.05, loss='deviance',
# max_depth=3, max_features=None, max_leaf_nodes=None,
# min_samples_leaf=2, min_samples_split=2,
# min_weight_fraction_leaf=0.0, n_estimators=300,
# presort='auto', random_state=None, subsample=0.75, verbose=0,
# warm_start=False)
```
## Grid search for best GBC - Scaled (with partial dependence plots below)
```
paramGrid = {'n_estimators': [100, 200, 300],
'learning_rate': [0.1, 0.05, 0.01, 0.2],
'max_depth': [3, 4, 5, 6],
'min_samples_leaf': [1, 2],
'subsample': [0.75, 1.0, 0.85],
'loss': ['deviance'],
'max_features': [None, 'auto']
}
gss = GridSearchCV(GradientBoostingClassifier(),
param_grid=paramGrid,
scoring='roc_auc',
n_jobs=-1,
cv=5,
verbose=10)
gss.fit(Xs_train, ys_train)
# Result:
# GradientBoostingClassifier(init=None, learning_rate=0.05, loss='deviance',
# max_depth=3, max_features='auto', max_leaf_nodes=None,
# min_samples_leaf=1, min_samples_split=2,
# min_weight_fraction_leaf=0.0, n_estimators=300,
# presort='auto', random_state=None, subsample=0.75, verbose=0,
# warm_start=False)
```
## How different are best estimators for scaled & unscaled data?
```
gbc = GradientBoostingClassifier(init=None, learning_rate=0.05, loss='deviance',
max_depth=3, max_features=None, max_leaf_nodes=None,
min_samples_leaf=2, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=300,
presort='auto', random_state=None, subsample=0.75, verbose=0,
warm_start=False)
gbc.fit(X_train, y_train)
score_printout(X_test, y_test, gbc)
print classification_report(y_test, gbc.predict(X_test))
confusion_matrix(y_test, gbc.predict(X_test))
# gbcs = gss.best_estimator_
# gbcs.fit(Xs_train, ys_train)
# score_printout(Xs_test, ys_test, gbc)
# print classification_report(ys_test, gbcs.predict(Xs_test))
# confusion_matrix(ys_test, gbcs.predict(Xs_test))
```
### Use unscaled data -- better results & easier interpretability
```
# Let's plot the confusion matrix so it's a little clearer
plt.figure()
sns.set(font_scale=1.5)
sns.heatmap(confusion_matrix(y_test, gbc.predict(X_test)), annot=True, fmt='d')
```
## Let's look at the most important features in this model
```
gbcRankedFeatures = sorted(zip(X.columns, gbc.feature_importances_),
key=lambda pair: pair[1],
reverse=False)
plt.figure()
make_feature_importance_plot(gbcRankedFeatures, 27) # note - we have 27 features currently
```
### Let's look at partial dependence plots
#### If the partial dependence is high for a given value of a feature, the model is more likely to predict an RRT result at that value.
#### Partial dependence plots will not show more complex interactions -- if a feature's importance is high but its partial dependence is marginal, this may be due to interactions with other features
```
fig, axs = plot_partial_dependence(gbc, X_train, range(0, 6, 1), feature_names=X.columns.get_values(), n_jobs=-1, grid_resolution=50)
plt.subplots_adjust(top=0.9)
fig, axs = plot_partial_dependence(gbc, X_train, range(6, 12, 1), feature_names=X.columns.get_values(), n_jobs=-1, grid_resolution=50)
plt.subplots_adjust(top=0.9)
fig, axs = plot_partial_dependence(gbc, X_train, range(12, 18, 1), feature_names=X.columns.get_values(), n_jobs=-1, grid_resolution=50)
plt.subplots_adjust(top=0.9)
fig, axs = plot_partial_dependence(gbc, X_train, range(18, 24, 1), feature_names=X.columns.get_values(), n_jobs=-1, grid_resolution=50)
plt.subplots_adjust(top=0.9)
fig, axs = plot_partial_dependence(gbc, X_train, range(24, 27, 1), feature_names=X.columns.get_values(), n_jobs=-1, grid_resolution=50)
plt.subplots_adjust(top=0.9)
```
## Use 3-D plot to investigate feature interactions for weak partial dependence plots... (weak effect may be masked by stronger interaction with other features)
```
names = X_train.columns
zip(range(len(names)), names)
from mpl_toolkits.mplot3d import Axes3D
# not all features may work for this viz
fig = plt.figure(figsize=(10,8))
target_feature = (16, 18) # <-- change the two numbers here to determine what to plot up
pdp, (x_axis, y_axis) = partial_dependence(gbc, target_feature, X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('')
plt.subplots_adjust(top=0.9)
plt.show()
```
## From Model to Risk Score
```
# Return probabilities from the model, rather than predictions
y_proba = gbc.predict_proba(X_test)
# note - y_proba contains probabilities for class 0 in column 0 & probabilities for class 1 in column 1.
# we're only interested in the probability for class 1
y_proba
pred_probs = pd.DataFrame(data=y_proba[:,1], columns =["model_probability_of_rrt"], index = X_test.index)
X_test.head()
y_test.head()
pred_probs['model_probability_of_rrt'] = pd.to_numeric(pred_probs.model_probability_of_rrt)
pred_probs.hist(bins = 20, xlabelsize = 16, ylabelsize=16)
plt.tick_params(labelsize=14)
plt.title("Model output probabilities")
plt.ylabel('Count', fontsize=14)
```
### We see that although many values are close to 0 and 1, the model outputs a full range of probabilities, which translates well into risk scores.
### Patient Risk Score = model probability * 10
The score should be rounded to whole values to give the sense that this is not an exact measure.
```
pred_probs['score'] = pred_probs['model_probability_of_rrt'].apply(lambda x: int(round(x*10.0, 0)))
pred_probs.head()
pred_probs.score.value_counts()
```
### Save model
```
from sklearn.externals import joblib
# joblib.dump(gbc, 'gbc_base.pkl') # note - if left uncompressed, this writes a whole lot of supporting numpy files.
joblib.dump(gbc, 'my_trained_model.compressed', compress=True)
# to unpack: joblib.load(filename)
```
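A minimal sketch of how the persisted model could be reused later to score a new patient (assuming the compressed file above is available and the new data has the same 27 columns in the same order):
```
# sketch: reload the persisted model and compute a risk score for one patient
gbc_restored = joblib.load('my_trained_model.compressed')
one_patient = X_test.iloc[[0]]  # stand-in for a new patient row
risk_score = int(round(gbc_restored.predict_proba(one_patient)[0, 1] * 10.0, 0))
print "Risk score for this patient:", risk_score
```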
### Save modeling table
```
# Create combined data frame including modeling table, rrt label, and proability associated with result
df = pd.concat([X_test, pred_probs, y_test],axis=1, join_axes=[X_test.index])
df.head()
# May need to rename columns to get rid of dash in name...
df.rename(columns={'bu-nal': 'bu_nal', 'narc-ans': 'narc_ans'}, inplace=True)
df.to_csv('ModelingTable_with_results.csv')
```
# Multipitch tracking using Echo State Networks
## Introduction
In this notebook, we demonstrate how the ESN can deal with multipitch tracking, a challenging multilabel classification problem in music analysis.
As this is a computationally expensive task, we have pre-trained models to serve as an entry point.
At first, we import all packages required for this task. You can find the import statements below.
```
import time
import numpy as np
import os
import csv
from sklearn.base import clone
from sklearn.metrics import make_scorer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from joblib import dump, load
import librosa
from madmom.processors import SequentialProcessor, ParallelProcessor
from madmom.audio import SignalProcessor, FramedSignalProcessor
from madmom.audio.stft import ShortTimeFourierTransformProcessor
from madmom.audio.filters import LogarithmicFilterbank
from madmom.audio.spectrogram import FilteredSpectrogramProcessor, LogarithmicSpectrogramProcessor, SpectrogramDifferenceProcessor
from pyrcn.util import FeatureExtractor
from pyrcn.echo_state_network import SeqToSeqESNClassifier
from pyrcn.datasets import fetch_maps_piano_dataset
from pyrcn.metrics import accuracy_score
from pyrcn.model_selection import SequentialSearchCV
from matplotlib import pyplot as plt
from matplotlib import ticker
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams["font.size"] = 10
%matplotlib inline
import pandas as pd
import seaborn as sns
from mir_eval import multipitch
```
## Feature extraction
The acoustic features extracted from the input signal are obtained by filtering short-term spectra (window length 4096 samples and hop size 10 ms) with a bank of triangular filters in the frequency domain with log-spaced frequencies. The frequency range was 30 Hz to 17 000 Hz and we used 12 filters per octave. We used logarithmic magnitudes and added 1 inside the logarithm to ensure a minimum value of 0 for a frame without energy. The first derivative between adjacent frames was added in order to enrich the features by temporal information. Binary labels indicating absent (value 0) or present (value 1) pitches for each frame are assigned to each frame. Note that this task is a multilabel classification. Each MIDI pitch is a separate class, and multiple or no classes can be active at a discrete frame index.
For a more detailed description, please have a look in our repository ([https://github.com/TUD-STKS/Automatic-Music-Transcription](https://github.com/TUD-STKS/Automatic-Music-Transcription)) with several detailed examples for music analysis tasks.
```
def create_feature_extraction_pipeline(sr=44100, frame_sizes=[1024, 2048, 4096], fps_hz=100.):
audio_loading = Pipeline([("load_audio", FeatureExtractor(librosa.load, sr=sr, mono=True)),
("normalize", FeatureExtractor(librosa.util.normalize, norm=np.inf))])
sig = SignalProcessor(num_channels=1, sample_rate=sr)
multi = ParallelProcessor([])
for frame_size in frame_sizes:
frames = FramedSignalProcessor(frame_size=frame_size, fps=fps_hz)
stft = ShortTimeFourierTransformProcessor() # caching FFT window
filt = FilteredSpectrogramProcessor(filterbank=LogarithmicFilterbank, num_bands=12, fmin=30, fmax=17000,
norm_filters=True, unique_filters=True)
spec = LogarithmicSpectrogramProcessor(log=np.log10, mul=5, add=1)
diff = SpectrogramDifferenceProcessor(diff_ratio=0.5, positive_diffs=True, stack_diffs=np.hstack)
# process each frame size with spec and diff sequentially
multi.append(SequentialProcessor([frames, stft, filt, spec, diff]))
feature_extractor = FeatureExtractor(SequentialProcessor([sig, multi, np.hstack]))
feature_extraction_pipeline = Pipeline([("audio_loading", audio_loading),
("feature_extractor", feature_extractor)])
return feature_extraction_pipeline
```
## Load and preprocess the dataset
This might require a large amount of time and memory.
```
# Load and preprocess the dataset
feature_extraction_pipeline = create_feature_extraction_pipeline(sr=44100, frame_sizes=[2048], fps_hz=100)
# New object -> PyTorch dataloader / Matlab datastore
X_train, X_test, y_train, y_test = fetch_maps_piano_dataset(data_origin="/projects/p_transcriber/MAPS",
data_home=None, preprocessor=feature_extraction_pipeline,
force_preprocessing=False, label_type="pitch")
def tsplot(ax, data,**kw):
x = np.arange(data.shape[1])
est = np.mean(data, axis=0)
sd = np.std(data, axis=0)
cis = (est - sd, est + sd)
ax.fill_between(x,cis[0],cis[1],alpha=0.2, **kw)
ax.plot(x,est,**kw)
ax.margins(x=0)
fig, ax = plt.subplots()
fig.set_size_inches(4, 1.25)
tsplot(ax, np.concatenate(np.hstack((X_train, X_test))))
ax.set_xlabel('Feature Index')
ax.set_ylabel('Magnitude')
plt.grid()
plt.savefig('features_statistics.pdf', bbox_inches='tight', pad_inches=0)
```
## Set up a ESN
To develop an ESN model for multipitch tracking, we need to tune several hyper-parameters, e.g., input_scaling, spectral_radius, bias_scaling and leaky integration.
We follow the way proposed in the paper for multipitch tracking and for acoustic modeling of piano music to optimize hyper-parameters sequentially.
We define the search spaces for each step together with the type of search (a grid search in this context).
At last, we initialize a SeqToSeqESNClassifier with the desired output strategy and with the initially fixed parameters.
```
initially_fixed_params = {'hidden_layer_size': 500,
'input_activation': 'identity',
'k_in': 10,
'bias_scaling': 0.0,
'reservoir_activation': 'tanh',
'leakage': 1.0,
'bi_directional': False,
'k_rec': 10,
'wash_out': 0,
'continuation': False,
'alpha': 1e-5,
'random_state': 42}
step1_esn_params = {'leakage': np.linspace(0.1, 1.0, 10)}
kwargs_1 = {'random_state': 42, 'verbose': 2, 'n_jobs': 70, 'pre_dispatch': 70, 'n_iter': 14,
'scoring': make_scorer(accuracy_score)}
step2_esn_params = {'input_scaling': np.linspace(0.1, 1.0, 10),
'spectral_radius': np.linspace(0.0, 1.5, 16)}
step3_esn_params = {'bias_scaling': np.linspace(0.0, 2.0, 21)}
kwargs_2_3 = {'verbose': 2, 'pre_dispatch': 70, 'n_jobs': 70,
'scoring': make_scorer(accuracy_score)}
# The searches are defined similarly to the steps of a sklearn.pipeline.Pipeline:
searches = [('step1', GridSearchCV, step1_esn_params, kwargs_1),
('step2', GridSearchCV, step2_esn_params, kwargs_2_3),
('step3', GridSearchCV, step3_esn_params, kwargs_2_3)]
base_esn = SeqToSeqESNClassifier(**initially_fixed_params)
```
## Optimization
We provide a SequentialSearchCV that basically iterates through the list of searches that we have defined before. It can be combined with any model selection tool from scikit-learn.
```
try:
sequential_search = load("sequential_search_ll.joblib")
except FileNotFoundError:
print(FileNotFoundError)
sequential_search = SequentialSearchCV(base_esn, searches=searches).fit(X_train, y_train)
dump(sequential_search, "sequential_search_ll.joblib")
```
## Visualize hyper-parameter optimization
```
df = pd.DataFrame(sequential_search.all_cv_results_["step1"])
fig = plt.figure()
fig.set_size_inches(2, 1.25)
ax = sns.lineplot(data=df, x="param_leakage", y="mean_test_score")
plt.xlabel("Leakage")
plt.ylabel("Score")
# plt.xlim((0, 1))
tick_locator = ticker.MaxNLocator(5)
ax.xaxis.set_major_locator(tick_locator)
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.4f'))
plt.grid()
plt.savefig('optimize_leakage.pdf', bbox_inches='tight', pad_inches=0)
df = pd.DataFrame(sequential_search.all_cv_results_["step2"])
pvt = pd.pivot_table(df,
values='mean_test_score', index='param_input_scaling', columns='param_spectral_radius')
pvt.columns = pvt.columns.astype(float)
pvt2 = pd.DataFrame(pvt.loc[pd.IndexSlice[0:1], pd.IndexSlice[0.0:1.0]])
fig = plt.figure()
ax = sns.heatmap(pvt2, xticklabels=pvt2.columns.values.round(2), yticklabels=pvt2.index.values.round(2), cbar_kws={'label': 'Score'})
ax.invert_yaxis()
plt.xlabel("Spectral Radius")
plt.ylabel("Input Scaling")
fig.set_size_inches(4, 2.5)
tick_locator = ticker.MaxNLocator(10)
ax.yaxis.set_major_locator(tick_locator)
ax.xaxis.set_major_locator(tick_locator)
plt.savefig('optimize_is_sr.pdf', bbox_inches='tight', pad_inches=0)
df = pd.DataFrame(sequential_search.all_cv_results_["step3"])
fig = plt.figure()
fig.set_size_inches(2, 1.25)
ax = sns.lineplot(data=df, x="param_bias_scaling", y="mean_test_score")
plt.xlabel("Bias Scaling")
plt.ylabel("Score")
plt.xlim((0, 2))
tick_locator = ticker.MaxNLocator(5)
ax.xaxis.set_major_locator(tick_locator)
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.5f'))
plt.grid()
plt.savefig('optimize_bias_scaling.pdf', bbox_inches='tight', pad_inches=0)
```
## Test the ESN
Finally, we test the ESN on unseen data.
```
def _midi_to_frequency(p):
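    # standard MIDI-to-Hz conversion: A4 (MIDI pitch 69) corresponds to 440 Hz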
return 440. * (2 ** ((p-69)/12))
def get_mir_eval_rows(y, fps=100.):
time_t = np.arange(len(y)) / fps
freq_hz = [_midi_to_frequency(np.asarray(np.nonzero(row))).ravel() for row in y]
return time_t, freq_hz
esn = sequential_search.best_estimator_
y_test_pred = esn.predict_proba(X=X_test)
scores = np.zeros(shape=(9, 14))
for k, thr in enumerate(np.linspace(0.1, 0.9, 9)):
res = []
for y_true, y_pred in zip(y_test, y_test_pred):
times_res, freqs_hz_res = get_mir_eval_rows(y_pred[:, 1:]>thr, fps=100.)
times_ref, freqs_hz_ref = get_mir_eval_rows(y_true[:, 1:]>thr, fps=100.)
res.append(multipitch.metrics(ref_time=times_ref, ref_freqs=freqs_hz_ref, est_time=times_res, est_freqs=freqs_hz_res))
scores[k, :] = np.mean(res, axis=0)
plt.plot(np.linspace(0.1, 0.9, 9), scores[:, :3])
plt.plot(np.linspace(0.1, 0.9, 9), 2*scores[:, 0]*scores[:, 1] / (scores[:, 0] + scores[:, 1]))
plt.xlabel("Threshold")
plt.ylabel("Scores")
plt.xlim((0.1, 0.9))
plt.legend(("Precision", "Recall", "Accuracy", "F1-Score"))
np.mean(list(sequential_search.all_refit_time_.values()))
t1 = time.time()
esn = clone(sequential_search.best_estimator_).fit(X_train, y_train, n_jobs=8)
print("Fitted in {0} seconds".format(time.time() - t1))
t1 = time.time()
esn = clone(sequential_search.best_estimator_).fit(X_train, y_train)
print("Fitted in {0} seconds".format(time.time() - t1))
```
```
%pylab inline
import pandas as pd
import plotnine as p
p.theme_set(p.theme_classic())
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.spines.right'] = False
counts = pd.read_parquet('mca_brain_counts.parquet')
sample_info = pd.read_parquet('mca_brain_cell_info.parquet')
```
### Differential expression
Now let us investigate how this count depth effect plays in to a differential expression analysis. With all published large scale experiments cataloging cell types, it is getting increasingly easy to simply fetch some data and do quick comparisons. We will use data from the recent [single cell Mouse Cell Atlas][paper link]. To get something easy to compare, we use the samples called "Brain" and focus on the cells annotated as "Microglia" and "Astrocyte". Out of the ~400,000 cells in the study, these two cell types have 338 and 199 representative cells. On average they have about 700 total UMI counts each, so while the entire study is a pretty large scale, the individual cell types and cells are on a relatively small scale. The final table has 537 cells and 21,979 genes.
[paper link]: http://www.cell.com/cell/abstract/S0092-8674(18)30116-8
```
sample_info['super_cell_type'].value_counts()
sub_samples = sample_info.query('super_cell_type in ["Microglia", "Astrocyte"]').copy()
sub_counts = counts.reindex(index=sub_samples.index)
sub_counts.shape
sub_samples['is_astrocyte'] = sub_samples['super_cell_type'] == 'Astrocyte'
import NaiveDE
sub_samples['total_count'] = sub_counts.sum(1)
figsize(11, 3)
sub_samples.total_count.hist(grid=False, fc='w', ec='k')
sub_samples.total_count.median(), sub_samples.total_count.mean()
print(sub_samples.head())
```
In a differential expression test you simply include a covariate in the design matrix that informs the linear model about the different conditions you want to compare. Here we are comparing microglia and astrocytes.
```
%%time
lr_results = NaiveDE.lr_tests(sub_samples, np.log1p(sub_counts.T),
alt_model='C(is_astrocyte) + np.log(total_count) + 1',
null_model='np.log(total_count) + 1')
lr_results.pval = lr_results.pval.clip_lower(lr_results.query('pval != 0')['pval'].min())
lr_results.qval = lr_results.qval.clip_lower(lr_results.query('qval != 0')['qval'].min())
print(lr_results.sort_values('pval').head())
example_genes = ['Apoe', 'Sparcl1', 'Tmsb4x', 'C1qa']
examples = lr_results.loc[example_genes]
img = \
p.qplot('C(is_astrocyte)[T.True]', '-np.log10(pval)', lr_results) \
+ p.annotate('text',
x=examples['C(is_astrocyte)[T.True]'] + 0.33,
y=-np.log10(examples['pval']),
label=examples.index) \
+ p.labs(title='Brain cell data')
img.save('4.png', verbose=False)
img
img = \
p.qplot('C(is_astrocyte)[T.True]', 'np.log(total_count)', lr_results) \
+ p.annotate('text',
x=examples['C(is_astrocyte)[T.True]'] + 0.33,
y=examples['np.log(total_count)'],
label=examples.index) \
+ p.labs(title='Brain cell data')
img.save('5.png', verbose=False)
img
print(lr_results.sort_values('C(is_astrocyte)[T.True]').head())
print(lr_results.sort_values('C(is_astrocyte)[T.True]').tail())
```
Also in this case we can see that the count depth weights are deflated for lowly abundant genes.
```
img = \
p.qplot(sub_counts.sum(0).clip_lower(1), lr_results['np.log(total_count)'],
log='x') \
+ p.labs(x='Gene count across dataset', y='np.log(total_count)',
title='Brain cell data')
img.save('6.png', verbose=False)
img
xx = np.linspace(np.log(sub_samples.total_count.min()),
np.log(sub_samples.total_count.max()))
def linres(gene):
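    # predicted counts over the count-depth range: yy1 for the baseline (microglia), yy2 with the astrocyte coefficient added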
yy = \
lr_results.loc[gene, 'np.log(total_count)'] * xx \
+ lr_results.loc[gene, 'Intercept']
yy1 = np.exp(yy)
yy2 = np.exp(yy + lr_results.loc[gene, 'C(is_astrocyte)[T.True]'])
return yy1, yy2
```
Similar to above, we can look at the relation between count depth and observed counts for a few genes, but this time we also plot the stratification into the two cell types and how the regression models predict the counts.
```
figsize(11, 3)
ax = plt.gca()
for i, gene in enumerate(['Apoe', 'Sparcl1', 'Tmsb4x', 'C1qa']):
sub_samples['gene'] = counts[gene]
plt.subplot(1, 4, i + 1, sharey=ax)
if i == 0:
plt.ylabel('Counts + 1')
plt.loglog()
plt.scatter(sub_samples.loc[~sub_samples.is_astrocyte]['total_count'],
sub_samples.loc[~sub_samples.is_astrocyte]['gene'] + 1,
c='grey', marker='o', label='Microglia')
plt.scatter(sub_samples.loc[sub_samples.is_astrocyte]['total_count'],
sub_samples.loc[sub_samples.is_astrocyte]['gene'] + 1,
c='k', marker='x', label='Astrocyte')
yy1, yy2 = linres(gene)
plt.plot(np.exp(xx), yy1, c='w', lw=5)
plt.plot(np.exp(xx), yy1, c='r', lw=3, ls=':')
plt.plot(np.exp(xx), yy2, c='w', lw=5)
plt.plot(np.exp(xx), yy2, c='r', lw=3)
plt.title(gene)
plt.xlabel('Total counts')
plt.legend(scatterpoints=3);
plt.tight_layout()
plt.savefig('7.png', bbox_inches='tight')
```
Again we can see that the overall abundance is related to the slope of the lines. Another thing which seems to pop out in these plots is an interaction between cell type and slope. For example, looking at C1qa, the slope for the microglia seems underestimated. This makes sense if this is an effect of count noise at low abundances.
My takeaway from this is that OLS regression might be OK if counts are large, but at lower levels model parameters are not estimated correctly due to the count nature of the data.
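One way to respect the count nature of the data (sketched here only as an illustration, not as part of the original analysis, and assuming statsmodels and patsy are installed) is to move the count depth from a covariate to an offset in a count model, for example a Poisson GLM fit for a single gene:
```
# illustration only: Poisson GLM with log(total_count) as an offset, for one gene
import statsmodels.api as sm
import patsy

design = patsy.dmatrix('C(is_astrocyte)', sub_samples, return_type='dataframe')
glm = sm.GLM(sub_counts['Apoe'], design,
             offset=np.log(sub_samples['total_count']),
             family=sm.families.Poisson())
print(glm.fit().summary())
```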
Notebooks of the analysis in this post are available [here](https://github.com/vals/Blog/tree/master/180226-count-offsets).
<table> <tr>
<td style="background-color:#ffffff;">
<a href="http://qworld.lu.lv" target="_blank"><img src="../images/qworld.jpg" width="50%" align="left"> </a></td>
<td width="70%" style="background-color:#ffffff;vertical-align:bottom;text-align:right;">
prepared by Maksim Dimitrijev(<a href="http://qworld.lu.lv/index.php/qlatvia/">QLatvia</a>)
and Özlem Salehi (<a href="http://qworld.lu.lv/index.php/qturkey/">QTurkey</a>)
</td>
</tr></table>
<table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>
$ \newcommand{\bra}[1]{\langle #1|} $
$ \newcommand{\ket}[1]{|#1\rangle} $
$ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
$ \newcommand{\dot}[2]{ #1 \cdot #2} $
$ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
$ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
$ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
$ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
$ \newcommand{\mypar}[1]{\left( #1 \right)} $
$ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
$ \newcommand{\onehalf}{\frac{1}{2}} $
$ \newcommand{\donehalf}{\dfrac{1}{2}} $
$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
$ \newcommand{\vzero}{\myvector{1\\0}} $
$ \newcommand{\vone}{\myvector{0\\1}} $
$ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $
$ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
$ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
$ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $
$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
$ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
$ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $
<h2> <font color="blue"> Solutions for </font>Grover's Search: Implementation</h2>
<a id="task2"></a>
<h3>Task 2</h3>
Let $N=4$. Implement the query phase and check the unitary matrix for the query operator. Note that we are interested in the top-left $4 \times 4$ part of the matrix since the remaining parts are due to the ancilla qubit.
You are given a function $f$ and its corresponding quantum operator $U_f$. First run the following cell to load operator $U_f$. Then you can make queries to $f$ by applying the operator $U_f$ via the following command:
<pre>Uf(circuit,qreg)</pre>
```
%run ../include/quantum.py
```
Now use phase kickback to flip the sign of the marked element:
<ul>
<li>Set output qubit (qreg[2]) to $\ket{-}$ by applying X and H.</li>
<li>Apply operator $U_f$
<li>Set output qubit (qreg[2]) back.</li>
</ul>
(Can you guess the marked element by looking at the unitary matrix?)
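Recall why this works: with the output qubit prepared in $\ket{-}$, the query operator leaves the output qubit unchanged and pushes $f(x)$ into the phase,
$$ U_f \ket{x}\ket{-} = \ket{x} \otimes \dsqrttwo \mypar{ \ket{f(x)} - \ket{1 \oplus f(x)} } = (-1)^{f(x)} \ket{x}\ket{-}, $$
so only the amplitude of the marked element (the $x$ with $f(x)=1$) has its sign flipped.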
<h3>Solution</h3>
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
qreg = QuantumRegister(3)
#No need to define classical register as we are not measuring
mycircuit = QuantumCircuit(qreg)
#set ancilla
mycircuit.x(qreg[2])
mycircuit.h(qreg[2])
Uf(mycircuit,qreg)
#set ancilla back
mycircuit.h(qreg[2])
mycircuit.x(qreg[2])
job = execute(mycircuit,Aer.get_backend('unitary_simulator'))
u=job.result().get_unitary(mycircuit,decimals=3)
#We are interested in the top-left 4x4 part
for i in range(4):
s=""
for j in range(4):
val = str(u[i][j].real)
while(len(val)<5): val = " "+val
s = s + val
print(s)
mycircuit.draw(output='mpl')
```
<a id="task3"></a>
<h3>Task 3</h3>
Let $N=4$. Implement the inversion operator and check whether you obtain the following matrix:
$\mymatrix{cccc}{-0.5 & 0.5 & 0.5 & 0.5 \\ 0.5 & -0.5 & 0.5 & 0.5 \\ 0.5 & 0.5 & -0.5 & 0.5 \\ 0.5 & 0.5 & 0.5 & -0.5}$.
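This target matrix is the inversion-about-the-mean (diffusion) operator
$$ D = 2\ket{u}\bra{u} - I, \qquad \ket{u} = \frac{1}{2}\mypar{ \ket{00} + \ket{01} + \ket{10} + \ket{11} }, $$
whose diagonal entries are $\frac{2}{N}-1 = -0.5$ and off-diagonal entries are $\frac{2}{N} = 0.5$ for $N=4$.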
<h3>Solution</h3>
```
def inversion(circuit,quantum_reg):
#step 1
circuit.h(quantum_reg[1])
circuit.h(quantum_reg[0])
#step 2
circuit.x(quantum_reg[1])
circuit.x(quantum_reg[0])
#step 3
circuit.ccx(quantum_reg[1],quantum_reg[0],quantum_reg[2])
#step 4
circuit.x(quantum_reg[1])
circuit.x(quantum_reg[0])
#step 5
circuit.x(quantum_reg[2])
#step 6
circuit.h(quantum_reg[1])
circuit.h(quantum_reg[0])
```
Below you can check the matrix of your inversion operator and what the circuit looks like. We are interested in the top-left $4 \times 4$ part of the matrix; the remaining parts are there because we used an ancilla qubit.
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
qreg1 = QuantumRegister(3)
mycircuit1 = QuantumCircuit(qreg1)
#set ancilla qubit
mycircuit1.x(qreg1[2])
mycircuit1.h(qreg1[2])
inversion(mycircuit1,qreg1)
#set ancilla qubit back
mycircuit1.h(qreg1[2])
mycircuit1.x(qreg1[2])
job = execute(mycircuit1,Aer.get_backend('unitary_simulator'))
u=job.result().get_unitary(mycircuit1,decimals=3)
for i in range(4):
s=""
for j in range(4):
val = str(u[i][j].real)
while(len(val)<5): val = " "+val
s = s + val
print(s)
mycircuit1.draw(output='mpl')
```
<a id="task4"></a>
<h3>Task 4: Testing Grover's search</h3>
Now we are ready to test our operations and run Grover's search. Suppose that there are 4 elements in the list and try to find the marked element.
You are given the operator $U_f$. First run the following cell to load it. You can access it via <pre>Uf(circuit,qreg).</pre>
qreg[2] is the ancilla qubit and it is shared by the query and the inversion operators.
Which state do you observe the most?
```
%run ../include/quantum.py
```
<h3>Solution</h3>
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
qreg = QuantumRegister(3)
creg = ClassicalRegister(2)
mycircuit = QuantumCircuit(qreg,creg)
#Grover
#initial step - equal superposition
for i in range(2):
mycircuit.h(qreg[i])
#set ancilla
mycircuit.x(qreg[2])
mycircuit.h(qreg[2])
mycircuit.barrier()
#change the number of iterations
iterations=1
#Grover's iterations.
for i in range(iterations):
#query
Uf(mycircuit,qreg)
mycircuit.barrier()
#inversion
inversion(mycircuit,qreg)
mycircuit.barrier()
#set ancilla back
mycircuit.h(qreg[2])
mycircuit.x(qreg[2])
mycircuit.measure(qreg[0],creg[0])
mycircuit.measure(qreg[1],creg[1])
job = execute(mycircuit,Aer.get_backend('qasm_simulator'),shots=10000)
counts = job.result().get_counts(mycircuit)
# print the outcome
for outcome in counts:
print(outcome,"is observed",counts[outcome],"times")
mycircuit.draw(output='mpl')
```
<a id="task5"></a>
<h3>Task 5 (Optional, challenging)</h3>
Implement the inversion operation for $n=3$ ($N=8$). This time you will need 5 qubits - 3 for the operation, 1 for the ancilla, and one more qubit to implement the NOT gate controlled by three qubits.
In the implementation the ancilla qubit will be qubit 3, while qubits for control are 0, 1 and 2; qubit 4 is used for the multiple control operation. As a result you should obtain the following values in the top-left $8 \times 8$ entries:
$\mymatrix{cccccccc}{-0.75 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 \\ 0.25 & -0.75 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 \\ 0.25 & 0.25 & -0.75 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 \\ 0.25 & 0.25 & 0.25 & -0.75 & 0.25 & 0.25 & 0.25 & 0.25 \\ 0.25 & 0.25 & 0.25 & 0.25 & -0.75 & 0.25 & 0.25 & 0.25 \\ 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & -0.75 & 0.25 & 0.25 \\ 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & -0.75 & 0.25 \\ 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & -0.75}$.
<h3>Solution</h3>
```
def big_inversion(circuit,quantum_reg):
for i in range(3):
circuit.h(quantum_reg[i])
circuit.x(quantum_reg[i])
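    # the three CCX gates below realize a triple-controlled NOT onto the ancilla (qubit 3), using qubit 4 as a work qubit that is uncomputed afterwards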
circuit.ccx(quantum_reg[1],quantum_reg[0],quantum_reg[4])
circuit.ccx(quantum_reg[2],quantum_reg[4],quantum_reg[3])
circuit.ccx(quantum_reg[1],quantum_reg[0],quantum_reg[4])
for i in range(3):
circuit.x(quantum_reg[i])
circuit.h(quantum_reg[i])
circuit.x(quantum_reg[3])
```
Below you can check the matrix of your inversion operator. We are interested in the top-left $8 \times 8$ part of the matrix; the remaining parts are there because of the additional qubits.
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
big_qreg2 = QuantumRegister(5)
big_mycircuit2 = QuantumCircuit(big_qreg2)
#set ancilla
big_mycircuit2.x(big_qreg2[3])
big_mycircuit2.h(big_qreg2[3])
big_inversion(big_mycircuit2,big_qreg2)
#set ancilla back
big_mycircuit2.h(big_qreg2[3])
big_mycircuit2.x(big_qreg2[3])
job = execute(big_mycircuit2,Aer.get_backend('unitary_simulator'))
u=job.result().get_unitary(big_mycircuit2,decimals=3)
for i in range(8):
s=""
for j in range(8):
val = str(u[i][j].real)
while(len(val)<6): val = " "+val
s = s + val
print(s)
```
<a id="task6"></a>
<h3>Task 6: Testing Grover's search for 8 elements (Optional, challenging)</h3>
Now we will test Grover's search on 8 elements.
You are given the operator $U_{f_8}$. First run the following cell to load it. You can access it via:
<pre>Uf_8(circuit,qreg)</pre>
Which state do you observe the most?
```
%run ../include/quantum.py
```
<h3>Solution</h3>
```
def big_inversion(circuit,quantum_reg):
for i in range(3):
circuit.h(quantum_reg[i])
circuit.x(quantum_reg[i])
circuit.ccx(quantum_reg[1],quantum_reg[0],quantum_reg[4])
circuit.ccx(quantum_reg[2],quantum_reg[4],quantum_reg[3])
circuit.ccx(quantum_reg[1],quantum_reg[0],quantum_reg[4])
for i in range(3):
circuit.x(quantum_reg[i])
circuit.h(quantum_reg[i])
circuit.x(quantum_reg[3])
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
qreg8 = QuantumRegister(5)
creg8 = ClassicalRegister(3)
mycircuit8 = QuantumCircuit(qreg8,creg8)
#set ancilla
mycircuit8.x(qreg8[3])
mycircuit8.h(qreg8[3])
#Grover
for i in range(3):
mycircuit8.h(qreg8[i])
mycircuit8.barrier()
#Try 1, 2, 6, 12 iterations of Grover
for i in range(2):
Uf_8(mycircuit8,qreg8)
mycircuit8.barrier()
big_inversion(mycircuit8,qreg8)
mycircuit8.barrier()
#set ancilla back
mycircuit8.h(qreg8[3])
mycircuit8.x(qreg8[3])
for i in range(3):
mycircuit8.measure(qreg8[i],creg8[i])
job = execute(mycircuit8,Aer.get_backend('qasm_simulator'),shots=10000)
counts8 = job.result().get_counts(mycircuit8)
# print the outcome (note that qiskit orders the bits with qubit 0 on the right)
for outcome in counts8:
print(outcome,"is observed",counts8[outcome],"times")
mycircuit8.draw(output='mpl')
```
<a id="task8"></a>
<h3>Task 8</h3>
Implement an oracle function which marks the element 00. Run Grover's search with the oracle you have implemented.
```
def oracle_00(circuit,qreg):
```
<h3>Solution</h3>
```
def oracle_00(circuit,qreg):
circuit.x(qreg[0])
circuit.x(qreg[1])
circuit.ccx(qreg[0],qreg[1],qreg[2])
circuit.x(qreg[0])
circuit.x(qreg[1])
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
qreg = QuantumRegister(3)
creg = ClassicalRegister(2)
mycircuit = QuantumCircuit(qreg,creg)
#Grover
#initial step - equal superposition
for i in range(2):
mycircuit.h(qreg[i])
#set ancilla
mycircuit.x(qreg[2])
mycircuit.h(qreg[2])
mycircuit.barrier()
#change the number of iterations
iterations=1
#Grover's iterations.
for i in range(iterations):
#query
oracle_00(mycircuit,qreg)
mycircuit.barrier()
#inversion
inversion(mycircuit,qreg)
mycircuit.barrier()
#set ancilla back
mycircuit.h(qreg[2])
mycircuit.x(qreg[2])
mycircuit.measure(qreg[0],creg[0])
mycircuit.measure(qreg[1],creg[1])
job = execute(mycircuit,Aer.get_backend('qasm_simulator'),shots=10000)
counts = job.result().get_counts(mycircuit)
# print the reverse of the outcome
for outcome in counts:
reverse_outcome = ''
for i in outcome:
reverse_outcome = i + reverse_outcome
print(reverse_outcome,"is observed",counts[outcome],"times")
mycircuit.draw(output='mpl')
```
Foreign Function Interface
====
```
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import numpy as np
```
Wrapping functions written in C
----
### Steps
- Write the C header and implementation files
- Write the Cython `.pxd` file to declare C function signatures
- Write the Cython `.pyx` file to wrap the C functions for Python
- Write `setup.py` to automate building of the Python extension module
- Run `python setup.py build_ext --inplace` to build the module
- Import module in Python like any other Python module
### C header file
```
%%file c_math.h
#pragma once
double plus(double a, double b);
double mult(double a, double b);
double square(double a);
double acc(double *xs, int size);
```
### C implementation file
```
%%file c_math.c
#include <math.h>
#include "c_math.h"
double plus(double a, double b) {
return a + b;
};
double mult(double a, double b) {
return a * b;
};
double square(double a) {
return pow(a, 2);
};
double acc(double *xs, int size) {
double s = 0;
for (int i=0; i<size; i++) {
s += xs[i];
}
return s;
};
```
### Cython "header" file
The `.pxd` file is similar to a header file for Cython. In other words, we can `cimport <filename>.pxd` in the regular Cython `.pyx` files to get access to functions declared in the `.pxd` files.
```
%%file cy_math.pxd
cdef extern from "c_math.h":
double plus(double a, double b)
double mult(double a, double b)
double square(double a)
double acc(double *xs, int size)
```
### Cython "implementation" file
Here is where we actually wrap the C code for use in Python. Note especially how we pass arrays to a C function expecting a pointer to double by using `typed memoryviews`.
```
%%file cy_math.pyx
cimport cy_math
def py_plus(double a, double b):
return cy_math.plus(a, b)
def py_mult(double a, double b):
return cy_math.mult(a, b)
def py_square(double a):
return cy_math.square(a)
def py_sum(double[::1] xs):
cdef int size = len(xs)
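    # xs is a typed memoryview over contiguous memory, so &xs[0] is a valid double* for the C function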
return cy_math.acc(&xs[0], size)
```
### Build script `setup.py`
This is a build script for Python, similar to a Makefile
```
%%file setup.py
from distutils.core import setup, Extension
from Cython.Build import cythonize
import numpy as np
ext = Extension("cy_math",
sources=["cy_math.pyx", "c_math.c"],
libraries=["m"],
extra_compile_args=["-w", "-std=c99"])
setup(name = "Math Funcs",
ext_modules = cythonize(ext))
```
### Building an extension module
```
! python setup.py clean
! python setup.py -q build_ext --inplace
! ls cy_math*
```
### Using the extension module in Python
```
import cy_math
import numpy as np
print(cy_math.py_plus(3, 4))
print(cy_math.py_mult(3, 4))
print(cy_math.py_square(3))
xs = np.arange(10, dtype='float')
print(cy_math.py_sum(xs))
```
### Confirm that we are getting C speedups by comparing with pure Python accumulator
```
def acc(xs):
s = 0
for x in xs:
s += x
return s
import cy_math
xs = np.arange(1000000, dtype='float')
%timeit -r3 -n3 acc(xs)
%timeit -r3 -n3 cy_math.py_sum(xs)
```
C++
----
This is similar to C. We will use Cython to wrap a simple function.
```
%%file add.hpp
#pragma once
int add(int a, int b);
%%file add.cpp
int add(int a, int b) {
return a+b;
}
%%file plus.pyx
cdef extern from 'add.cpp':
int add(int a, int b)
def plus(a, b):
return add(a, b)
```
#### Note that essentially the only difference from C is `language="C++"` and the flag `-std=c++11`
```
%%file setup.py
from distutils.core import setup, Extension
from Cython.Build import cythonize
ext = Extension("plus",
sources=["plus.pyx", "add.cpp"],
extra_compile_args=["-w", "-std=c++11"])
setup(
ext_modules = cythonize(
ext,
language="c++",
))
%%bash
python setup.py -q build_ext --inplace
import plus
plus.plus(3, 4)
```
Wrap an R function from libRmath using `ctypes`
----
R comes with a standalone C library of special functions and distributions, as described in the official documentation. These functions can be wrapped for use in Python.
### Building the Rmath standalone library
```bash
git clone https://github.com/JuliaLang/Rmath-julia.git
cd Rmath-julia/src
make
cd ../..
```
#### Functions to wrap
```
! grep "\s.norm(" Rmath-julia/include/Rmath.h
from ctypes import CDLL, c_int, c_double
%%bash
ls Rmath-julia/src/*so
lib = CDLL('Rmath-julia/src/libRmath-julia.so')
def rnorm(mu=0, sigma=1):
lib.rnorm.argtypes = [c_double, c_double]
lib.rnorm.restype = c_double
return lib.rnorm(mu, sigma)
def dnorm(x, mean=0, sd=1, log=0):
lib.dnorm4.argtypes = [c_double, c_double, c_double, c_int]
lib.dnorm4.restype = c_double
return lib.dnorm4(x, mean, sd, log)
def pnorm(q, mu=0, sd=1, lower_tail=1, log_p=0):
lib.pnorm5.argtypes = [c_double, c_double, c_double, c_int, c_int]
lib.pnorm5.restype = c_double
return lib.pnorm5(q, mu, sd, lower_tail, log_p)
def qnorm(p, mu=0, sd=1, lower_tail=1, log_p=0):
lib.qnorm5.argtypes = [c_double, c_double, c_double, c_int, c_int]
lib.qnorm5.restype = c_double
return lib.qnorm5(p, mu, sd, lower_tail, log_p)
pnorm(0, mu=2)
qnorm(0.022750131948179212, mu=2)
plt.hist([rnorm() for i in range(100)])
pass
xs = np.linspace(-3,3,100)
plt.plot(xs, list(map(dnorm, xs)))
pass
```
### Using Cython to wrap standalone library
```
%%file rmath.pxd
cdef extern from "Rmath-julia/include/Rmath.h":
double dnorm(double, double, double, int)
double pnorm(double, double, double, int, int)
double qnorm(double, double, double, int, int)
double rnorm(double, double)
%%file rmath.pyx
cimport rmath
def rnorm_(mu=0, sigma=1):
return rmath.rnorm(mu, sigma)
def dnorm_(x, mean=0, sd=1, log=0):
return rmath.dnorm(x, mean, sd, log)
def pnorm_(q, mu=0, sd=1, lower_tail=1, log_p=0):
return rmath.pnorm(q, mu, sd, lower_tail, log_p)
def qnorm_(p, mu=0, sd=1, lower_tail=1, log_p=0):
return rmath.qnorm(p, mu, sd, lower_tail, log_p)
%%file setup.py
from distutils.core import setup, Extension
from Cython.Build import cythonize
ext = Extension("rmath",
sources=["rmath.pyx"],
include_dirs=["Rmath-julia/include"],
library_dirs=["Rmath-julia/src"],
libraries=["Rmath-julia"],
runtime_library_dirs=["Rmath-julia/src"],
extra_compile_args=["-w", "-std=c99", "-DMATHLIB_STANDALONE"],
extra_link_args=[],
)
setup(
ext_modules = cythonize(
ext
))
! python setup.py build_ext --inplace
import rmath
plt.hist([rmath.rnorm_() for i in range(100)])
pass
xs = np.linspace(-3,3,100)
plt.plot(xs, list(map(rmath.dnorm_, xs)))
pass
```
### `Cython` wrappers are faster than `ctypes`
```
%timeit pnorm(0, mu=2)
%timeit rmath.pnorm_(0, mu=2)
```
Fortran
----
```
! pip install fortran-magic
%load_ext fortranmagic
%%fortran
subroutine fort_sum(N, s)
integer*8, intent(in) :: N
integer*8, intent(out) :: s
integer*8 i
s = 0
do i = 1, N
s = s + i*i
end do
end
fort_sum(10)
```
#### Another example from the [documentation](http://nbviewer.ipython.org/github/mgaitan/fortran_magic/blob/master/documentation.ipynb)
```
%%fortran --link lapack
subroutine solve(A, b, x, n)
! solve the matrix equation A*x=b using LAPACK
implicit none
real*8, dimension(n,n), intent(in) :: A
real*8, dimension(n), intent(in) :: b
real*8, dimension(n), intent(out) :: x
integer :: pivot(n), ok
integer, intent(in) :: n
x = b
! find the solution using the LAPACK routine SGESV
call DGESV(n, 1, A, n, pivot, x, n, ok)
end subroutine
A = np.array([[1, 2.5], [-3, 4]])
b = np.array([1, 2.5])
solve(A, b)
```
Interfacing with R
----
```
%load_ext rpy2.ipython
%%R
library(ggplot2)
suppressPackageStartupMessages(
ggplot(mtcars, aes(x=wt, y=mpg)) + geom_point() + geom_smooth(method=loess)
)
```
#### Converting between Python and R
```
%R -o mtcars
```
#### `mtcars` is now a Python dataframe
```
mtcars.head(n=3)
```
#### We can also pass data from Python to R
```
x = np.linspace(0, 2*np.pi, 100)
y = np.sin(x)
%%R -i x,y
plot(x, y, main="Sine curve in R base graphics")
```
# COCO Reader
Reader operator that reads a COCO dataset (or subset of COCO), which consists of an annotation file and the images directory.
`DALI_EXTRA_PATH` environment variable should point to the place where data from [DALI extra repository](https://github.com/NVIDIA/DALI_extra) is downloaded. Please make sure that the proper release tag is checked out.
```
from __future__ import print_function
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import numpy as np
from time import time
import os.path
test_data_root = os.environ['DALI_EXTRA_PATH']
file_root = os.path.join(test_data_root, 'db', 'coco', 'images')
annotations_file = os.path.join(test_data_root, 'db', 'coco', 'instances.json')
num_gpus = 1
batch_size = 16
class COCOPipeline(Pipeline):
def __init__(self, batch_size, num_threads, device_id):
super(COCOPipeline, self).__init__(batch_size, num_threads, device_id, seed = 15)
self.input = ops.COCOReader(file_root = file_root, annotations_file = annotations_file,
shard_id = device_id, num_shards = num_gpus, ratio=True)
self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB)
def define_graph(self):
inputs, bboxes, labels = self.input()
images = self.decode(inputs)
return (images, bboxes, labels)
start = time()
pipes = [COCOPipeline(batch_size=batch_size, num_threads=2, device_id = device_id) for device_id in range(num_gpus)]
for pipe in pipes:
pipe.build()
total_time = time() - start
print("Computation graph built and dataset loaded in %f seconds." % total_time)
pipe_out = [pipe.run() for pipe in pipes]
images_cpu = pipe_out[0][0].as_cpu()
bboxes_cpu = pipe_out[0][1]
labels_cpu = pipe_out[0][2]
```
Bounding boxes returned by the operator are lists of floats in the format **\[x, y, width, height]** (`ltrb` is set to `False` by default).
```
bboxes = bboxes_cpu.at(4)
bboxes
```
Let's see the ground truth bounding boxes drawn on the image.
```
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import random
img_index = 4
img = images_cpu.at(img_index)
H = img.shape[0]
W = img.shape[1]
fig,ax = plt.subplots(1)
ax.imshow(img)
bboxes = bboxes_cpu.at(img_index)
labels = labels_cpu.at(img_index)
categories_set = set()
for label in labels:
categories_set.add(label[0])
category_id_to_color = dict([ (cat_id , [random.uniform(0, 1) ,random.uniform(0, 1), random.uniform(0, 1)]) for cat_id in categories_set])
for bbox, label in zip(bboxes, labels):
rect = patches.Rectangle((bbox[0]*W,bbox[1]*H),bbox[2]*W,bbox[3]*H,linewidth=1,edgecolor=category_id_to_color[label[0]],facecolor='none')
ax.add_patch(rect)
plt.show()
```
```
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
%matplotlib inline
df = pd.read_csv('boston_house_prices.csv')
```
<b>Explanation of Features</b>
* CRIM: per capita crime rate per town (assumption: if CRIM high, target small)
* ZN: proportion of residential land zoned for lots over 25,000 sq. ft (assumption: if ZN high, target big)
* INDUS: proportion of non-retail business acres per town (assumption: if INDUS high, target small)
* CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) (categorical! assumption: if 1, target high)
* NOX: nitrogen oxides concentration (parts per 10 million) (assumption: if NOX high, target small)
* RM: average number of rooms per dwelling.(assumption: if RM high, target big)
* AGE: proportion of owner-occupied units built prior to 1940. (assumption: if AGE high, target big)
* DIS: weighted mean of distances to five Boston employment centres. (assumption: if DIS high, target small)
* RAD: index of accessibility to radial highways. (assumption: if RAD high, target big)
* TAX: full-value property-tax rate per \$10,000. (assumption: if TAX high, target big)
* PTRATIO: pupil-teacher ratio by town. (assumption: if PTRATIO high, target big)
* B: 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town. (assumption: if B high, target small)
* LSTAT: lower status of the population (percent). (assumption: if LSTAT high, target small)
* MEDV: median value of owner-occupied homes in \$1000s. (target)
```
df.head()
#get number of rows and columns
df.shape
#get overview of dataset values
df.describe()
df.info()
df.isnull().sum()
#check distribution of target variable
#looks like normal distribution, no need to do logarithm
sns.distplot(df.MEDV, kde=False)
#get number of rows in df
n = len(df)
#calculate proportions for training, validation and testing datasets
n_val = int(0.2 * n)
n_test = int(0.2 * n)
n_train = n - (n_val + n_test)
#fix the random seed, so that results are reproducible
np.random.seed(2)
#create a numpy array with indices from 0 to (n-1) and shuffle it
idx = np.arange(n)
np.random.shuffle(idx)
#use the array with indices 'idx' to get a shuffled dataframe
#idx now becomes the index of the df,
#and order of rows in df is according to order of rows in idx
df_shuffled = df.iloc[idx]
#split shuffled df into train, validation and test
#e.g. for train: program starts from index 0
#until the index, that is defined by variable (n_train -1)
df_train = df_shuffled.iloc[:n_train].copy()
df_val = df_shuffled.iloc[n_train:n_train+n_val].copy()
df_test = df_shuffled.iloc[n_train+n_val:].copy()
#keep df's with target value
df_train_incl_target = df_shuffled.iloc[:n_train].copy()
df_val_incl_target = df_shuffled.iloc[n_train:n_train+n_val].copy()
df_test_incl_target = df_shuffled.iloc[n_train+n_val:].copy()
#create target variable arrays
y_train = df_train.MEDV.values
y_val = df_val.MEDV.values
y_test = df_test.MEDV.values
#remove target variable from df's
del df_train['MEDV']
del df_val['MEDV']
del df_test['MEDV']
#define first numerical features
#new training set only contains the selected base columns
#training set is transformed to matrix array with 'value' method
base = ['CRIM', 'ZN', 'INDUS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD']
df_num = df_train[base]
X_train = df_num.values
#return the weights
def linear_regression(X, y):
ones = np.ones(X.shape[0])
X = np.column_stack([ones, X])
XTX = X.T.dot(X)
XTX_inv = np.linalg.inv(XTX)
w = XTX_inv.dot(X.T).dot(y)
return w[0], w[1:]
w_0, w = linear_regression(X_train, y_train)
#prediction of target variable, based on training set
y_pred = w_0 + X_train.dot(w)
#the plot shows difference between distribution of
#real target variable and predicted target variable
sns.distplot(y_pred, label='pred')
sns.distplot(y_train, label='target')
plt.legend()
#calculation of root mean squared error
#based on difference between distribution of
#real target variable and predicted target variable
def rmse(y, y_pred):
error = y_pred - y
mse = (error ** 2).mean()
return np.sqrt(mse)
rmse(y_train, y_pred)
```
Validating the Model
```
#create X_val matrix array
df_num = df_val[base]
X_val = df_num.values
#take the bias and the weights (w_0 and w), what we got from the linear regression
#and get the prediction of the target variable for the validation dataset
y_pred = w_0 + X_val.dot(w)
#compare y_pred with real target values 'y_val'
#that number should be used for comparing models
rmse(y_val, y_pred)
```
<b>prepare_X</b> function converts dataframe to matrix array
```
#this function takes in feature variables (base),
#and returns a matrix array with 'values' method
def prepare_X(df):
df_num = df[base]
X = df_num.values
return X
#train the model by calculating the weights
X_train = prepare_X(df_train)
w_0, w = linear_regression(X_train, y_train)
#apply model to validation dataset
X_val = prepare_X(df_val)
y_pred = w_0 + X_val.dot(w)
#compute RMSE on validation dataset
print('validation', rmse(y_val, y_pred))
```
Feature engineering: Add more features to the model<br>
We use the validation framework to see whether more features improve the model
```
#use prepare_X function to add more features
def prepare_X(df):
df = df.copy()
base_02 = ['CRIM', 'ZN', 'INDUS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD',
'TAX', 'PTRATIO', 'B', 'LSTAT']
df_num = df[base_02]
X = df_num.values
return X
#check if adding 4 more numerical features can improve the model
#X_train should now be a matrix array with totally 12 numerical features
#train the model
X_train = prepare_X(df_train)
w_0, w = linear_regression(X_train, y_train)
#apply model to validation dataset
X_val = prepare_X(df_val)
y_pred = w_0 + X_val.dot(w)
#compute RMSE on validation dataset
print('validation:', rmse(y_val, y_pred))
#above we can see that the RMSE decreased a bit
#plot distribution of real target values (target)
#and the predicted target values (pred)
#after we considered 12 feature variables
sns.distplot(y_pred, label='pred')
sns.distplot(y_val, label='target')
plt.legend()
```
Feature engineering: Add the CHAS feature to the model <br>
Actually it is a categorical variable, but it has only 2 values (0 and 1) <br>
So there is no need to do one-hot encoding <br>
We use the validation framework to see whether this additional feature improves the model
```
base_02 = ['CRIM', 'ZN', 'INDUS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD',
'TAX', 'PTRATIO', 'B', 'LSTAT']
#use prepare_X function to add CHAS as a feature
def prepare_X(df):
df = df.copy()
features = base_02.copy()
features.append('CHAS')
df_num = df[features]
X = df_num.values
return X
#check if adding 'CHAS' as a feature can improve the model
#X_train should now be a matrix array with totally 12 numerical features and 1 categorical feature
#train the model
X_train = prepare_X(df_train)
w_0, w = linear_regression(X_train, y_train)
#apply model to validation dataset
X_val = prepare_X(df_val)
y_pred = w_0 + X_val.dot(w)
#compute RMSE on validation dataset
print('validation:', rmse(y_val, y_pred))
#above we can see that the RMSE decreased a bit
#compared to the plot above, the amount of predicted values for '30'
#gets closer to the amount of real values for '30'
#plot distribution of real target values (target)
#and the predicted target values (pred)
#after we considered 12 feature variables
sns.distplot(y_pred, label='pred')
sns.distplot(y_val, label='target')
plt.legend()
#we could try regularization in case the data is 'noisy'
#regularize with the parameter r
def linear_regression_reg(X, y, r=0.01):
ones = np.ones(X.shape[0])
X = np.column_stack([ones, X])
XTX = X.T.dot(X)
#add r to main diagonal of XTX
reg = r * np.eye(XTX.shape[0])
XTX = XTX + reg
XTX_inv = np.linalg.inv(XTX)
w = XTX_inv.dot(X.T).dot(y)
return w[0], w[1:]
#the bigger r (alpha), the smaller the weights (the denominator becomes bigger)
#on the left 'column', you can see r, which grows with each step
#the other columns show the corresponding weights
for r in [0, 0.001, 0.01, 0.1, 1, 10]:
w_0, w = linear_regression_reg(X_train, y_train, r=r)
print('%5s, %.2f, %.2f, %.2f' % (r, w_0, w[3], w[5]))
#calculate the RMSE after we used ridge regression
X_train = prepare_X(df_train)
w_0, w = linear_regression_reg(X_train, y_train, r=0.001)
X_val = prepare_X(df_val)
y_pred = w_0 + X_val.dot(w)
print('validation:', rmse(y_val, y_pred))
#run a grid search to identify the best value of r
X_train = prepare_X(df_train)
X_val = prepare_X(df_val)
for r in [0.000001, 0.0001, 0.001, 0.01, 0.1, 1, 5, 10]:
w_0, w = linear_regression_reg(X_train, y_train, r=r)
y_pred = w_0 + X_val.dot(w)
print('%6s' %r, rmse(y_val, y_pred))
```
As we can see from the new RMSE, ridge regression has no positive effect here.
Now we can help the user predict the price of a property in Boston
```
df_test_incl_target.head(10)
#create a dictionary from rows
#delete target value
pred_price_list = []
z = 0
while z < 10:
ad = df_test_incl_target.iloc[z].to_dict()
del ad['MEDV']
#df_test is a dataframe with one row (contains above dict info)
df_test = pd.DataFrame([ad])
X_test = prepare_X(df_test)
#train model without ridge regression
w_0, w = linear_regression(X_train, y_train)
#prediction of the price
y_pred = w_0 + X_test.dot(w)
pred_price_list.append(y_pred)
z = z + 1
pred_price_list
real_price = df_test_incl_target.MEDV.tolist()
#get average of difference between real price and predicted price
y = 0
diff_list = []
while y < 10:
diff = real_price[y] - pred_price_list[y]
diff_list.append(diff)
y += 1
sum(diff_list) / len(diff_list)
```
Later on, we can also try other models and see if the RMSE can be further reduced.<br>
Lastly, I want to check how increasing or decreasing feature variables influences the target variable
```
ad = df_test_incl_target.iloc[0].to_dict()
ad
ad_test = {'CRIM': 0.223,
'ZN': 0,
'INDUS': 9.69,
'CHAS': 0,
'NOX': 0.585,
'RM': 6.025,
'AGE': 79.9,
'DIS': 2.4982,
'RAD': 6.0,
'TAX': 391.0,
'PTRATIO': 19.2,
'B': 396.9,
'LSTAT': 14.33}
#df_test is a dataframe with one row (contains above dict info)
df_test = pd.DataFrame([ad_test])
X_test = prepare_X(df_test)
#train model without ridge regression
w_0, w = linear_regression(X_train, y_train)
#prediction of the price
y_pred = w_0 + X_test.dot(w)
y_pred
```
<b>Explanation of Features</b>
* CRIM: per capita crime rate per town (assumption: if CRIM high, target small --> correct)
* ZN: proportion of residential land zoned for lots over 25,000 sq. ft (assumption: if ZN high, target big --> correct)
* INDUS: proportion of non-retail business acres per town (assumption: if INDUS high, target small --> correct)
* CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) (categorical! assumption: if 1, target high --> correct)
* NOX: nitrogen oxides concentration (parts per 10 million) (assumption: if NOX high, target small --> correct)
* RM: average number of rooms per dwelling.(assumption: if RM high, target big --> correct)
* AGE: proportion of owner-occupied units built prior to 1940. (assumption: if AGE high, target big --> not clear)
* DIS: weighted mean of distances to five Boston employment centres. (assumption: if DIS high, target small --> correct)
* RAD: index of accessibility to radial highways. (assumption: if RAD high, target big --> correct)
* TAX: full-value property-tax rate per \$10,000. (assumption: if TAX high, target big --> not correct)
* PTRATIO: pupil-teacher ratio by town. (assumption: if PTRATIO high, target small--> correct)
* B: 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town. (assumption: if B high, target small--> not correct)
* LSTAT: lower status of the population (percent). (assumption: if LSTAT high, target small --> correct)
* MEDV: median value of owner-occupied homes in \$1000s. (target)
```
#check against the test dataset to see if the model works
X_train = prepare_X(df_train)
w_0, w = linear_regression(X_train, y_train)
X_val = prepare_X(df_val)
y_pred = w_0 + X_val.dot(w)
print('validation:', rmse(y_val, y_pred))
X_test = prepare_X(df_test)
y_pred = w_0 + X_test.dot(w)
print('test:', rmse(y_test, y_pred))
```
# Parameters in QCoDeS
A `Parameter` is the basis of measurements and control within QCoDeS. Anything that you want to either measure or control within QCoDeS should satisfy the `Parameter` interface. You may read more about the `Parameter` [here](http://qcodes.github.io/Qcodes/user/intro.html#parameter).
```
import numpy as np
from qcodes.instrument.parameter import Parameter, ArrayParameter, MultiParameter, ManualParameter
from qcodes.utils import validators
```
QCoDeS provides the following classes of built-in parameters:
- `Parameter` represents a single value at a time
- Example: voltage
- `ParameterWithSetpoints` is intended for array-valued parameters.
This Parameter class is intended for anything where a call to the instrument
returns an array of values. [This notebook](Simple-Example-of-ParameterWithSetpoints.ipynb)
gives more detailed examples of how this parameter can be used.
- `ArrayParameter` represents an array of values of all the same type that are returned all at once.
- Example: voltage vs time waveform
- **NOTE:** This is an older base class for array-valued parameters. For any new driver we strongly recommend using `ParameterWithSetpoints` class which is both more flexible and significantly easier to use. Refer to notebook on [writing drivers with ParameterWithSetpoints](Simple-Example-of-ParameterWithSetpoints.ipynb)
- `MultiParameter` represents a collection of values with different meaning and possibly different dimension
- Example: I and Q, or I vs time and Q vs time
Parameters are described in detail in the [Creating Instrument Drivers](../writing_drivers/Creating-Instrument-Drivers.ipynb) tutorial.
## Parameter
Most of the time you can use `Parameter` directly, even if you have custom `get`/`set` functions, but sometimes it's useful to subclass `Parameter`. Note that since the superclass `Parameter` actually wraps these functions (to include some extra nice-to-have functionality), your subclass should define `get_raw` and `set_raw` rather than `get` and `set`.
```
class MyCounter(Parameter):
def __init__(self, name):
# only name is required
super().__init__(name, label='Times this has been read',
vals=validators.Ints(min_value=0),
docstring='counts how many times get has been called '
'but can be reset to any integer >= 0 by set')
self._count = 0
# you must provide a get method, a set method, or both.
def get_raw(self):
self._count += 1
return self._count
def set_raw(self, val):
self._count = val
c = MyCounter('c')
c2 = MyCounter('c2')
# c() is equivalent to c.get()
print('first call:', c())
print('second call:', c())
# c2(val) is equivalent to c2.set(val)
c2(22)
```
## ArrayParameter
**NOTE:** This is an older base class for array-valued parameters. For any new driver we strongly recommend using `ParameterWithSetpoints` class which is both more flexible and significantly easier to use. Refer to notebook on [writing drivers with ParameterWithSetpoints](Simple-Example-of-ParameterWithSetpoints.ipynb).
We have kept the documentation shown below of `ArrayParameter` for the legacy purpose.
For actions that create a whole array of values at once. When you use it in a `Loop`, it makes a single `DataArray` with the array returned by `get` nested inside extra dimension(s) for the loop.
`ArrayParameter` is, for now, only gettable.
```
class ArrayCounter(ArrayParameter):
def __init__(self):
# only name and shape are required
# the setpoints I'm giving here are identical to the defaults
# this param would get but I'll give them anyway for
# demonstration purposes
super().__init__('array_counter', shape=(3, 2),
label='Total number of values provided',
unit='',
# first setpoint array is 1D, second is 2D, etc...
setpoints=((0, 1, 2), ((0, 1), (0, 1), (0, 1))),
setpoint_names=('index0', 'index1'),
setpoint_labels=('Outer param index', 'Inner param index'),
docstring='fills a 3x2 array with increasing integers')
self._val = 0
def get_raw(self):
# here I'm returning a nested list, but any sequence type will do.
# tuple, np.array, DataArray...
out = [[self._val + 2 * i + j for j in range(2)] for i in range(3)]
self._val += 6
return out
array_counter = ArrayCounter()
# simple get
print('first call:', array_counter())
```
## MultiParameter
Return multiple items at once, where each item can be a single value or an array.
NOTE: Most of the kwarg names here are the plural of those used in `Parameter` and `ArrayParameter`. In particular, `MultiParameter` is the ONLY one that uses `units`, all the others use `unit`.
`MultiParameter` is, for now, only gettable.
```
class SingleIQPair(MultiParameter):
def __init__(self, scale_param):
# only name, names, and shapes are required
# this version returns two scalars (shape = `()`)
super().__init__('single_iq', names=('I', 'Q'), shapes=((), ()),
labels=('In phase amplitude', 'Quadrature amplitude'),
units=('V', 'V'),
# including these setpoints is unnecessary here, but
# if you have a parameter that returns a scalar alongside
# an array you can represent the scalar as an empty sequence.
setpoints=((), ()),
docstring='param that returns two single values, I and Q')
self._scale_param = scale_param
def get_raw(self):
scale_val = self._scale_param()
return (scale_val, scale_val / 2)
scale = ManualParameter('scale', initial_value=2)
iq = SingleIQPair(scale_param=scale)
# simple get
print('simple get:', iq())
class IQArray(MultiParameter):
def __init__(self, scale_param):
# names, labels, and units are the same
super().__init__('iq_array', names=('I', 'Q'), shapes=((5,), (5,)),
labels=('In phase amplitude', 'Quadrature amplitude'),
units=('V', 'V'),
# note that EACH item needs a sequence of setpoint arrays
# so a 1D item has its setpoints wrapped in a length-1 tuple
setpoints=(((0, 1, 2, 3, 4),), ((0, 1, 2, 3, 4),)),
docstring='param that returns two 1D arrays, I and Q')
self._scale_param = scale_param
self._indices = np.array([0, 1, 2, 3, 4])
def get_raw(self):
scale_val = self._scale_param()
return (self._indices * scale_val, self._indices * scale_val / 2)
iq_array = IQArray(scale_param=scale)
# simple get
print('simple get', iq_array())
```
# Neural machine translation with attention
This notebook trains a sequence-to-sequence (seq2seq) model that translates Kabyle to English. This is an advanced example that assumes some familiarity with sequence-to-sequence models.
After training the model in this notebook, you will be able to input a Kabyle sentence, such as *"Times!"*, and get back its English translation *"Fire!"*
The translation quality is acceptable for a simple example, but the generated attention plot is perhaps more interesting: it shows which parts of the input sentence received the model's attention during translation.
<img src="https://tensorflow.google.cn/images/spanish-english.png" alt="spanish-english attention plot">
Note: this example takes approximately 10 minutes to run on a single P100 GPU.
```
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from sklearn.model_selection import train_test_split
import unicodedata
import re
import numpy as np
import os
import io
import time
```
## Download and prepare the dataset
We will use a language dataset provided by http://www.manythings.org/anki/. This dataset contains language translation pairs in the following format:
```
May I borrow this book? ¿Puedo tomar prestado este libro?
```
There are many languages available in this dataset. We will use the English-Kabyle dataset. For convenience, a copy of this dataset is hosted on Google Cloud, but you can also download a copy yourself. After downloading the dataset, we will take the following steps to prepare the data:
1. Add a *start* and an *end* token to each sentence.
2. Clean the sentences by removing special characters.
3. Create a word index and a reverse word index (i.e. a dictionary mapping from word to id and another mapping from id to word).
4. Pad each sentence to the maximum length.
```
'''
# Download the file
path_to_zip = tf.keras.utils.get_file(
'spa-eng.zip', origin='http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip',
extract=True)
path_to_file = os.path.dirname(path_to_zip)+"/spa-eng/spa.txt"
'''
path_to_file = "./lan/kab.txt"
# Convert a unicode file to ascii
def unicode_to_ascii(s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def preprocess_sentence(w):
w = unicode_to_ascii(w.lower().strip())
# Insert a space between a word and the punctuation following it
# eg: "he is a boy." => "he is a boy ."
# Reference: https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation
w = re.sub(r"([?.!,¿])", r" \1 ", w)
w = re.sub(r'[" "]+', " ", w)
# Replace everything with a space except (a-z, A-Z, ".", "?", "!", ",")
w = re.sub(r"[^a-zA-Z?.!,¿]+", " ", w)
w = w.rstrip().strip()
# Add a start and an end token to the sentence
# so that the model knows when to start and stop predicting
w = '<start> ' + w + ' <end>'
return w
en_sentence = u"May I borrow this book?"
sp_sentence = u"¿Puedo tomar prestado este libro?"
print(preprocess_sentence(en_sentence))
print(preprocess_sentence(sp_sentence).encode('utf-8'))
# 1. Remove accents
# 2. Clean the sentences
# 3. Return word pairs in the format: [ENGLISH, SPANISH]
def create_dataset(path, num_examples):
lines = io.open(path, encoding='UTF-8').read().strip().split('\n')
word_pairs = [[preprocess_sentence(w) for w in l.split('\t')] for l in lines[:num_examples]]
return zip(*word_pairs)
en, sp = create_dataset(path_to_file, None)
print(en[-1])
print(sp[-1])
def max_length(tensor):
return max(len(t) for t in tensor)
def tokenize(lang):
lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(
filters='')
lang_tokenizer.fit_on_texts(lang)
tensor = lang_tokenizer.texts_to_sequences(lang)
tensor = tf.keras.preprocessing.sequence.pad_sequences(tensor,
padding='post')
return tensor, lang_tokenizer
def load_dataset(path, num_examples=None):
# Create cleaned input, output pairs
targ_lang, inp_lang = create_dataset(path, num_examples)
input_tensor, inp_lang_tokenizer = tokenize(inp_lang)
target_tensor, targ_lang_tokenizer = tokenize(targ_lang)
return input_tensor, target_tensor, inp_lang_tokenizer, targ_lang_tokenizer
```
### Limit the size of the dataset to experiment faster (optional)
Training on the complete dataset of more than 100,000 sentences will take a long time. To train faster, we can limit the size of the dataset to 30,000 sentences (of course, translation quality degrades with less data):
```
# Try experimenting with the size of the dataset
num_examples = 30000
input_tensor, target_tensor, inp_lang, targ_lang = load_dataset(path_to_file, num_examples)
# Calculate the max_length of the target tensors
max_length_targ, max_length_inp = max_length(target_tensor), max_length(input_tensor)
# Create training and validation sets using an 80-20 split
input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=0.2)
# Show length
print(len(input_tensor_train), len(target_tensor_train), len(input_tensor_val), len(target_tensor_val))
def convert(lang, tensor):
for t in tensor:
if t!=0:
print ("%d ----> %s" % (t, lang.index_word[t]))
print ("Input Language; index to word mapping")
convert(inp_lang, input_tensor_train[0])
print ()
print ("Target Language; index to word mapping")
convert(targ_lang, target_tensor_train[0])
```
### Create a tf.data dataset
```
BUFFER_SIZE = len(input_tensor_train)
BATCH_SIZE = 64
steps_per_epoch = len(input_tensor_train)//BATCH_SIZE
embedding_dim = 256
units = 1024
vocab_inp_size = len(inp_lang.word_index)+1
vocab_tar_size = len(targ_lang.word_index)+1
dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
example_input_batch, example_target_batch = next(iter(dataset))
example_input_batch.shape, example_target_batch.shape
```
## Write the encoder and decoder model
Implement an encoder-decoder model with attention, which you can read about in the TensorFlow [Neural Machine Translation (seq2seq) tutorial](https://github.com/tensorflow/nmt). This example uses a more recent set of APIs and implements the [attention equations](https://github.com/tensorflow/nmt#background-on-the-attention-mechanism) from that seq2seq tutorial. The following diagram shows that the attention mechanism assigns a weight to each input word, which the decoder then uses to predict the next word in the sentence. The picture and formulas below are an example of the attention mechanism from [Luong's paper](https://arxiv.org/abs/1508.04025v5).
<img src="https://tensorflow.google.cn/images/seq2seq/attention_mechanism.jpg" width="500" alt="attention mechanism">
The input is passed through the encoder model, which gives us the encoder output of shape *(batch_size, max_length, hidden_size)* and the encoder hidden state of shape *(batch_size, hidden_size)*.
Here are the equations that are implemented:
<img src="https://tensorflow.google.cn/images/seq2seq/attention_equation_0.jpg" alt="attention equation 0" width="800">
<img src="https://tensorflow.google.cn/images/seq2seq/attention_equation_1.jpg" alt="attention equation 1" width="800">
This tutorial uses [Bahdanau attention](https://arxiv.org/pdf/1409.0473.pdf) for the encoder. Let's decide on the notation before writing the simplified form:
* FC = Fully connected (dense) layer
* EO = Encoder output
* H = hidden state
* X = input to the decoder
And the pseudo-code:
* `score = FC(tanh(FC(EO) + FC(H)))`
* `attention weights = softmax(score, axis = 1)`. Softmax is applied to the last axis by default, but here we want to apply it to the *first axis*, since the shape of the score is *(batch_size, max_length, hidden_size)*. `max_length` is the length of our input. Since we are trying to assign a weight to each input token, softmax should be applied on that axis.
* `context vector = sum(attention weights * EO, axis = 1)`. Same reason as above for choosing axis 1.
* `embedding output` = the input to the decoder X is passed through an embedding layer.
* `merged vector = concat(embedding output, context vector)`
* This merged vector is then given to the GRU.
The shapes of all the vectors at each step have been specified in the comments in the code:
```
class Encoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
super(Encoder, self).__init__()
self.batch_sz = batch_sz
self.enc_units = enc_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(self.enc_units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
def call(self, x, hidden):
x = self.embedding(x)
output, state = self.gru(x, initial_state = hidden)
return output, state
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.enc_units))
encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
# sample input
sample_hidden = encoder.initialize_hidden_state()
sample_output, sample_hidden = encoder(example_input_batch, sample_hidden)
print ('Encoder output shape: (batch size, sequence length, units) {}'.format(sample_output.shape))
print ('Encoder Hidden state shape: (batch size, units) {}'.format(sample_hidden.shape))
class BahdanauAttention(tf.keras.layers.Layer):
def __init__(self, units):
super(BahdanauAttention, self).__init__()
self.W1 = tf.keras.layers.Dense(units)
self.W2 = tf.keras.layers.Dense(units)
self.V = tf.keras.layers.Dense(1)
def call(self, query, values):
# hidden shape == (batch_size, hidden size)
# hidden_with_time_axis shape == (batch_size, 1, hidden size)
# we are doing this to perform addition to calculate the score
hidden_with_time_axis = tf.expand_dims(query, 1)
# score shape == (batch_size, max_length, 1)
# we get 1 at the last axis because we are applying score to self.V
# the shape of the tensor before applying self.V is (batch_size, max_length, units)
score = self.V(tf.nn.tanh(
self.W1(values) + self.W2(hidden_with_time_axis)))
# attention_weights shape == (batch_size, max_length, 1)
attention_weights = tf.nn.softmax(score, axis=1)
# context_vector shape after sum == (batch_size, hidden_size)
context_vector = attention_weights * values
context_vector = tf.reduce_sum(context_vector, axis=1)
return context_vector, attention_weights
attention_layer = BahdanauAttention(10)
attention_result, attention_weights = attention_layer(sample_hidden, sample_output)
print("Attention result shape: (batch size, units) {}".format(attention_result.shape))
print("Attention weights shape: (batch_size, sequence_length, 1) {}".format(attention_weights.shape))
class Decoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
super(Decoder, self).__init__()
self.batch_sz = batch_sz
self.dec_units = dec_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(self.dec_units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
self.fc = tf.keras.layers.Dense(vocab_size)
# used for attention
self.attention = BahdanauAttention(self.dec_units)
def call(self, x, hidden, enc_output):
# enc_output shape == (batch_size, max_length, hidden_size)
context_vector, attention_weights = self.attention(hidden, enc_output)
# x shape after passing through embedding == (batch_size, 1, embedding_dim)
x = self.embedding(x)
# x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
# passing the concatenated vector to the GRU
output, state = self.gru(x)
# output shape == (batch_size * 1, hidden_size)
output = tf.reshape(output, (-1, output.shape[2]))
# output shape == (batch_size, vocab)
x = self.fc(output)
return x, state, attention_weights
decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)
sample_decoder_output, _, _ = decoder(tf.random.uniform((64, 1)),
sample_hidden, sample_output)
print ('Decoder output shape: (batch_size, vocab size) {}'.format(sample_decoder_output.shape))
```
## Define the optimizer and the loss function
```
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction='none')
def loss_function(real, pred):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_mean(loss_)
```
## Checkpoints (object-based saving)
```
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
encoder=encoder,
decoder=decoder)
```
## Training
1. Pass the *input* through the *encoder*, which returns the *encoder output* and the *encoder hidden state*.
2. The encoder output, encoder hidden state and the decoder input (which is the *start token*) are passed to the decoder.
3. The decoder returns the *predictions* and the *decoder hidden state*.
4. The decoder hidden state is then passed back into the model and the predictions are used to calculate the loss.
5. Use *teacher forcing* to decide the next input to the decoder.
6. *Teacher forcing* is the technique where the *target word* is passed as the *next input* to the decoder.
7. The final step is to calculate the gradients, apply them with the optimizer, and backpropagate.
```
@tf.function
def train_step(inp, targ, enc_hidden):
loss = 0
with tf.GradientTape() as tape:
enc_output, enc_hidden = encoder(inp, enc_hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word_index['<start>']] * BATCH_SIZE, 1)
# Teacher forcing - feeding the target as the next input
for t in range(1, targ.shape[1]):
# passing enc_output to the decoder
predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)
loss += loss_function(targ[:, t], predictions)
# using teacher forcing
dec_input = tf.expand_dims(targ[:, t], 1)
batch_loss = (loss / int(targ.shape[1]))
variables = encoder.trainable_variables + decoder.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
return batch_loss
EPOCHS = 10
for epoch in range(EPOCHS):
start = time.time()
enc_hidden = encoder.initialize_hidden_state()
total_loss = 0
for (batch, (inp, targ)) in enumerate(dataset.take(steps_per_epoch)):
batch_loss = train_step(inp, targ, enc_hidden)
total_loss += batch_loss
if batch % 100 == 0:
print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,
batch,
batch_loss.numpy()))
# saving (checkpointing) the model every 2 epochs
if (epoch + 1) % 2 == 0:
checkpoint.save(file_prefix = checkpoint_prefix)
print('Epoch {} Loss {:.4f}'.format(epoch + 1,
total_loss / steps_per_epoch))
print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
```
## Translate
* The evaluate function is similar to the training loop, except we don't use *teacher forcing* here. The input to the decoder at each time step is its previous prediction along with the hidden state and the encoder output.
* Stop predicting when the model predicts the *end token*.
* Store the *attention weights for every time step*.
Note: the encoder output is calculated only once per input.
```
def evaluate(sentence):
attention_plot = np.zeros((max_length_targ, max_length_inp))
sentence = preprocess_sentence(sentence)
inputs = [inp_lang.word_index[i] for i in sentence.split(' ')]
inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],
maxlen=max_length_inp,
padding='post')
inputs = tf.convert_to_tensor(inputs)
result = ''
hidden = [tf.zeros((1, units))]
enc_out, enc_hidden = encoder(inputs, hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word_index['<start>']], 0)
for t in range(max_length_targ):
predictions, dec_hidden, attention_weights = decoder(dec_input,
dec_hidden,
enc_out)
# storing the attention weights to plot later on
attention_weights = tf.reshape(attention_weights, (-1, ))
attention_plot[t] = attention_weights.numpy()
predicted_id = tf.argmax(predictions[0]).numpy()
result += targ_lang.index_word[predicted_id] + ' '
if targ_lang.index_word[predicted_id] == '<end>':
return result, sentence, attention_plot
# the predicted ID is fed back into the model
dec_input = tf.expand_dims([predicted_id], 0)
return result, sentence, attention_plot
# function for plotting the attention weights
def plot_attention(attention, sentence, predicted_sentence):
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(1, 1, 1)
ax.matshow(attention, cmap='viridis')
fontdict = {'fontsize': 14}
ax.set_xticklabels([''] + sentence, fontdict=fontdict, rotation=90)
ax.set_yticklabels([''] + predicted_sentence, fontdict=fontdict)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
def translate(sentence):
result, sentence, attention_plot = evaluate(sentence)
print('Input: %s' % (sentence))
print('Predicted translation: {}'.format(result))
attention_plot = attention_plot[:len(result.split(' ')), :len(sentence.split(' '))]
plot_attention(attention_plot, sentence.split(' '), result.split(' '))
```
## Restore the latest checkpoint and test
```
# restore the latest checkpoint in checkpoint_dir
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
translate(u'hace mucho frio aqui.')
translate(u'esta es mi vida.')
translate(u'¿todavia estan en casa?')
# wrong translation
translate(u'trata de averiguarlo.')
```
<div align="center">
<h1><img width="30" src="https://madewithml.com/static/images/rounded_logo.png"> <a href="https://madewithml.com/">Made With ML</a></h1>
Applied ML · MLOps · Production
<br>
Join 30K+ developers in learning how to responsibly <a href="https://madewithml.com/about/">deliver value</a> with ML.
<br>
</div>
<br>
<div align="center">
<a target="_blank" href="https://newsletter.madewithml.com"><img src="https://img.shields.io/badge/Subscribe-30K-brightgreen"></a>
<a target="_blank" href="https://github.com/GokuMohandas/MadeWithML"><img src="https://img.shields.io/github/stars/GokuMohandas/MadeWithML.svg?style=social&label=Star"></a>
<a target="_blank" href="https://www.linkedin.com/in/goku"><img src="https://img.shields.io/badge/style--5eba00.svg?label=LinkedIn&logo=linkedin&style=social"></a>
<a target="_blank" href="https://twitter.com/GokuMohandas"><img src="https://img.shields.io/twitter/follow/GokuMohandas.svg?label=Follow&style=social"></a>
<br>
🔥 Among the <a href="https://github.com/topics/deep-learning" target="_blank">top ML</a> repositories on GitHub
</div>
<br>
<hr>
# Transformers
In this lesson we will learn how to implement the Transformer architecture to extract contextual embeddings for our text classification task.
<div align="left">
<a target="_blank" href="https://madewithml.com/courses/foundations/transformers/"><img src="https://img.shields.io/badge/📖 Read-blog post-9cf"></a>
<a href="https://github.com/GokuMohandas/MadeWithML/blob/main/notebooks/15_Transformers.ipynb" role="button"><img src="https://img.shields.io/static/v1?label=&message=View%20On%20GitHub&color=586069&logo=github&labelColor=2f363d"></a>
<a href="https://colab.research.google.com/github/GokuMohandas/MadeWithML/blob/main/notebooks/15_Transformers.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
</div>
# Overview
Transformers are a very popular architecture that leverage and extend the concept of self-attention to create very useful representations of our input data for a downstream task.
- **advantages**:
- better representation for our input tokens via contextual embeddings where the token representation is based on the specific neighboring tokens using self-attention.
- sub-word tokens, as opposed to character tokens, since they can hold more meaningful representation for many of our keywords, prefixes, suffixes, etc.
- attend (in parallel) to all the tokens in our input, as opposed to being limited by filter spans (CNNs) or memory issues from sequential processing (RNNs).
- **disadvantages**:
- computationally intensive
- require large amounts of data (mitigated by using pretrained models)
<div align="left">
<img src="https://madewithml.com/static/images/foundations/transformers/architecture.png" width="800">
</div>
<div align="left">
<small><a href="https://arxiv.org/abs/1706.03762" target="_blank">Attention Is All You Need</a></small>
</div>
# Set up
```
!pip install transformers==3.0.2 -q
import numpy as np
import pandas as pd
import random
import torch
import torch.nn as nn
SEED = 1234
def set_seeds(seed=1234):
"""Set seeds for reproducibility."""
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # multi-GPU
# Set seeds for reproducibility
set_seeds(seed=SEED)
# Set device
cuda = True
device = torch.device("cuda" if (
torch.cuda.is_available() and cuda) else "cpu")
torch.set_default_tensor_type("torch.FloatTensor")
if device.type == "cuda":
torch.set_default_tensor_type("torch.cuda.FloatTensor")
print (device)
```
## Load data
We will download the [AG News dataset](http://www.di.unipi.it/~gulli/AG_corpus_of_news_articles.html), which consists of 120K text samples from 4 unique classes (`Business`, `Sci/Tech`, `Sports`, `World`)
```
import numpy as np
import pandas as pd
import re
import urllib
# Load data
url = "https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/datasets/news.csv"
df = pd.read_csv(url, header=0) # load
df = df.sample(frac=1).reset_index(drop=True) # shuffle
df.head()
# Reduce data size (too large to fit in Colab's limited memory)
df = df[:10000]
print (len(df))
```
## Preprocessing
We're going to clean up our input data first with operations such as lowercasing the text, removing stop (filler) words, applying filters using regular expressions, etc.
```
import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import re
nltk.download("stopwords")
STOPWORDS = stopwords.words("english")
print (STOPWORDS[:5])
porter = PorterStemmer()
def preprocess(text, stopwords=STOPWORDS):
"""Conditional preprocessing on our text unique to our task."""
# Lower
text = text.lower()
# Remove stopwords
pattern = re.compile(r'\b(' + r'|'.join(stopwords) + r')\b\s*')
text = pattern.sub('', text)
# Remove words in parentheses
text = re.sub(r'\([^)]*\)', '', text)
# Spacing and filters
text = re.sub(r"([-;;.,!?<=>])", r" \1 ", text)
text = re.sub('[^A-Za-z0-9]+', ' ', text) # remove non alphanumeric chars
text = re.sub(' +', ' ', text) # remove multiple spaces
text = text.strip()
return text
# Sample
text = "Great week for the NYSE!"
preprocess(text=text)
# Apply to dataframe
preprocessed_df = df.copy()
preprocessed_df.title = preprocessed_df.title.apply(preprocess)
print (f"{df.title.values[0]}\n\n{preprocessed_df.title.values[0]}")
```
## Split data
```
import collections
from sklearn.model_selection import train_test_split
TRAIN_SIZE = 0.7
VAL_SIZE = 0.15
TEST_SIZE = 0.15
def train_val_test_split(X, y, train_size):
"""Split dataset into data splits."""
X_train, X_, y_train, y_ = train_test_split(X, y, train_size=TRAIN_SIZE, stratify=y)
X_val, X_test, y_val, y_test = train_test_split(X_, y_, train_size=0.5, stratify=y_)
return X_train, X_val, X_test, y_train, y_val, y_test
# Data
X = preprocessed_df["title"].values
y = preprocessed_df["category"].values
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
```
## Label encoder
```
class LabelEncoder(object):
"""Label encoder for tag labels."""
def __init__(self, class_to_index={}):
self.class_to_index = class_to_index
self.index_to_class = {v: k for k, v in self.class_to_index.items()}
self.classes = list(self.class_to_index.keys())
def __len__(self):
return len(self.class_to_index)
def __str__(self):
return f"<LabelEncoder(num_classes={len(self)})>"
def fit(self, y):
classes = np.unique(y)
for i, class_ in enumerate(classes):
self.class_to_index[class_] = i
self.index_to_class = {v: k for k, v in self.class_to_index.items()}
self.classes = list(self.class_to_index.keys())
return self
def encode(self, y):
y_one_hot = np.zeros((len(y), len(self.class_to_index)), dtype=int)
for i, item in enumerate(y):
y_one_hot[i][self.class_to_index[item]] = 1
return y_one_hot
def decode(self, y):
classes = []
for i, item in enumerate(y):
index = np.where(item == 1)[0][0]
classes.append(self.index_to_class[index])
return classes
def save(self, fp):
with open(fp, "w") as fp:
contents = {'class_to_index': self.class_to_index}
json.dump(contents, fp, indent=4, sort_keys=False)
@classmethod
def load(cls, fp):
with open(fp, "r") as fp:
kwargs = json.load(fp=fp)
return cls(**kwargs)
# Encode
label_encoder = LabelEncoder()
label_encoder.fit(y_train)
num_classes = len(label_encoder)
label_encoder.class_to_index
# Class weights
counts = np.bincount([label_encoder.class_to_index[class_] for class_ in y_train])
class_weights = {i: 1.0/count for i, count in enumerate(counts)}
print (f"counts: {counts}\nweights: {class_weights}")
# Convert labels to tokens
print (f"y_train[0]: {y_train[0]}")
y_train = label_encoder.encode(y_train)
y_val = label_encoder.encode(y_val)
y_test = label_encoder.encode(y_test)
print (f"y_train[0]: {y_train[0]}")
print (f"decode([y_train[0]]): {label_encoder.decode([y_train[0]])}")
```
## Tokenizer
We'll be using the [BertTokenizer](https://huggingface.co/transformers/model_doc/bert.html#berttokenizer) to tokenize our input text in to sub-word tokens.
```
from transformers import DistilBertTokenizer
from transformers import BertTokenizer
# Load tokenizer and model
# tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
tokenizer = BertTokenizer.from_pretrained("allenai/scibert_scivocab_uncased")
vocab_size = len(tokenizer)
print (vocab_size)
# Tokenize inputs
encoded_input = tokenizer(X_train.tolist(), return_tensors="pt", padding=True)
X_train_ids = encoded_input["input_ids"]
X_train_masks = encoded_input["attention_mask"]
print (X_train_ids.shape, X_train_masks.shape)
encoded_input = tokenizer(X_val.tolist(), return_tensors="pt", padding=True)
X_val_ids = encoded_input["input_ids"]
X_val_masks = encoded_input["attention_mask"]
print (X_val_ids.shape, X_val_masks.shape)
encoded_input = tokenizer(X_test.tolist(), return_tensors="pt", padding=True)
X_test_ids = encoded_input["input_ids"]
X_test_masks = encoded_input["attention_mask"]
print (X_test_ids.shape, X_test_masks.shape)
# Decode
print (f"{X_train_ids[0]}\n{tokenizer.decode(X_train_ids[0])}")
# Sub-word tokens
print (tokenizer.convert_ids_to_tokens(ids=X_train_ids[0]))
```
## Datasets
We're going to create Datasets and DataLoaders to be able to efficiently create batches with our data splits.
```
class TransformerTextDataset(torch.utils.data.Dataset):
def __init__(self, ids, masks, targets):
self.ids = ids
self.masks = masks
self.targets = targets
def __len__(self):
return len(self.targets)
def __str__(self):
return f"<Dataset(N={len(self)})>"
def __getitem__(self, index):
ids = torch.tensor(self.ids[index], dtype=torch.long)
masks = torch.tensor(self.masks[index], dtype=torch.long)
targets = torch.FloatTensor(self.targets[index])
return ids, masks, targets
def create_dataloader(self, batch_size, shuffle=False, drop_last=False):
return torch.utils.data.DataLoader(
dataset=self,
batch_size=batch_size,
shuffle=shuffle,
drop_last=drop_last,
pin_memory=False)
# Create datasets
train_dataset = TransformerTextDataset(ids=X_train_ids, masks=X_train_masks, targets=y_train)
val_dataset = TransformerTextDataset(ids=X_val_ids, masks=X_val_masks, targets=y_val)
test_dataset = TransformerTextDataset(ids=X_test_ids, masks=X_test_masks, targets=y_test)
print ("Data splits:\n"
f" Train dataset:{train_dataset.__str__()}\n"
f" Val dataset: {val_dataset.__str__()}\n"
f" Test dataset: {test_dataset.__str__()}\n"
"Sample point:\n"
f" ids: {train_dataset[0][0]}\n"
f" masks: {train_dataset[0][1]}\n"
f" targets: {train_dataset[0][2]}")
# Create dataloaders
batch_size = 128
train_dataloader = train_dataset.create_dataloader(
batch_size=batch_size)
val_dataloader = val_dataset.create_dataloader(
batch_size=batch_size)
test_dataloader = test_dataset.create_dataloader(
batch_size=batch_size)
batch = next(iter(train_dataloader))
print ("Sample batch:\n"
f" ids: {batch[0].size()}\n"
f" masks: {batch[1].size()}\n"
f" targets: {batch[2].size()}")
```
## Trainer
Let's create the `Trainer` class that we'll use to facilitate training for our experiments.
```
import torch.nn.functional as F
class Trainer(object):
def __init__(self, model, device, loss_fn=None, optimizer=None, scheduler=None):
# Set params
self.model = model
self.device = device
self.loss_fn = loss_fn
self.optimizer = optimizer
self.scheduler = scheduler
def train_step(self, dataloader):
"""Train step."""
# Set model to train mode
self.model.train()
loss = 0.0
# Iterate over train batches
for i, batch in enumerate(dataloader):
# Step
batch = [item.to(self.device) for item in batch] # Set device
inputs, targets = batch[:-1], batch[-1]
self.optimizer.zero_grad() # Reset gradients
z = self.model(inputs) # Forward pass
J = self.loss_fn(z, targets) # Define loss
J.backward() # Backward pass
self.optimizer.step() # Update weights
# Cumulative Metrics
loss += (J.detach().item() - loss) / (i + 1)
return loss
def eval_step(self, dataloader):
"""Validation or test step."""
# Set model to eval mode
self.model.eval()
loss = 0.0
y_trues, y_probs = [], []
# Iterate over val batches
with torch.inference_mode():
for i, batch in enumerate(dataloader):
# Step
batch = [item.to(self.device) for item in batch] # Set device
inputs, y_true = batch[:-1], batch[-1]
z = self.model(inputs) # Forward pass
J = self.loss_fn(z, y_true).item()
# Cumulative Metrics
loss += (J - loss) / (i + 1)
# Store outputs
y_prob = F.softmax(z).cpu().numpy()
y_probs.extend(y_prob)
y_trues.extend(y_true.cpu().numpy())
return loss, np.vstack(y_trues), np.vstack(y_probs)
def predict_step(self, dataloader):
"""Prediction step."""
# Set model to eval mode
self.model.eval()
y_probs = []
# Iterate over val batches
with torch.inference_mode():
for i, batch in enumerate(dataloader):
# Forward pass w/ inputs
inputs, targets = batch[:-1], batch[-1]
z = self.model(inputs)
# Store outputs
y_prob = F.softmax(z).cpu().numpy()
y_probs.extend(y_prob)
return np.vstack(y_probs)
def train(self, num_epochs, patience, train_dataloader, val_dataloader):
best_val_loss = np.inf
for epoch in range(num_epochs):
# Steps
train_loss = self.train_step(dataloader=train_dataloader)
val_loss, _, _ = self.eval_step(dataloader=val_dataloader)
self.scheduler.step(val_loss)
# Early stopping
if val_loss < best_val_loss:
best_val_loss = val_loss
best_model = self.model
_patience = patience # reset _patience
else:
_patience -= 1
if not _patience: # 0
print("Stopping early!")
break
# Logging
print(
f"Epoch: {epoch+1} | "
f"train_loss: {train_loss:.5f}, "
f"val_loss: {val_loss:.5f}, "
f"lr: {self.optimizer.param_groups[0]['lr']:.2E}, "
f"_patience: {_patience}"
)
return best_model
```
# Transformer
## Scaled dot-product attention
The most popular type of self-attention is scaled dot-product attention from the widely-cited [Attention is all you need](https://arxiv.org/abs/1706.03762) paper. This type of attention involves projecting our encoded input sequences onto three matrices, queries (Q), keys (K) and values (V), whose weights we learn.
$ inputs \in \mathbb{R}^{NXMXH} $ ($N$ = batch size, $M$ = sequence length, $H$ = hidden dim)
$ Q = XW_q $ where $ W_q \in \mathbb{R}^{HXd_q} $
$ K = XW_k $ where $ W_k \in \mathbb{R}^{HXd_k} $
$ V = XW_v $ where $ W_v \in \mathbb{R}^{HXd_v} $
$ attention (Q, K, V) = softmax( \frac{Q K^{T}}{\sqrt{d_k}} )V \in \mathbb{R}^{MXd_v} $
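To make these equations concrete, here is a minimal sketch of scaled dot-product self-attention in PyTorch. The function name and toy tensors are our own illustration (not part of this lesson's code), assuming a single head and $d_q = d_k$:
```
import torch
import torch.nn.functional as F

def scaled_dot_product_attention(Q, K, V):
    """Q: (N, M, d_k), K: (N, M, d_k), V: (N, M, d_v)."""
    d_k = K.size(-1)
    scores = torch.bmm(Q, K.transpose(1, 2)) / (d_k ** 0.5)  # (N, M, M)
    weights = F.softmax(scores, dim=-1)                       # attention weights
    return torch.bmm(weights, V), weights                     # context: (N, M, d_v)

# Toy example: batch of 2 sequences, 5 tokens each, hidden dim 16
x = torch.randn(2, 5, 16)
W_q, W_k, W_v = torch.nn.Linear(16, 16), torch.nn.Linear(16, 16), torch.nn.Linear(16, 16)
context, attn = scaled_dot_product_attention(W_q(x), W_k(x), W_v(x))
print(context.shape, attn.shape)  # torch.Size([2, 5, 16]) torch.Size([2, 5, 5])
```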
## Multi-head attention
Instead of applying self-attention only once across the entire encoded input, we can also separate the input and apply self-attention in parallel (heads) to each input section and concatenate them. This allows the different heads to learn unique representations while keeping the overall complexity manageable, since we split the input into smaller subspaces.
$ MultiHead(Q, K, V) = concat({head}_1, ..., {head}_{h})W_O $
* ${head}_i = attention(Q_i, K_i, V_i) $
* $h$ = # of self-attention heads
* $W_O \in \mathbb{R}^{hd_vXH} $
* $H$ = hidden dim. (or dimension of the model $d_{model}$)
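In practice we rarely write this by hand; PyTorch's built-in `torch.nn.MultiheadAttention` handles the projections, the head splitting, and the output projection $W_O$. A small usage sketch (assuming a PyTorch version that supports `batch_first=True`, i.e. 1.9 or newer):
```
import torch

# 4 heads over a hidden dim of 16, so each head attends in a 4-dimensional subspace
mha = torch.nn.MultiheadAttention(embed_dim=16, num_heads=4, batch_first=True)
x = torch.randn(2, 5, 16)                          # (batch, seq len, hidden dim)
out, attn_weights = mha(query=x, key=x, value=x)   # self-attention
print(out.shape, attn_weights.shape)               # (2, 5, 16) and (2, 5, 5)
```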
## Positional encoding
With self-attention, we aren't able to account for the sequential position of our input tokens. To address this, we can use positional encoding to create a representation of the location of each token with respect to the entire sequence. This can either be learned (with weights) or we can use a fixed function that can better extend to create positional encoding for lengths during inference that were not observed during training.
$ PE_{(pos,2i)} = sin({pos}/{10000^{2i/H}}) $
$ PE_{(pos,2i+1)} = cos({pos}/{10000^{2i/H}}) $
where:
* $pos$ = position of the token $(1...M)$
* $i$ = hidden dim $(1..H)$
This effectively allows us to represent each token's relative position using a fixed function, even for very long sequences. And because we've constrained the positional encodings to have the same dimensions as our encoded inputs, we can simply add them element-wise to the encoded inputs before feeding them into the multi-head attention heads.
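A short sketch of the fixed sinusoidal encoding described by the equations above (illustrative only; the helper name is ours):
```
import numpy as np

def positional_encoding(max_len, hidden_dim):
    """Fixed sinusoidal positional encodings of shape (max_len, hidden_dim)."""
    pos = np.arange(max_len)[:, np.newaxis]             # token positions
    i = np.arange(hidden_dim)[np.newaxis, :]            # hidden dimensions
    angles = pos / np.power(10000, (2 * (i // 2)) / hidden_dim)
    pe = np.zeros((max_len, hidden_dim))
    pe[:, 0::2] = np.sin(angles[:, 0::2])               # even indices -> sin
    pe[:, 1::2] = np.cos(angles[:, 1::2])               # odd indices -> cos
    return pe

print(positional_encoding(max_len=10, hidden_dim=16).shape)  # (10, 16)
```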
## Architecture
And here's how it all fits together! It's an end-to-end architecture that creates these contextual representations and uses an encoder-decoder architecture to predict the outcomes (one-to-one, many-to-one, many-to-many, etc.) Due to the complexity of the architecture, they require massive amounts of data for training without overfitting, however, they can be leveraged as pretrained models to finetune with smaller datasets that are similar to the larger set it was initially trained on.
<div align="left">
<img src="https://madewithml.com/static/images/foundations/transformers/architecture.png" width="800">
</div>
<div align="left">
<small><a href="https://arxiv.org/abs/1706.03762" target="_blank">Attention Is All You Need</a></small>
</div>
> We're not going to implement the Transformer [from scratch](https://nlp.seas.harvard.edu/2018/04/03/attention.html), but we will use the [Hugging Face library](https://github.com/huggingface/transformers) to load a pretrained [BertModel](https://huggingface.co/transformers/model_doc/bert.html#bertmodel), which we'll use as a feature extractor and fine-tune on our own dataset.
## Model
We're going to use a pretrained [BertModel](https://huggingface.co/transformers/model_doc/bert.html#bertmodel) to act as a feature extractor. We'll only use the encoder to receive sequential and pooled outputs (`is_decoder=False` is default).
```
from transformers import BertModel
# transformer = BertModel.from_pretrained("distilbert-base-uncased")
# embedding_dim = transformer.config.dim
transformer = BertModel.from_pretrained("allenai/scibert_scivocab_uncased")
embedding_dim = transformer.config.hidden_size
class Transformer(nn.Module):
def __init__(self, transformer, dropout_p, embedding_dim, num_classes):
super(Transformer, self).__init__()
self.transformer = transformer
self.dropout = torch.nn.Dropout(dropout_p)
self.fc1 = torch.nn.Linear(embedding_dim, num_classes)
def forward(self, inputs):
ids, masks = inputs
seq, pool = self.transformer(input_ids=ids, attention_mask=masks)
z = self.dropout(pool)
z = self.fc1(z)
return z
```
> We decided to work with the pooled output, but we could have just as easily worked with the sequential output (encoder representation for each sub-token) and applied a CNN (or other decoder options) on top of it.
```
# Initialize model
dropout_p = 0.5
model = Transformer(
transformer=transformer, dropout_p=dropout_p,
embedding_dim=embedding_dim, num_classes=num_classes)
model = model.to(device)
print (model.named_parameters)
```
## Training
```
# Arguments
lr = 1e-4
num_epochs = 100
patience = 10
# Define loss
class_weights_tensor = torch.Tensor(np.array(list(class_weights.values())))
loss_fn = nn.BCEWithLogitsLoss(weight=class_weights_tensor)
# Define optimizer & scheduler
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode="min", factor=0.1, patience=5)
# Trainer module
trainer = Trainer(
model=model, device=device, loss_fn=loss_fn,
optimizer=optimizer, scheduler=scheduler)
# Train
best_model = trainer.train(num_epochs, patience, train_dataloader, val_dataloader)
```
## Evaluation
```
import json
from sklearn.metrics import precision_recall_fscore_support
def get_performance(y_true, y_pred, classes):
"""Per-class performance metrics."""
# Performance
performance = {"overall": {}, "class": {}}
# Overall performance
metrics = precision_recall_fscore_support(y_true, y_pred, average="weighted")
performance["overall"]["precision"] = metrics[0]
performance["overall"]["recall"] = metrics[1]
performance["overall"]["f1"] = metrics[2]
performance["overall"]["num_samples"] = np.float64(len(y_true))
# Per-class performance
metrics = precision_recall_fscore_support(y_true, y_pred, average=None)
for i in range(len(classes)):
performance["class"][classes[i]] = {
"precision": metrics[0][i],
"recall": metrics[1][i],
"f1": metrics[2][i],
"num_samples": np.float64(metrics[3][i]),
}
return performance
# Get predictions
test_loss, y_true, y_prob = trainer.eval_step(dataloader=test_dataloader)
y_pred = np.argmax(y_prob, axis=1)
# Determine performance
performance = get_performance(
y_true=np.argmax(y_true, axis=1), y_pred=y_pred, classes=label_encoder.classes)
print (json.dumps(performance["overall"], indent=2))
# Save artifacts
from pathlib import Path
dir = Path("transformers")
dir.mkdir(parents=True, exist_ok=True)
label_encoder.save(fp=Path(dir, "label_encoder.json"))
torch.save(best_model.state_dict(), Path(dir, "model.pt"))
with open(Path(dir, "performance.json"), "w") as fp:
json.dump(performance, indent=2, sort_keys=False, fp=fp)
```
## Inference
```
def get_probability_distribution(y_prob, classes):
"""Create a dict of class probabilities from an array."""
results = {}
for i, class_ in enumerate(classes):
results[class_] = np.float64(y_prob[i])
sorted_results = {k: v for k, v in sorted(
results.items(), key=lambda item: item[1], reverse=True)}
return sorted_results
# Load artifacts
device = torch.device("cpu")
tokenizer = BertTokenizer.from_pretrained("allenai/scibert_scivocab_uncased")
label_encoder = LabelEncoder.load(fp=Path(dir, "label_encoder.json"))
transformer = BertModel.from_pretrained("allenai/scibert_scivocab_uncased")
embedding_dim = transformer.config.hidden_size
model = Transformer(
transformer=transformer, dropout_p=dropout_p,
embedding_dim=embedding_dim, num_classes=num_classes)
model.load_state_dict(torch.load(Path(dir, "model.pt"), map_location=device))
model.to(device);
# Initialize trainer
trainer = Trainer(model=model, device=device)
# Create datasets
train_dataset = TransformerTextDataset(ids=X_train_ids, masks=X_train_masks, targets=y_train)
val_dataset = TransformerTextDataset(ids=X_val_ids, masks=X_val_masks, targets=y_val)
test_dataset = TransformerTextDataset(ids=X_test_ids, masks=X_test_masks, targets=y_test)
print ("Data splits:\n"
f" Train dataset:{train_dataset.__str__()}\n"
f" Val dataset: {val_dataset.__str__()}\n"
f" Test dataset: {test_dataset.__str__()}\n"
"Sample point:\n"
f" ids: {train_dataset[0][0]}\n"
f" masks: {train_dataset[0][1]}\n"
f" targets: {train_dataset[0][2]}")
# Dataloader
text = "The final tennis tournament starts next week."
X = preprocess(text)
encoded_input = tokenizer(X, return_tensors="pt", padding=True).to(torch.device("cpu"))
ids = encoded_input["input_ids"]
masks = encoded_input["attention_mask"]
y_filler = label_encoder.encode([label_encoder.classes[0]]*len(ids))
dataset = TransformerTextDataset(ids=ids, masks=masks, targets=y_filler)
dataloader = dataset.create_dataloader(batch_size=int(batch_size))
# Inference
y_prob = trainer.predict_step(dataloader)
y_pred = np.argmax(y_prob, axis=1)
label_encoder.index_to_class[y_pred[0]]
# Class distributions
prob_dist = get_probability_distribution(y_prob=y_prob[0], classes=label_encoder.classes)
print (json.dumps(prob_dist, indent=2))
```
## Interpretability
Let's visualize the self-attention weights from each of the attention heads in the encoder.
```
import sys
!rm -r bertviz_repo
!test -d bertviz_repo || git clone https://github.com/jessevig/bertviz bertviz_repo
if not "bertviz_repo" in sys.path:
sys.path += ["bertviz_repo"]
from bertviz import head_view
# Print input ids
print (ids)
print (tokenizer.batch_decode(ids))
# Get encoder attentions
seq, pool, attn = model.transformer(input_ids=ids, attention_mask=masks, output_attentions=True)
print (len(attn)) # 12 attention layers (heads)
print (attn[0].shape)
# HTML set up
def call_html():
import IPython
display(IPython.core.display.HTML('''
<script src="/static/components/requirejs/require.js"></script>
<script>
requirejs.config({
paths: {
base: '/static/base',
"d3": "https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.8/d3.min",
jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min',
},
});
</script>
'''))
# Visualize self-attention weights
call_html()
tokens = tokenizer.convert_ids_to_tokens(ids[0])
head_view(attention=attn, tokens=tokens)
```
> Now you're ready to start the [MLOps lessons](https://madewithml.com/#mlops) to learn how to apply all this foundational modeling knowledge to responsibly deliver value.
# Introduction to coordinate descent: theory and applications
## Problem statement and the key assumption
$$
\min_{x \in \mathbb{R}^n} f(x)
$$
- $f$ is a convex function
- If along every coordinate we have $f(x + \varepsilon e_i) \geq f(x)$, does that mean $x$ is a minimum point?
- If $f$ is smooth, then yes, by the first-order optimality criterion $f'(x) = 0$
- If $f$ is nonsmooth, then no, since the condition can also hold at "corner" points that are not minima
- If $f$ is nonsmooth but composite with a separable nonsmooth part, that is,
$$
f(x) = g(x) + \sum_{i=1}^n h_i(x_i),
$$
then yes. Why?
- For any $y$ and any $x$ at which the optimality condition holds along every direction, we have
$$
f(y) - f(x) = g(y) - g(x) + \sum_{i=1}^n (h_i(y_i) - h_i(x_i)) \geq \langle g'(x), y - x \rangle+ \sum_{i=1}^n (h_i(y_i) - h_i(x_i)) = \sum_{i=1}^n [g'_i(x)(y_i - x_i) + h_i(y_i) - h_i(x_i)] \geq 0
$$
- Hence for functions of this form we can minimize coordinate by coordinate and still end up at a minimum point (see the Lasso sketch below)
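Below is a minimal NumPy sketch of this idea for the Lasso problem $\min_x \frac12\|Ax-b\|_2^2 + \lambda\|x\|_1$, whose nonsmooth part is separable: each coordinate subproblem is solved exactly by soft-thresholding. The data and parameter values here are made up purely for illustration.
```
import numpy as np

def soft_threshold(z, t):
    """Exact minimizer of 0.5*(x - z)^2 + t*|x| (prox of the l1 term)."""
    return np.sign(z) * np.maximum(np.abs(z) - t, 0.0)

def coordinate_descent_lasso(A, b, lam, num_iter=100):
    m, n = A.shape
    x = np.zeros(n)
    r = A @ x - b                     # residual, kept up to date in O(m) per coordinate
    col_sq = (A * A).sum(axis=0)      # precomputed ||A_i||_2^2
    for _ in range(num_iter):
        for i in range(n):
            # exact minimization over coordinate i with the other coordinates fixed
            z = x[i] - A[:, i] @ r / col_sq[i]
            x_new = soft_threshold(z, lam / col_sq[i])
            r += A[:, i] * (x_new - x[i])
            x[i] = x_new
    return x

A = np.random.randn(200, 50)
x_true = np.zeros(50)
x_true[:5] = 1.0
b = A @ x_true + 1e-3 * np.random.randn(200)
x_hat = coordinate_descent_lasso(A, b, lam=0.1)
print(np.linalg.norm(x_hat - x_true))
```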
### Computational details
- When coordinate $i+1$ is updated, the already updated values of coordinates $1, 2, \ldots, i$ are used (in a sequential sweep over the coordinates)
- Recall the difference between the Jacobi and Gauss-Seidel methods for solving linear systems!
- The order in which coordinates are picked matters
- The cost of updating the full vector is $\sim$ the cost of updating its $n$ components, i.e. coordinate-wise updates of the decision variable do not require working with the full gradient!
## A simple example
- $f(x) = \frac12 \|Ax - b\|_2^2$, where $A \in \mathbb{R}^{m \times n}$ and $m \gg n$
- Pick some coordinate $i$
- The coordinate-wise optimality condition is $[f'(x)]_i = A^{\top}_i(Ax - b) = A^{\top}_i(A_{-i} x_{-i} + A_ix_i - b) = 0$
- Hence $x_i = \dfrac{A^{\top}_i (b - A_{-i} x_{-i})}{\|A_i\|_2^2}$ - the cost is $O(nm)$, which is comparable to computing the full gradient. Can we do it faster?
- Yes, we can! To see how, note that
$$
x_i = \dfrac{A^{\top}_i (b - A_{-i} x_{-i})}{\|A_i\|_2^2} = \dfrac{A^{\top}_i (b - Ax + A_{i}x_i)}{\|A_i\|_2^2} = x_i - \dfrac{A^{\top}_i r}{\|A_i\|_2^2},
$$
where $r = Ax - b$
- Updating $r$ costs $\mathcal{O}(m)$, and computing $A^{\top}_i r$ costs $\mathcal{O}(m)$
- As a result, updating a single coordinate costs $\mathcal{O}(m)$, so updating all coordinates is comparable in cost to computing the full gradient, $\mathcal{O}(mn)$
## How to choose the coordinates?
- Cyclically, from 1 to $n$
- By a random permutation
- The Gauss-Southwell rule: $i = \arg\max_k |f'_k(x)|$ - potentially more expensive than the other rules
```
import numpy as np
import matplotlib.pyplot as plt
plt.rc("text", usetex=True)
m = 1000
n = 100
A = np.random.randn(m, n)
u, s, v = np.linalg.svd(A, compute_uv=True, full_matrices=False)
print(s)
s[-1] = 2
A = u @ np.diag(s) @ v
print(np.linalg.cond(A))
print(np.linalg.cond(A.T @ A))
x_true = np.random.randn(n)
b = A @ x_true + 1e-7 * np.random.randn(m)
def coordinate_descent_lsq(x0, num_iter, sampler="sequential"):
conv = [x0]
x = x0.copy()
r = A @ x0 - b
grad = A.T @ r
if sampler == "sequential" or sampler == "GS":
perm = np.arange(x.shape[0])
elif sampler == "random":
perm = np.random.permutation(x.shape[0])
else:
raise ValueError("Unknown sampler!")
for i in range(num_iter):
for idx in perm:
if sampler == "GS":
idx = np.argmax(np.abs(grad))
new_x_idx = x[idx] - A[:, idx] @ r / (A[:, idx] @ A[:, idx])
r = r + A[:, idx] * (new_x_idx - x[idx])
if sampler == "GS":
grad = A.T @ r
x[idx] = new_x_idx
if sampler == "random":
perm = np.random.permutation(x.shape[0])
conv.append(x.copy())
# print(np.linalg.norm(A @ x - b))
return x, conv
x0 = np.random.randn(n)
num_iter = 500
x_cd_seq, conv_cd_seq = coordinate_descent_lsq(x0, num_iter)
x_cd_rand, conv_cd_rand = coordinate_descent_lsq(x0, num_iter, "random")
x_cd_gs, conv_cd_gs = coordinate_descent_lsq(x0, num_iter, "GS")
# !pip install git+https://github.com/amkatrutsa/liboptpy
import liboptpy.unconstr_solvers as methods
import liboptpy.step_size as ss
def f(x):
res = A @ x - b
return 0.5 * res @ res
def gradf(x):
res = A @ x - b
return A.T @ res
L = np.max(np.linalg.eigvalsh(A.T @ A))
gd = methods.fo.GradientDescent(f, gradf, ss.ConstantStepSize(1 / L))
x_gd = gd.solve(x0=x0, max_iter=num_iter)
acc_gd = methods.fo.AcceleratedGD(f, gradf, ss.ConstantStepSize(1 / L))
x_accgd = acc_gd.solve(x0=x0, max_iter=num_iter)
plt.figure(figsize=(15, 10))
plt.semilogy([np.linalg.norm(A @ x - b) for x in conv_cd_rand], label="Random")
plt.semilogy([np.linalg.norm(A @ x - b) for x in conv_cd_seq], label="Sequential")
plt.semilogy([np.linalg.norm(A @ x - b) for x in conv_cd_gs], label="GS")
plt.semilogy([np.linalg.norm(A @ x - b) for x in gd.get_convergence()], label="GD")
plt.semilogy([np.linalg.norm(A @ x - b) for x in acc_gd.get_convergence()], label="Nesterov")
plt.legend(fontsize=20)
plt.xlabel("Number of iterations", fontsize=24)
plt.ylabel("$\|Ax - b\|_2$", fontsize=24)
plt.grid(True)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.show()
plt.semilogy([np.linalg.norm(x - x_true) for x in conv_cd_rand], label="Random")
plt.semilogy([np.linalg.norm(x - x_true) for x in conv_cd_seq], label="Sequential")
plt.semilogy([np.linalg.norm(x - x_true) for x in conv_cd_gs], label="GS")
plt.semilogy([np.linalg.norm(x - x_true) for x in gd.get_convergence()], label="GD")
plt.semilogy([np.linalg.norm(x - x_true) for x in acc_gd.get_convergence()], label="Nesterov")
plt.legend(fontsize=20)
plt.xlabel("Number of iterations", fontsize=24)
plt.ylabel("$\|x - x^*\|_2$", fontsize=24)
plt.grid(True)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.show()
```
## Convergence
- Sublinear for convex smooth functions with Lipschitz-continuous gradient
- Linear for strongly convex functions
- A direct analogy with gradient descent
- But with many usage-specific caveats
## Typical use cases
- Lasso (again)
- The SMO method for training SVMs - block coordinate descent with block size 2
- Inference in graphical models
# Principal Component Analysis in Shogun
#### By Abhijeet Kislay (GitHub ID: <a href='https://github.com/kislayabhi'>kislayabhi</a>)
This notebook is about finding Principal Components (<a href="http://en.wikipedia.org/wiki/Principal_component_analysis">PCA</a>) of data (<a href="http://en.wikipedia.org/wiki/Unsupervised_learning">unsupervised</a>) in Shogun. Its <a href="http://en.wikipedia.org/wiki/Dimensionality_reduction">dimensionality reduction</a> capabilities are further utilised to show its application in <a href="http://en.wikipedia.org/wiki/Data_compression">data compression</a>, image processing and <a href="http://en.wikipedia.org/wiki/Facial_recognition_system">face recognition</a>.
```
%pylab inline
%matplotlib inline
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
# import all shogun classes
from shogun import *
```
## Some Formal Background (Skip if you just want code examples)
PCA is a useful statistical technique that has found application in fields such as face recognition and image compression, and is a common technique for finding patterns in data of high dimension.
In machine learning problems data is often high dimensional - images, bag-of-word descriptions etc. In such cases we cannot expect the training data to densely populate the space, meaning that there will be large parts in which little is known about the data. Hence it is expected that only a small number of directions are relevant for describing the data to a reasonable accuracy.
Although the data vectors may be very high dimensional, they will typically lie close to a much lower dimensional 'manifold'.
Here we concentrate on linear dimensional reduction techniques. In this approach a high dimensional datapoint $\mathbf{x}$ is 'projected down' to a lower dimensional vector $\mathbf{y}$ by:
$$\mathbf{y}=\mathbf{F}\mathbf{x}+\text{const}.$$
where the matrix $\mathbf{F}\in\mathbb{R}^{\text{M}\times \text{D}}$, with $\text{M}<\text{D}$. Here $\text{M}=\dim(\mathbf{y})$ and $\text{D}=\dim(\mathbf{x})$.
From the above scenario, we assume that
* The number of principal components to use is $\text{M}$.
* The dimension of each data point is $\text{D}$.
* The number of data points is $\text{N}$.
We express the approximation for datapoint $\mathbf{x}^n$ as:$$\mathbf{x}^n \approx \mathbf{c} + \sum\limits_{i=1}^{\text{M}}y_i^n \mathbf{b}^i \equiv \tilde{\mathbf{x}}^n.$$
* Here the vector $\mathbf{c}$ is a constant and defines a point in the lower dimensional space.
* The $\mathbf{b}^i$ define vectors in the lower dimensional space (also known as 'principal component coefficients' or 'loadings').
* The $y_i^n$ are the low dimensional co-ordinates of the data.
Our motive is to find the reconstruction $\tilde{\mathbf{x}}^n$ given the lower dimensional representation $\mathbf{y}^n$(which has components $y_i^n,i = 1,...,\text{M})$. For a data space of dimension $\dim(\mathbf{x})=\text{D}$, we hope to accurately describe the data using only a small number $(\text{M}\ll \text{D})$ of coordinates of $\mathbf{y}$.
To determine the best lower dimensional representation it is convenient to use the square distance error between $\mathbf{x}$ and its reconstruction $\tilde{\mathbf{x}}$:$$\text{E}(\mathbf{B},\mathbf{Y},\mathbf{c})=\sum\limits_{n=1}^{\text{N}}\sum\limits_{i=1}^{\text{D}}[x_i^n - \tilde{x}_i^n]^2.$$
* Here the basis vectors are defined as $\mathbf{B} = [\mathbf{b}^1,...,\mathbf{b}^\text{M}]$ (defining $[\text{B}]_{i,j} = b_i^j$).
* Corresponding low dimensional coordinates are defined as $\mathbf{Y} = [\mathbf{y}^1,...,\mathbf{y}^\text{N}].$
* Also, $x_i^n$ and $\tilde{x}_i^n$ represents the coordinates of the data points for the original and the reconstructed data respectively.
* The bias $\mathbf{c}$ is given by the mean of the data $\sum_n\mathbf{x}^n/\text{N}$.
Therefore, for simplification purposes we centre our data, so as to set $\mathbf{c}$ to zero. Now we concentrate on finding the optimal basis $\mathbf{B}$( which has the components $\mathbf{b}^i, i=1,...,\text{M} $).
#### Deriving the optimal linear reconstruction
To find the best basis vectors $\mathbf{B}$ and corresponding low dimensional coordinates $\mathbf{Y}$, we may minimize the sum of squared differences between each vector $\mathbf{x}$ and its reconstruction $\tilde{\mathbf{x}}$:
$\text{E}(\mathbf{B},\mathbf{Y}) = \sum\limits_{n=1}^{\text{N}}\sum\limits_{i=1}^{\text{D}}\left[x_i^n - \sum\limits_{j=1}^{\text{M}}y_j^nb_i^j\right]^2 = \text{trace} \left( (\mathbf{X}-\mathbf{B}\mathbf{Y})^T(\mathbf{X}-\mathbf{B}\mathbf{Y}) \right)$
where $\mathbf{X} = [\mathbf{x}^1,...,\mathbf{x}^\text{N}].$
Considering the above equation under the orthonormality constraint $\mathbf{B}^T\mathbf{B} = \mathbf{I}$ (i.e the basis vectors are mutually orthogonal and of unit length), we differentiate it w.r.t $y_k^n$. The squared error $\text{E}(\mathbf{B},\mathbf{Y})$ therefore has zero derivative when:
$y_k^n = \sum_i b_i^kx_i^n$
By substituting this solution in the above equation, the objective becomes
$\text{E}(\mathbf{B}) = (\text{N}-1)\left[\text{trace}(\mathbf{S}) - \text{trace}\left(\mathbf{S}\mathbf{B}\mathbf{B}^T\right)\right],$
where $\mathbf{S}$ is the sample covariance matrix of the data.
To minimise equation under the constraint $\mathbf{B}^T\mathbf{B} = \mathbf{I}$, we use a set of Lagrange Multipliers $\mathbf{L}$, so that the objective is to minimize:
$-\text{trace}\left(\mathbf{S}\mathbf{B}\mathbf{B}^T\right)+\text{trace}\left(\mathbf{L}\left(\mathbf{B}^T\mathbf{B} - \mathbf{I}\right)\right).$
Since the constraint is symmetric, we can assume that $\mathbf{L}$ is also symmetric. Differentiating with respect to $\mathbf{B}$ and equating to zero we obtain that at the optimum
$\mathbf{S}\mathbf{B} = \mathbf{B}\mathbf{L}$.
This is a form of eigen-equation so that a solution is given by taking $\mathbf{L}$ to be diagonal and $\mathbf{B}$ as the matrix whose columns are the corresponding eigenvectors of $\mathbf{S}$. In this case,
$\text{trace}\left(\mathbf{S}\mathbf{B}\mathbf{B}^T\right) =\text{trace}(\mathbf{L}),$
which is the sum of the eigenvalues corresponding to the eigenvectors forming $\mathbf{B}$. Since we wish to minimise $\text{E}(\mathbf{B})$, we take the eigenvectors with the largest corresponding eigenvalues.
Whilst the solution to this eigen-problem is unique, this only serves to define the solution subspace since one may rotate and scale $\mathbf{B}$ and $\mathbf{Y}$ such that the value of the squared loss is exactly the same. The justification for choosing the non-rotated eigen solution is given by the additional requirement that the principal components correspond to directions of maximal variance.
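Before moving on, here is a small NumPy-only sanity check of this derivation (toy data, illustrative names): we centre the data, eigen-decompose the sample covariance, keep the top $\text{M}$ eigenvectors as $\mathbf{B}$, and form the reconstruction $\tilde{\mathbf{x}}^n = \mathbf{c} + \mathbf{B}\mathbf{y}^n$.
```
import numpy as np

# toy data: D = 5 dimensions, N = 200 points, stored as columns as in the notation above
D, N, M = 5, 200, 2
X = np.random.randn(D, N) * np.array([5., 3., 1., .5, .1])[:, None]

c = X.mean(axis=1, keepdims=True)      # bias c = mean of the data
Xc = X - c                             # centre the data
S = Xc @ Xc.T / (N - 1)                # sample covariance, D x D

eigval, eigvec = np.linalg.eigh(S)     # eigenvalues in ascending order
B = eigvec[:, ::-1][:, :M]             # top-M eigenvectors as the basis B
Y = B.T @ Xc                           # low-dimensional coordinates y^n
X_tilde = c + B @ Y                    # reconstruction

print("relative reconstruction error:", np.linalg.norm(X - X_tilde) / np.linalg.norm(X))
```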
#### Maximum variance criterion
We aim to find that single direction $\mathbf{b}$ such that, when the data is projected onto this direction, the variance of this projection is maximal amongst all possible such projections.
The projection of a datapoint onto a direction $\mathbf{b}$ is $\mathbf{b}^T\mathbf{x}^n$ for a unit length vector $\mathbf{b}$. Hence the sum of squared projections is: $$\sum\limits_{n}\left(\mathbf{b}^T\mathbf{x}^n\right)^2 = \mathbf{b}^T\left[\sum\limits_{n}\mathbf{x}^n(\mathbf{x}^n)^T\right]\mathbf{b} = (\text{N}-1)\mathbf{b}^T\mathbf{S}\mathbf{b} = \lambda(\text{N} - 1)$$
which, ignoring constants, is simply the negative of the equation for a single retained eigenvector $\mathbf{b}$ (with $\mathbf{S}\mathbf{b} = \lambda\mathbf{b}$). Hence the optimal single $\mathbf{b}$ which maximises the projection variance is given by the eigenvector corresponding to the largest eigenvalue of $\mathbf{S}$. The eigenvector with the second largest eigenvalue corresponds to the next orthogonal optimal direction, and so on. This explains why, despite the squared loss equation being invariant with respect to arbitrary rotation of the basis vectors, the ones given by the eigen-decomposition have the additional property that they correspond to directions of maximal variance. These maximal variance directions found by PCA are called the $\text{principal}$ $\text{directions}.$
There are two eigenvalue methods through which shogun can perform PCA namely
* Eigenvalue Decomposition Method.
* Singular Value Decomposition.
#### EVD vs SVD
* The EVD viewpoint requires that one compute the eigenvalues and eigenvectors of the covariance matrix, which is the product of $\mathbf{X}\mathbf{X}^\text{T}$, where $\mathbf{X}$ is the data matrix. Since the covariance matrix is symmetric, the matrix is diagonalizable, and the eigenvectors can be normalized such that they are orthonormal:
$\mathbf{S}=\frac{1}{\text{N}-1}\mathbf{X}\mathbf{X}^\text{T},$
where the $\text{D}\times\text{N}$ matrix $\mathbf{X}$ contains all the data vectors: $\mathbf{X}=[\mathbf{x}^1,...,\mathbf{x}^\text{N}].$
Writing the $\text{D}\times\text{N}$ matrix of eigenvectors as $\mathbf{E}$ and the eigenvalues as an $\text{N}\times\text{N}$ diagonal matrix $\mathbf{\Lambda}$, the eigen-decomposition of the covariance $\mathbf{S}$ is
$\mathbf{X}\mathbf{X}^\text{T}\mathbf{E}=\mathbf{E}\mathbf{\Lambda}\Longrightarrow\mathbf{X}^\text{T}\mathbf{X}\mathbf{X}^\text{T}\mathbf{E}=\mathbf{X}^\text{T}\mathbf{E}\mathbf{\Lambda}\Longrightarrow\mathbf{X}^\text{T}\mathbf{X}\tilde{\mathbf{E}}=\tilde{\mathbf{E}}\mathbf{\Lambda},$
where we defined $\tilde{\mathbf{E}}=\mathbf{X}^\text{T}\mathbf{E}$. The final expression above represents the eigenvector equation for $\mathbf{X}^\text{T}\mathbf{X}.$ This is a matrix of dimensions $\text{N}\times\text{N}$ so that calculating the eigen-decomposition takes $\mathcal{O}(\text{N}^3)$ operations, compared with $\mathcal{O}(\text{D}^3)$ operations in the original high-dimensional space. We then can therefore calculate the eigenvectors $\tilde{\mathbf{E}}$ and eigenvalues $\mathbf{\Lambda}$ of this matrix more easily. Once found, we use the fact that the eigenvalues of $\mathbf{S}$ are given by the diagonal entries of $\mathbf{\Lambda}$ and the eigenvectors by
$\mathbf{E}=\mathbf{X}\tilde{\mathbf{E}}\mathbf{\Lambda}^{-1}$
* On the other hand, applying SVD to the data matrix $\mathbf{X}$ follows like:
$\mathbf{X}=\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}$
where $\mathbf{U}^\text{T}\mathbf{U}=\mathbf{I}_\text{D}$ and $\mathbf{V}^\text{T}\mathbf{V}=\mathbf{I}_\text{N}$ and $\mathbf{\Sigma}$ is a diagonal matrix of the (positive) singular values. We assume that the decomposition has ordered the singular values so that the upper left diagonal element of $\mathbf{\Sigma}$ contains the largest singular value.
Attempting to construct the covariance matrix $(\mathbf{X}\mathbf{X}^\text{T})$from this decomposition gives:
$\mathbf{X}\mathbf{X}^\text{T} = \left(\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}\right)\left(\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}\right)^\text{T}$
$\mathbf{X}\mathbf{X}^\text{T} = \left(\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}\right)\left(\mathbf{V}\mathbf{\Sigma}\mathbf{U}^\text{T}\right)$
and since $\mathbf{V}$ is an orthogonal matrix $\left(\mathbf{V}^\text{T}\mathbf{V}=\mathbf{I}\right),$
$\mathbf{X}\mathbf{X}^\text{T}=\left(\mathbf{U}\mathbf{\Sigma}^\mathbf{2}\mathbf{U}^\text{T}\right)$
Since it is in the form of an eigen-decomposition, the PCA solution can equivalently be obtained by performing the SVD of $\mathbf{X}$, for which the eigenvectors are then given by $\mathbf{U}$, and the corresponding eigenvalues by the squares of the singular values.
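A quick NumPy sanity check of this equivalence (toy data, names illustrative): the eigenvalues of $\mathbf{X}\mathbf{X}^\text{T}$ equal the squared singular values of $\mathbf{X}$, and the eigenvectors agree with the left singular vectors up to sign.
```
import numpy as np

D, N = 4, 300
X = np.random.randn(D, N)
X -= X.mean(axis=1, keepdims=True)       # centred data matrix

# EVD route: eigen-decompose X X^T (sorted in descending order)
eigval, E = np.linalg.eigh(X @ X.T)
eigval, E = eigval[::-1], E[:, ::-1]

# SVD route: decompose X directly
U, s, Vt = np.linalg.svd(X, full_matrices=False)

print(np.allclose(eigval, s**2))                      # eigenvalues == squared singular values
print(np.allclose(np.abs(U.T @ E).diagonal(), 1.0))   # eigenvectors agree up to sign
```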
#### [CPCA](http://www.shogun-toolbox.org/doc/en/3.0.0/classshogun_1_1CPCA.html) Class Reference (Shogun)
CPCA class of Shogun inherits from the [CPreprocessor](http://www.shogun-toolbox.org/doc/en/3.0.0/classshogun_1_1CPreprocessor.html) class. Preprocessors are transformation functions that don't change the domain of the input features. Specifically, CPCA performs principal component analysis on the input vectors and keeps only the specified number of eigenvectors. On preprocessing, the stored covariance matrix is used to project vectors into eigenspace.
Performance of PCA depends on the algorithm used according to the situation in hand.
Our PCA preprocessor class provides 3 method options to compute the transformation matrix:
* $\text{PCA(EVD)}$ sets $\text{PCAmethod == EVD}$ : Eigen Value Decomposition of Covariance Matrix $(\mathbf{XX^T}).$
The covariance matrix $\mathbf{XX^T}$ is first formed internally and then
its eigenvectors and eigenvalues are computed using QR decomposition of the matrix.
The time complexity of this method is $\mathcal{O}(D^3)$ and should be used when $\text{N > D.}$
* $\text{PCA(SVD)}$ sets $\text{PCAmethod == SVD}$ : Singular Value Decomposition of feature matrix $\mathbf{X}$.
The transpose of feature matrix, $\mathbf{X^T}$, is decomposed using SVD. $\mathbf{X^T = UDV^T}.$
The matrix V in this decomposition contains the required eigenvectors and
the diagonal entries of the diagonal matrix D correspond to the non-negative
eigenvalues. The time complexity of this method is $\mathcal{O}(DN^2)$ and should be used when $\text{N < D.}$
* $\text{PCA(AUTO)}$ sets $\text{PCAmethod == AUTO}$ : This mode automagically chooses one of the above modes for the user based on whether $\text{N>D}$ (chooses $\text{EVD}$) or $\text{N<D}$ (chooses $\text{SVD}$)
## PCA on 2D data
#### Step 1: Get some data
We will generate the toy data by adding orthogonal noise to a set of points lying on an arbitrary 2d line. We expect PCA to recover this line, which is a one-dimensional linear sub-space.
```
#number of data points.
n=100
#generate a random 2d line(y1 = mx1 + c)
m = random.randint(1,10)
c = random.randint(1,10)
x1 = random.random_integers(-20,20,n)
y1=m*x1+c
#generate the noise.
noise=random.random_sample([n]) * random.random_integers(-35,35,n)
#make the noise orthogonal to the line y=mx+c and add it.
x=x1 + noise*m/sqrt(1+square(m))
y=y1 + noise/sqrt(1+square(m))
twoD_obsmatrix=array([x,y])
#to visualise the data we must plot it.
rcParams['figure.figsize'] = 7, 7
figure,axis=subplots(1,1)
xlim(-50,50)
ylim(-50,50)
axis.plot(twoD_obsmatrix[0,:],twoD_obsmatrix[1,:],'o',color='green',markersize=6)
#the line from which we generated the data is plotted in red
axis.plot(x1[:],y1[:],linewidth=0.3,color='red')
title('One-Dimensional sub-space with noise')
xlabel("x axis")
_=ylabel("y axis")
```
#### Step 2: Subtract the mean.
For PCA to work properly, we must subtract the mean from each of the data dimensions. The mean subtracted is the average across each dimension. So, all the $x$ values have $\bar{x}$ subtracted, and all the $y$ values have $\bar{y}$ subtracted from them, where:$$\bar{\mathbf{x}} = \frac{\sum\limits_{i=1}^{n}x_i}{n}$$ $\bar{\mathbf{x}}$ denotes the mean of the $x_i^{'s}$
##### Shogun's way of doing things :
Preprocessor PCA performs principal component analysis on input feature vectors/matrices. It provides an interface to set the target dimension via the $\text{put('target_dim', target_dim)}$ method. When the $\text{init()}$ method in $\text{PCA}$ is called with a proper
feature matrix $\text{X}$ (with say $\text{N}$ vectors and $\text{D}$ feature dimensions), a transformation matrix is computed and stored internally. It inherently also centralizes the data by subtracting its mean from it.
```
#convert the observation matrix into dense feature matrix.
train_features = features(twoD_obsmatrix)
#PCA(EVD) is chosen since N=100 and D=2 (N>D).
#However we can also use PCA(AUTO) as it will automagically choose the appropriate method.
preprocessor = PCA(EVD)
#since we are projecting down the 2d data, the target dim is 1. But here the exhaustive method is detailed by
#setting the target dimension to 2 to visualize both the eigen vectors.
#However, in future examples we will get rid of this step by implementing it directly.
preprocessor.put('target_dim', 2)
#Centralise the data by subtracting its mean from it.
preprocessor.init(train_features)
#get the mean for the respective dimensions.
mean_datapoints=preprocessor.get_real_vector('mean_vector')
mean_x=mean_datapoints[0]
mean_y=mean_datapoints[1]
```
#### Step 3: Calculate the covariance matrix
To understand the relationship between 2 dimensions we define $\text{covariance}$. It is a measure of how much the dimensions vary from the mean $with$ $respect$ $to$ $each$ $other.$$$cov(X,Y)=\frac{\sum\limits_{i=1}^{n}(X_i-\bar{X})(Y_i-\bar{Y})}{n-1}$$
A useful way to get all the possible covariance values between all the different dimensions is to calculate them all and put them in a matrix.
Example: For a 3d dataset with usual dimensions of $x,y$ and $z$, the covariance matrix has 3 rows and 3 columns, and the values are this:
$$\mathbf{S} = \quad\begin{pmatrix}cov(x,x)&cov(x,y)&cov(x,z)\\cov(y,x)&cov(y,y)&cov(y,z)\\cov(z,x)&cov(z,y)&cov(z,z)\end{pmatrix}$$
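As a quick aside (plain NumPy, data made up for illustration), `np.cov` builds exactly this matrix when each row of its input holds one dimension and each column one observation:
```
import numpy as np

# 3 dimensions (rows) x 10 observations (columns)
data = np.random.randn(3, 10)
S = np.cov(data)             # 3 x 3 covariance matrix, normalised by n - 1
print(S.shape)               # (3, 3)
print(np.allclose(S, S.T))   # True: covariance matrices are symmetric
```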
#### Step 4: Calculate the eigenvectors and eigenvalues of the covariance matrix
Find the eigenvectors $e^1,....e^M$ of the covariance matrix $\mathbf{S}$.
##### Shogun's way of doing things :
Step 3 and Step 4 are directly implemented by the PCA preprocessor of the Shogun toolbox. The transformation matrix is essentially a $\text{D}$$\times$$\text{M}$ matrix, the columns of which correspond to the eigenvectors of the covariance matrix $(\text{X}\text{X}^\text{T})$ having the top $\text{M}$ eigenvalues.
```
#Get the eigenvectors(We will get two of these since we set the target to 2).
E = preprocessor.get_real_matrix('transformation_matrix')
#Get all the eigenvalues returned by PCA.
eig_value=preprocessor.get_real_vector('eigenvalues_vector')
e1 = E[:,0]
e2 = E[:,1]
eig_value1 = eig_value[0]
eig_value2 = eig_value[1]
```
#### Step 5: Choosing components and forming a feature vector.
Let's visualize the eigenvectors and decide which to choose as the $principal$ $component$ of the data set.
```
#find out the M eigenvectors corresponding to top M number of eigenvalues and store it in E
#Here M=1
#slope of e1 & e2
m1=e1[1]/e1[0]
m2=e2[1]/e2[0]
#generate the two lines
x1=range(-50,50)
x2=x1
y1=multiply(m1,x1)
y2=multiply(m2,x2)
#plot the data along with those two eigenvectors
figure, axis = subplots(1,1)
xlim(-50, 50)
ylim(-50, 50)
axis.plot(x[:], y[:],'o',color='green', markersize=5, label="green")
axis.plot(x1[:], y1[:], linewidth=0.7, color='black')
axis.plot(x2[:], y2[:], linewidth=0.7, color='blue')
p1 = Rectangle((0, 0), 1, 1, fc="black")
p2 = Rectangle((0, 0), 1, 1, fc="blue")
legend([p1,p2],["1st eigenvector","2nd eigenvector"],loc='center left', bbox_to_anchor=(1, 0.5))
title('Eigenvectors selection')
xlabel("x axis")
_=ylabel("y axis")
```
In the above figure, the blue line is a good fit of the data. It shows the most significant relationship between the data dimensions.
It turns out that the eigenvector with the $highest$ eigenvalue is the $principal$ $component$ of the data set.
Form the matrix $\mathbf{E}=[\mathbf{e}^1,...,\mathbf{e}^M].$
Here $\text{M}$ represents the target dimension of our final projection
```
#The eigenvector corresponding to the higher eigenvalue (i.e. eig_value2) is chosen (i.e. e2).
#E is the feature vector.
E=e2
```
#### Step 6: Projecting the data to its Principal Components.
This is the final step in PCA. Once we have chosen the components (eigenvectors) that we wish to keep in our data and formed a feature vector, we simply take the vector and multiply it on the left of the original dataset.
The lower dimensional representation of each data point $\mathbf{x}^n$ is given by
$\mathbf{y}^n=\mathbf{E}^T(\mathbf{x}^n-\mathbf{m})$
Here $\mathbf{E}^T$ is the matrix with the eigenvectors in its rows, with the most significant eigenvector at the top. It is multiplied by the mean adjusted data, with data items in each column and each row holding a separate dimension.
##### Shogun's way of doing things :
Step 6 can be performed by shogun's PCA preprocessor as follows:
The transformation matrix that we got after $\text{init()}$ is used to transform all $\text{D-dim}$ feature matrices (with $\text{D}$ feature dimensions) supplied, via $\text{apply_to_feature_matrix}$ methods. This transformation outputs the $\text{M-Dim}$ approximation of all these input vectors and matrices (where $\text{M}$ $\leq$ $\text{min(D,N)}$).
```
#transform all 2-dimensional feature matrices to target-dimensional approximations.
yn=preprocessor.apply_to_feature_matrix(train_features)
#Since, here we are manually trying to find the eigenvector corresponding to the top eigenvalue.
#The 2nd row of yn is choosen as it corresponds to the required eigenvector e2.
yn1=yn[1,:]
```
Step 5 and Step 6 can be applied directly with Shogun's PCA preprocessor (from next example). It has been done manually here to show the exhaustive nature of Principal Component Analysis.
#### Step 7: Form the approximate reconstruction of the original data $\mathbf{x}^n$
The approximate reconstruction of the original datapoint $\mathbf{x}^n$ is given by : $\tilde{\mathbf{x}}^n\approx\text{m}+\mathbf{E}\mathbf{y}^n$
```
x_new=(yn1 * E[0]) + tile(mean_x,[n,1]).T[0]
y_new=(yn1 * E[1]) + tile(mean_y,[n,1]).T[0]
```
The new data is plotted below
```
figure, axis = subplots(1,1)
xlim(-50, 50)
ylim(-50, 50)
axis.plot(x[:], y[:],'o',color='green', markersize=5, label="green")
axis.plot(x_new, y_new, 'o', color='blue', markersize=5, label="red")
title('PCA Projection of 2D data into 1D subspace')
xlabel("x axis")
ylabel("y axis")
#add some legend for information
p1 = Rectangle((0, 0), 1, 1, fc="r")
p2 = Rectangle((0, 0), 1, 1, fc="g")
p3 = Rectangle((0, 0), 1, 1, fc="b")
legend([p1,p2,p3],["normal projection","2d data","1d projection"],loc='center left', bbox_to_anchor=(1, 0.5))
#plot the projections in red:
for i in range(n):
axis.plot([x[i],x_new[i]],[y[i],y_new[i]] , color='red')
```
## PCA on a 3d data.
#### Step1: Get some data
We generate points from a plane and then add random noise orthogonal to it. The general equation of a plane is: $$\text{a}\mathbf{x}+\text{b}\mathbf{y}+\text{c}\mathbf{z}+\text{d}=0$$
```
rcParams['figure.figsize'] = 8,8
#number of points
n=100
#generate the data
a=random.randint(1,20)
b=random.randint(1,20)
c=random.randint(1,20)
d=random.randint(1,20)
x1=random.random_integers(-20,20,n)
y1=random.random_integers(-20,20,n)
z1=-(a*x1+b*y1+d)/c
#generate the noise
noise=random.random_sample([n])*random.random_integers(-30,30,n)
#the normal unit vector is [a,b,c]/magnitude
magnitude=sqrt(square(a)+square(b)+square(c))
normal_vec=array([a,b,c]/magnitude)
#add the noise orthogonally
x=x1+noise*normal_vec[0]
y=y1+noise*normal_vec[1]
z=z1+noise*normal_vec[2]
threeD_obsmatrix=array([x,y,z])
#to visualize the data, we must plot it.
from mpl_toolkits.mplot3d import Axes3D
fig = pyplot.figure()
ax=fig.add_subplot(111, projection='3d')
#plot the noisy data generated by distorting a plane
ax.scatter(x, y, z,marker='o', color='g')
ax.set_xlabel('x label')
ax.set_ylabel('y label')
ax.set_zlabel('z label')
legend([p2],["3d data"],loc='center left', bbox_to_anchor=(1, 0.5))
title('Two dimensional subspace with noise')
xx, yy = meshgrid(range(-30,30), range(-30,30))
zz=-(a * xx + b * yy + d) / c
```
#### Step 2: Subtract the mean.
```
#convert the observation matrix into dense feature matrix.
train_features = features(threeD_obsmatrix)
#PCA(EVD) is chosen since N=100 and D=3 (N>D).
#However we can also use PCA(AUTO) as it will automagically choose the appropriate method.
preprocessor = PCA(EVD)
#If we set the target dimension to 2, Shogun would automagically preserve the required 2 eigenvectors(out of 3) according to their
#eigenvalues.
preprocessor.put('target_dim', 2)
preprocessor.init(train_features)
#get the mean for the respective dimensions.
mean_datapoints=preprocessor.get_real_vector('mean_vector')
mean_x=mean_datapoints[0]
mean_y=mean_datapoints[1]
mean_z=mean_datapoints[2]
```
#### Step 3 & Step 4: Calculate the eigenvectors of the covariance matrix
```
#get the required eigenvectors corresponding to top 2 eigenvalues.
E = preprocessor.get_real_matrix('transformation_matrix')
```
#### Steps 5: Choosing components and forming a feature vector.
Since we performed PCA for a target $\dim = 2$ for the $3 \dim$ data, we are directly given
the two required eigenvectors in $\mathbf{E}$
E is automagically filled by setting target dimension = M. This is different from the 2d data example where we implemented this step manually.
#### Step 6: Projecting the data to its Principal Components.
```
#This can be performed by shogun's PCA preprocessor as follows:
yn=preprocessor.apply_to_feature_matrix(train_features)
```
#### Step 7: Form the approximate reconstruction of the original data $\mathbf{x}^n$
The approximate reconstruction of the original datapoint $\mathbf{x}^n$ is given by : $\tilde{\mathbf{x}}^n\approx\text{m}+\mathbf{E}\mathbf{y}^n$
```
new_data=dot(E,yn)
x_new=new_data[0,:]+tile(mean_x,[n,1]).T[0]
y_new=new_data[1,:]+tile(mean_y,[n,1]).T[0]
z_new=new_data[2,:]+tile(mean_z,[n,1]).T[0]
#all the above points lie on the same plane. To make it more clear we will plot the projection also.
fig=pyplot.figure()
ax=fig.add_subplot(111, projection='3d')
ax.scatter(x, y, z,marker='o', color='g')
ax.set_xlabel('x label')
ax.set_ylabel('y label')
ax.set_zlabel('z label')
legend([p1,p2,p3],["normal projection","3d data","2d projection"],loc='center left', bbox_to_anchor=(1, 0.5))
title('PCA Projection of 3D data into 2D subspace')
for i in range(100):
ax.scatter(x_new[i], y_new[i], z_new[i],marker='o', color='b')
ax.plot([x[i],x_new[i]],[y[i],y_new[i]],[z[i],z_new[i]],color='r')
```
#### PCA Performance
Up till now, we were using the Eigenvalue Decomposition method to compute the transformation matrix $\text{(N>D)}$, but for the next example $\text{(N<D)}$ we will be using Singular Value Decomposition.
## Practical Example : Eigenfaces
The problem with the image representation we are given is its high dimensionality. Two-dimensional $\text{p} \times \text{q}$ grayscale images span a $\text{m=pq}$ dimensional vector space, so an image with $\text{100}\times\text{100}$ pixels lies in a $\text{10,000}$ dimensional image space already.
The question is, are all dimensions really useful for us?
$\text{Eigenfaces}$ are based on the dimensionality reduction approach of $\text{Principal Component Analysis (PCA)}$. The basic idea is to treat each image as a vector in a high dimensional space. Then, $\text{PCA}$ is applied to the set of images to produce a new reduced subspace that captures most of the variability between the input images. The $\text{Principal Component Vectors}$ (eigenvectors of the sample covariance matrix) are called the $\text{Eigenfaces}$. Every input image can be represented as a linear combination of these eigenfaces by projecting the image onto the new eigenfaces space. Thus, we can perform the identification process by matching in this reduced space. An input image is transformed into the $\text{eigenspace}$, and the nearest face is identified using a $\text{Nearest Neighbour}$ approach.
#### Step 1: Get some data.
Here data means those Images which will be used for training purposes.
```
rcParams['figure.figsize'] = 10, 10
import os
def get_imlist(path):
""" Returns a list of filenames for all jpg images in a directory"""
return [os.path.join(path,f) for f in os.listdir(path) if f.endswith('.pgm')]
#set path of the training images
path_train=os.path.join(SHOGUN_DATA_DIR, 'att_dataset/training/')
#set no. of rows that the images will be resized.
k1=100
#set no. of columns that the images will be resized.
k2=100
filenames = get_imlist(path_train)
filenames = array(filenames)
#n is total number of images that has to be analysed.
n=len(filenames)
```
Lets have a look on the data:
```
# we will be using this often to visualize the images out there.
def showfig(image):
imgplot=imshow(image, cmap='gray')
imgplot.axes.get_xaxis().set_visible(False)
imgplot.axes.get_yaxis().set_visible(False)
from PIL import Image
from scipy import misc
# to get a hang of the data, lets see some part of the dataset images.
fig = pyplot.figure()
title('The Training Dataset')
for i in range(49):
fig.add_subplot(7,7,i+1)
train_img=array(Image.open(filenames[i]).convert('L'))
train_img=misc.imresize(train_img, [k1,k2])
showfig(train_img)
```
Represent every image $I_i$ as a vector $\Gamma_i$
```
#To form the observation matrix obs_matrix.
#read the 1st image.
train_img = array(Image.open(filenames[0]).convert('L'))
#resize it to k1 rows and k2 columns
train_img=misc.imresize(train_img, [k1,k2])
#since features accepts only data of float64 datatype, we do a type conversion
train_img=array(train_img, dtype='double')
#flatten it to make it a row vector.
train_img=train_img.flatten()
# repeat the above for all images and stack all those vectors together in a matrix
for i in range(1,n):
temp=array(Image.open(filenames[i]).convert('L'))
temp=misc.imresize(temp, [k1,k2])
temp=array(temp, dtype='double')
temp=temp.flatten()
train_img=vstack([train_img,temp])
#form the observation matrix
obs_matrix=train_img.T
```
#### Step 2: Subtract the mean
It is very important that the face images $I_1,I_2,...,I_M$ are $centered$ and of the $same$ size
We observe here that the no. of $\dim$ for each image is far greater than no. of training images. This calls for the use of $\text{SVD}$.
Setting the $\text{PCA}$ in the $\text{AUTO}$ mode does this automagically according to the situation.
```
train_features = features(obs_matrix)
preprocessor=PCA(AUTO)
preprocessor.put('target_dim', 100)
preprocessor.init(train_features)
mean=preprocessor.get_real_vector('mean_vector')
```
#### Step 3 & Step 4: Calculate the eigenvectors and eigenvalues of the covariance matrix.
```
#get the required eigenvectors corresponding to top 100 eigenvalues
E = preprocessor.get_real_matrix('transformation_matrix')
#lets see how these eigenfaces/eigenvectors look like:
fig1 = pyplot.figure()
title('Top 20 Eigenfaces')
for i in range(20):
a = fig1.add_subplot(5,4,i+1)
eigen_faces=E[:,i].reshape([k1,k2])
showfig(eigen_faces)
```
These 20 eigenfaces are not sufficient for a good image reconstruction. Having more eigenvectors gives us the most flexibility in the number of faces we can reconstruct. Though we are adding vectors with low variance, they are in directions of change nonetheless, and an external image that is not in our database could in fact need these eigenvectors to get even relatively close to it. But at the same time we must also keep in mind that adding excessive eigenvectors results in addition of little or no variance, slowing down the process.
Clearly a tradeoff is required.
Here we set M=100.
#### Step 5: Choosing components and forming a feature vector.
Since we set target $\dim = 100$ for this $n \dim$ data, we are directly given the $100$ required eigenvectors in $\mathbf{E}$
E is automagically filled. This is different from the 2d data example where we implemented this step manually.
#### Step 6: Projecting the data to its Principal Components.
The lower dimensional representation of each data point $\mathbf{x}^n$ is given by $$\mathbf{y}^n=\mathbf{E}^T(\mathbf{x}^n-\mathbf{m})$$
```
#we perform the required dot product.
yn=preprocessor.apply_to_feature_matrix(train_features)
```
#### Step 7: Form the approximate reconstruction of the original image $I_n$
The approximate reconstruction of the original datapoint $\mathbf{x}^n$ is given by : $\mathbf{x}^n\approx\text{m}+\mathbf{E}\mathbf{y}^n$
```
re=tile(mean,[n,1]).T[0] + dot(E,yn)
#lets plot the reconstructed images.
fig2 = pyplot.figure()
title('Reconstructed Images from 100 eigenfaces')
for i in range(1,50):
re1 = re[:,i].reshape([k1,k2])
fig2.add_subplot(7,7,i)
showfig(re1)
```
## Recognition part.
In our face recognition process using the Eigenfaces approach, in order to recognize an unseen image, we proceed with the same preprocessing steps as applied to the training images.
Test images are represented in terms of eigenface coefficients by projecting them into the face space $\text{(eigenspace)}$ calculated during training. A test sample is recognized by measuring the similarity distance between it and all samples in the training set. The similarity measure is a distance metric calculated between two vectors. The traditional Eigenface approach utilizes the $\text{Euclidean distance}$.
```
#set path of the training images
path_train=os.path.join(SHOGUN_DATA_DIR, 'att_dataset/testing/')
test_files=get_imlist(path_train)
test_img=array(Image.open(test_files[0]).convert('L'))
rcParams.update({'figure.figsize': (3, 3)})
#we plot the test image , for which we have to identify a good match from the training images we already have
fig = pyplot.figure()
title('The Test Image')
showfig(test_img)
#We flatten out our test image just the way we have done for the other images
test_img=misc.imresize(test_img, [k1,k2])
test_img=array(test_img, dtype='double')
test_img=test_img.flatten()
#We centralise the test image by subtracting the mean from it.
test_f=test_img-mean
```
Here we have to project our training image as well as the test image on the PCA subspace.
The Eigenfaces method then performs face recognition by:
1. Projecting all training samples into the PCA subspace.
2. Projecting the query image into the PCA subspace.
3. Finding the nearest neighbour between the projected training images and the projected query image.
```
#We have already projected our training images into pca subspace as yn.
train_proj = yn
#Projecting our test image into pca subspace
test_proj = dot(E.T, test_f)
```
##### Shogun's way of doing things:
Shogun uses [CEuclideanDistance](http://www.shogun-toolbox.org/doc/en/3.0.0/classshogun_1_1CEuclideanDistance.html) class to compute the familiar Euclidean distance for real valued features. It computes the square root of the sum of squared disparity between the corresponding feature dimensions of two data points.
$d(\mathbf{x},\mathbf{x}') = \sqrt{\sum\limits_{i=1}^{n} |x_i - x'_i|^2}$
```
#To get Euclidean distance as the distance measure use EuclideanDistance.
workfeat = features(mat(train_proj))
testfeat = features(mat(test_proj).T)
RaRb=EuclideanDistance(testfeat, workfeat)
#The distance between one test image w.r.t all the training is stacked in matrix d.
d=empty([n,1])
for i in range(n):
d[i]= RaRb.distance(0,i)
#The one having the minimum distance is found out
min_distance_index = d.argmin()
iden=array(Image.open(filenames[min_distance_index]))
title('Identified Image')
showfig(iden)
```
## References:
[1] David Barber. Bayesian Reasoning and Machine Learning.
[2] Lindsay I Smith. A tutorial on Principal Component Analysis.
[3] Philipp Wagner. Face Recognition with GNU Octave/MATLAB.
# Forecasting in statsmodels
This notebook describes forecasting using time series models in statsmodels.
**Note**: this notebook applies only to the state space model classes, which are:
- `sm.tsa.SARIMAX`
- `sm.tsa.UnobservedComponents`
- `sm.tsa.VARMAX`
- `sm.tsa.DynamicFactor`
```
%matplotlib inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
macrodata = sm.datasets.macrodata.load_pandas().data
macrodata.index = pd.period_range('1959Q1', '2009Q3', freq='Q')
```
## Basic example
A simple example is to use an AR(1) model to forecast inflation. Before forecasting, let's take a look at the series:
```
endog = macrodata['infl']
endog.plot(figsize=(15, 5))
```
### Constructing and estimating the model
The next step is to formulate the econometric model that we want to use for forecasting. In this case, we will use an AR(1) model via the `SARIMAX` class in statsmodels.
After constructing the model, we need to estimate its parameters. This is done using the `fit` method. The `summary` method produces several convenient tables showing the results.
```
# Construct the model
mod = sm.tsa.SARIMAX(endog, order=(1, 0, 0), trend='c')
# Estimate the parameters
res = mod.fit()
print(res.summary())
```
### Forecasting
Out-of-sample forecasts are produced using the `forecast` or `get_forecast` methods from the results object.
The `forecast` method gives only point forecasts.
```
# The default is to get a one-step-ahead forecast:
print(res.forecast())
```
The `get_forecast` method is more general, and also allows constructing confidence intervals.
```
# Here we construct a more complete results object.
fcast_res1 = res.get_forecast()
# Most results are collected in the `summary_frame` attribute.
# Here we specify that we want a confidence level of 90%
print(fcast_res1.summary_frame(alpha=0.10))
```
The default confidence level is 95%, but this can be controlled by setting the `alpha` parameter, where the confidence level is defined as $(1 - \alpha) \times 100\%$. In the example above, we specified a confidence level of 90%, using `alpha=0.10`.
### Specifying the number of forecasts
Both of the functions `forecast` and `get_forecast` accept a single argument indicating how many forecasting steps are desired. One option for this argument is always to provide an integer describing the number of steps ahead you want.
```
print(res.forecast(steps=2))
fcast_res2 = res.get_forecast(steps=2)
# Note: since we did not specify the alpha parameter, the
# confidence level is at the default, 95%
print(fcast_res2.summary_frame())
```
However, **if your data included a Pandas index with a defined frequency** (see the section at the end on Indexes for more information), then you can alternatively specify the date through which you want forecasts to be produced:
```
print(res.forecast('2010Q2'))
fcast_res3 = res.get_forecast('2010Q2')
print(fcast_res3.summary_frame())
```
### Plotting the data, forecasts, and confidence intervals
Often it is useful to plot the data, the forecasts, and the confidence intervals. There are many ways to do this, but here's one example
```
fig, ax = plt.subplots(figsize=(15, 5))
# Plot the data (here we are subsetting it to get a better look at the forecasts)
endog.loc['1999':].plot(ax=ax)
# Construct the forecasts
fcast = res.get_forecast('2011Q4').summary_frame()
fcast['mean'].plot(ax=ax, style='k--')
ax.fill_between(fcast.index, fcast['mean_ci_lower'], fcast['mean_ci_upper'], color='k', alpha=0.1);
```
### Note on what to expect from forecasts
The forecast above may not look very impressive, as it is almost a straight line. This is because this is a very simple, univariate forecasting model. Nonetheless, keep in mind that these simple forecasting models can be extremely competitive.
## Prediction vs Forecasting
The results objects also contain two methods that allow for both in-sample fitted values and out-of-sample forecasting. They are `predict` and `get_prediction`. The `predict` method only returns point predictions (similar to `forecast`), while the `get_prediction` method also returns additional results (similar to `get_forecast`).
In general, if your interest is out-of-sample forecasting, it is easier to stick to the `forecast` and `get_forecast` methods.
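As a small illustration (reusing the `res` object estimated above; the date labels are arbitrary), `predict` and `get_prediction` can span in-sample and out-of-sample periods in a single call by giving `start` and `end` labels:
```
# In-sample one-step-ahead predictions for 2008Q1-2009Q3 plus
# out-of-sample forecasts through 2010Q2, all in one call
print(res.predict(start='2008Q1', end='2010Q2'))

# get_prediction additionally returns confidence intervals
pred = res.get_prediction(start='2008Q1', end='2010Q2')
print(pred.summary_frame(alpha=0.10).tail())
```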
## Cross validation
**Note**: some of the functions used in this section were first introduced in statsmodels v0.11.0.
A common use case is to cross-validate forecasting methods by performing h-step-ahead forecasts recursively using the following process:
1. Fit model parameters on a training sample
2. Produce h-step-ahead forecasts from the end of that sample
3. Compare forecasts against test dataset to compute error rate
4. Expand the sample to include the next observation, and repeat
Economists sometimes call this a pseudo-out-of-sample forecast evaluation exercise, or time-series cross-validation.
### Example
We will conduct a very simple exercise of this sort using the inflation dataset above. The full dataset contains 203 observations, and for expositional purposes we'll use the first 80% as our training sample and only consider one-step-ahead forecasts.
A single iteration of the above procedure looks like the following:
```
# Step 1: fit model parameters w/ training sample
training_obs = int(len(endog) * 0.8)
training_endog = endog[:training_obs]
training_mod = sm.tsa.SARIMAX(
training_endog, order=(1, 0, 0), trend='c')
training_res = training_mod.fit()
# Print the estimated parameters
print(training_res.params)
# Step 2: produce one-step-ahead forecasts
fcast = training_res.forecast()
# Step 3: compute root mean square forecasting error
true = endog.reindex(fcast.index)
error = true - fcast
# Print out the results
print(pd.concat([true.rename('true'),
fcast.rename('forecast'),
error.rename('error')], axis=1))
```
To add on another observation, we can use the `append` or `extend` results methods. Either method can produce the same forecasts, but they differ in the other results that are available:
- `append` is the more complete method. It always stores results for all training observations, and it optionally allows refitting the model parameters given the new observations (note that the default is *not* to refit the parameters).
- `extend` is a faster method that may be useful if the training sample is very large. It *only* stores results for the new observations, and it does not allow refitting the model parameters (i.e. you have to use the parameters estimated on the previous sample).
If your training sample is relatively small (less than a few thousand observations, for example) or if you want to compute the best possible forecasts, then you should use the `append` method. However, if that method is infeasible (for example, because you have a very large training sample) or if you are okay with slightly suboptimal forecasts (because the parameter estimates will be slightly stale), then you can consider the `extend` method.
A second iteration, using the `append` method and refitting the parameters, would go as follows (note again that the default for `append` does not refit the parameters, but we have overridden that with the `refit=True` argument):
```
# Step 1: append a new observation to the sample and refit the parameters
append_res = training_res.append(endog[training_obs:training_obs + 1], refit=True)
# Print the re-estimated parameters
print(append_res.params)
```
Notice that these estimated parameters are slightly different than those we originally estimated. With the new results object, `append_res`, we can compute forecasts starting from one observation further than the previous call:
```
# Step 2: produce one-step-ahead forecasts
fcast = append_res.forecast()
# Step 3: compute root mean square forecasting error
true = endog.reindex(fcast.index)
error = true - fcast
# Print out the results
print(pd.concat([true.rename('true'),
fcast.rename('forecast'),
error.rename('error')], axis=1))
```
Putting it altogether, we can perform the recursive forecast evaluation exercise as follows:
```
# Setup forecasts
nforecasts = 3
forecasts = {}
# Get the number of initial training observations
nobs = len(endog)
n_init_training = int(nobs * 0.8)
# Create model for initial training sample, fit parameters
init_training_endog = endog.iloc[:n_init_training]
mod = sm.tsa.SARIMAX(init_training_endog, order=(1, 0, 0), trend='c')
res = mod.fit()
# Save initial forecast
forecasts[init_training_endog.index[-1]] = res.forecast(steps=nforecasts)
# Step through the rest of the sample
for t in range(n_init_training, nobs):
# Update the results by appending the next observation
updated_endog = endog.iloc[t:t+1]
res = res.append(updated_endog, refit=False)
# Save the new set of forecasts
forecasts[updated_endog.index[0]] = res.forecast(steps=nforecasts)
# Combine all forecasts into a dataframe
forecasts = pd.concat(forecasts, axis=1)
print(forecasts.iloc[:5, :5])
```
We now have a set of three forecasts made at each point in time from 1999Q2 through 2009Q3. We can construct the forecast errors by subtracting each forecast from the actual value of `endog` at that point.
```
# Construct the forecast errors
forecast_errors = forecasts.apply(lambda column: endog - column).reindex(forecasts.index)
print(forecast_errors.iloc[:5, :5])
```
To evaluate our forecasts, we often want to look at a summary value like the root mean square error. Here we can compute that for each horizon by first flattening the forecast errors so that they are indexed by horizon and then computing the root mean square error for each horizon.
```
# Reindex the forecasts by horizon rather than by date
def flatten(column):
return column.dropna().reset_index(drop=True)
flattened = forecast_errors.apply(flatten)
flattened.index = (flattened.index + 1).rename('horizon')
print(flattened.iloc[:3, :5])
# Compute the root mean square error
rmse = (flattened**2).mean(axis=1)**0.5
print(rmse)
```
#### Using `extend`
We can check that we get similar forecasts if we instead use the `extend` method, but that they are not exactly the same as when we use `append` with the `refit=True` argument. This is because `extend` does not re-estimate the parameters given the new observation.
```
# Setup forecasts
nforecasts = 3
forecasts = {}
# Get the number of initial training observations
nobs = len(endog)
n_init_training = int(nobs * 0.8)
# Create model for initial training sample, fit parameters
init_training_endog = endog.iloc[:n_init_training]
mod = sm.tsa.SARIMAX(init_training_endog, order=(1, 0, 0), trend='c')
res = mod.fit()
# Save initial forecast
forecasts[init_training_endog.index[-1]] = res.forecast(steps=nforecasts)
# Step through the rest of the sample
for t in range(n_init_training, nobs):
# Update the results by appending the next observation
updated_endog = endog.iloc[t:t+1]
res = res.extend(updated_endog)
# Save the new set of forecasts
forecasts[updated_endog.index[0]] = res.forecast(steps=nforecasts)
# Combine all forecasts into a dataframe
forecasts = pd.concat(forecasts, axis=1)
print(forecasts.iloc[:5, :5])
# Construct the forecast errors
forecast_errors = forecasts.apply(lambda column: endog - column).reindex(forecasts.index)
print(forecast_errors.iloc[:5, :5])
# Reindex the forecasts by horizon rather than by date
def flatten(column):
return column.dropna().reset_index(drop=True)
flattened = forecast_errors.apply(flatten)
flattened.index = (flattened.index + 1).rename('horizon')
print(flattened.iloc[:3, :5])
# Compute the root mean square error
rmse = (flattened**2).mean(axis=1)**0.5
print(rmse)
```
By not re-estimating the parameters, our forecasts are slightly worse (the root mean square error is higher at each horizon). However, the process is faster, even with only 200 datapoints. Using the `%%timeit` cell magic on the cells above, we found a runtime of 570ms using `extend` versus 1.7s using `append` with `refit=True`. (Note that using `extend` is also faster than using `append` with `refit=False`).
## Indexes
Throughout this notebook, we have been making use of Pandas date indexes with an associated frequency. As you can see below, this index marks our data as being at a quarterly frequency, between 1959Q1 and 2009Q3.
```
print(endog.index)
```
In most cases, if your data has an associated date/time index with a defined frequency (like quarterly, monthly, etc.), then it is best to make sure your data is a Pandas series with the appropriate index. Here are three examples of this:
```
# Annual frequency, using a PeriodIndex
index = pd.period_range(start='2000', periods=4, freq='A')
endog1 = pd.Series([1, 2, 3, 4], index=index)
print(endog1.index)
# Quarterly frequency, using a DatetimeIndex
index = pd.date_range(start='2000', periods=4, freq='QS')
endog2 = pd.Series([1, 2, 3, 4], index=index)
print(endog2.index)
# Monthly frequency, using a DatetimeIndex
index = pd.date_range(start='2000', periods=4, freq='M')
endog3 = pd.Series([1, 2, 3, 4], index=index)
print(endog3.index)
```
In fact, if your data has an associated date/time index, it is best to use that even if it does not have a defined frequency. An example of that kind of index is as follows - notice that it has `freq=None`:
```
index = pd.DatetimeIndex([
'2000-01-01 10:08am', '2000-01-01 11:32am',
'2000-01-01 5:32pm', '2000-01-02 6:15am'])
endog4 = pd.Series([0.2, 0.5, -0.1, 0.1], index=index)
print(endog4.index)
```
You can still pass this data to statsmodels' model classes, but you will get the following warning, that no frequency data was found:
```
mod = sm.tsa.SARIMAX(endog4)
res = mod.fit()
```
What this means is that you cannot specify forecasting steps by dates, and the output of the `forecast` and `get_forecast` methods will not have associated dates. The reason is that without a given frequency, there is no way to determine what date each forecast should be assigned to. In the example above, there is no pattern to the date/time stamps of the index, so there is no way to determine what the next date/time should be (should it be in the morning of 2000-01-02? the afternoon? or maybe not until 2000-01-03?).
For example, if we forecast one-step-ahead:
```
res.forecast(1)
```
The index associated with the new forecast is `4`, because if the given data had an integer index, that would be the next value. A warning is given letting the user know that the index is not a date/time index.
If we try to specify the steps of the forecast using a date, we will get the following exception:
KeyError: 'The `end` argument could not be matched to a location related to the index of the data.'
```
# Here we'll catch the exception to prevent printing too much of
# the exception trace output in this notebook
try:
res.forecast('2000-01-03')
except KeyError as e:
print(e)
```
Ultimately there is nothing wrong with using data that does not have an associated date/time frequency, or even using data that has no index at all, like a Numpy array. However, if you can use a Pandas series with an associated frequency, you'll have more options for specifying your forecasts and get back results with a more useful index.
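As a final illustration (a minimal sketch with made-up data, not part of the original example), a series indexed by a `PeriodIndex` with a defined frequency yields forecasts that carry a date index and can even be requested up to a date:
```
import numpy as np
import pandas as pd
import statsmodels.api as sm

# Made-up quarterly series with a defined frequency
index = pd.period_range(start='2000Q1', periods=40, freq='Q')
endog_q = pd.Series(np.random.randn(40).cumsum(), index=index)

res_q = sm.tsa.SARIMAX(endog_q, order=(1, 0, 0), trend='c').fit(disp=False)

# The forecast index contains the next quarters, and forecasts can be
# requested up to a date rather than as a number of steps
print(res_q.forecast(steps=2))
print(res_q.forecast('2010Q2'))
```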
```
%matplotlib inline
from pyvista import set_plot_theme
set_plot_theme('document')
```
Slicing {#slice_example}
=======
Extract thin planar slices from a volume.
```
# sphinx_gallery_thumbnail_number = 2
import pyvista as pv
from pyvista import examples
import matplotlib.pyplot as plt
import numpy as np
```
PyVista meshes have several slicing filters bound directly to all
datasets. These filters allow you to slice through a volumetric dataset
to extract and view sections through the volume of data.
One of the most common slicing filters used in PyVista is the
`pyvista.DataSetFilters.slice_orthogonal`{.interpreted-text role="func"}
filter which creates three orthogonal slices through the dataset
parallel to the three Cartesian planes. For example, let's slice
through the sample geostatistical training image volume. First, load up
the volume and preview it:
```
mesh = examples.load_channels()
# define a categorical colormap
cmap = plt.cm.get_cmap("viridis", 4)
mesh.plot(cmap=cmap)
```
Note that this dataset is a 3D volume and there might be regions within
this volume that we would like to inspect. We can create slices through
the mesh to gain further insight about the internals of the volume.
```
slices = mesh.slice_orthogonal()
slices.plot(cmap=cmap)
```
The orthogonal slices can be easily translated throughout the volume:
```
slices = mesh.slice_orthogonal(x=20, y=20, z=30)
slices.plot(cmap=cmap)
```
We can also add just a single slice of the volume by specifying the
origin and normal of the slicing plane with the
`pyvista.DataSetFilters.slice`{.interpreted-text role="func"} filter:
```
# Single slice - origin defaults to the center of the mesh
single_slice = mesh.slice(normal=[1, 1, 0])
p = pv.Plotter()
p.add_mesh(mesh.outline(), color="k")
p.add_mesh(single_slice, cmap=cmap)
p.show()
```
Adding slicing planes uniformly across an axial direction can also be
automated with the
`pyvista.DataSetFilters.slice_along_axis`{.interpreted-text role="func"}
filter:
```
slices = mesh.slice_along_axis(n=7, axis="y")
slices.plot(cmap=cmap)
```
Slice Along Line
================
We can also slice a dataset along a `pyvista.Spline`{.interpreted-text
role="func"} or `pyvista.Line`{.interpreted-text role="func"} using the
`DataSetFilters.slice_along_line`{.interpreted-text role="func"} filter.
First, define a line source through the dataset of interest. Please note
that this type of slicing is computationally expensive and might take a
while if there are a lot of points in the line - try to keep the
resolution of the line low.
```
model = examples.load_channels()
def path(y):
"""Equation: x = a(y-h)^2 + k"""
a = 110.0 / 160.0 ** 2
x = a * y ** 2 + 0.0
return x, y
x, y = path(np.arange(model.bounds[2], model.bounds[3], 15.0))
zo = np.linspace(9.0, 11.0, num=len(y))
points = np.c_[x, y, zo]
spline = pv.Spline(points, 15)
spline
```
Then run the filter
```
slc = model.slice_along_line(spline)
slc
p = pv.Plotter()
p.add_mesh(slc, cmap=cmap)
p.add_mesh(model.outline())
p.show(cpos=[1, -1, 1])
```
Multiple Slices in Vector Direction
===================================
Slice a mesh along a vector direction perpendicularly.
```
mesh = examples.download_brain()
# Create vector
vec = np.random.rand(3)
# Normalize the vector
normal = vec / np.linalg.norm(vec)
# Make points along that vector for the extent of your slices
a = mesh.center + normal * mesh.length / 3.0
b = mesh.center - normal * mesh.length / 3.0
# Define the line/points for the slices
n_slices = 5
line = pv.Line(a, b, n_slices)
# Generate all of the slices
slices = pv.MultiBlock()
for point in line.points:
slices.append(mesh.slice(normal=normal, origin=point))
p = pv.Plotter()
p.add_mesh(mesh.outline(), color="k")
p.add_mesh(slices, opacity=0.75)
p.add_mesh(line, color="red", line_width=5)
p.show()
```
Slice At Different Bearings
===========================
From
[pyvista-support\#23](https://github.com/pyvista/pyvista-support/issues/23)
An example of how to get many slices at different bearings all centered
around a user-chosen location.
Create a point to orient slices around
```
ranges = np.array(model.bounds).reshape(-1, 2).ptp(axis=1)
point = np.array(model.center) - ranges*0.25
```
Now generate a few normal vectors to rotate a slice around the z-axis, using the equation for a circle since the rotation is about the z-axis.
```
increment = np.pi/6.
# use a container to hold all the slices
slices = pv.MultiBlock() # treat like a dictionary/list
for theta in np.arange(0, np.pi, increment):
normal = np.array([np.cos(theta), np.sin(theta), 0.0]).dot(np.pi/2.)
name = f'Bearing: {np.rad2deg(theta):.2f}'
slices[name] = model.slice(origin=point, normal=normal)
slices
```
And now display it!
```
p = pv.Plotter()
p.add_mesh(slices, cmap=cmap)
p.add_mesh(model.outline())
p.show()
```
```
%matplotlib inline
```
# Early stopping of Gradient Boosting
Gradient boosting is an ensembling technique where several weak learners
(regression trees) are combined to yield a powerful single model, in an
iterative fashion.
Early stopping support in Gradient Boosting enables us to find the least number
of iterations which is sufficient to build a model that generalizes well to
unseen data.
The concept of early stopping is simple. We specify a ``validation_fraction``
which denotes the fraction of the whole dataset that will be kept aside from
training to assess the validation loss of the model. The gradient boosting
model is trained using the training set and evaluated using the validation set.
When each additional stage of regression tree is added, the validation set is
used to score the model. This is continued until the scores of the model in
the last ``n_iter_no_change`` stages do not improve by at least `tol`. After
that the model is considered to have converged and further addition of stages
is "stopped early".
The number of stages of the final model is available at the attribute
``n_estimators_``.
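As a compact illustration (a minimal sketch, separate from the benchmark that follows), the relevant parameters and the fitted attribute can be used as follows:
```
# Minimal sketch: fit with early stopping and inspect how many stages were used
from sklearn.datasets import make_hastie_10_2
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_hastie_10_2(random_state=0)
clf = GradientBoostingClassifier(n_estimators=500,        # upper bound on stages
                                 validation_fraction=0.2,  # 20% held out internally
                                 n_iter_no_change=10,      # patience, in stages
                                 tol=0.01,                 # minimum improvement
                                 random_state=0)
clf.fit(X, y)
print(clf.n_estimators_)  # typically far fewer than 500
```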
This example illustrates how early stopping can be used in the
:class:`sklearn.ensemble.GradientBoostingClassifier` model to achieve
almost the same accuracy as compared to a model built without early stopping
using many fewer estimators. This can significantly reduce training time,
memory usage and prediction latency.
```
# Authors: Vighnesh Birodkar <[email protected]>
# Raghav RV <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.model_selection import train_test_split
print(__doc__)
data_list = [datasets.load_iris(), datasets.load_digits()]
data_list = [(d.data, d.target) for d in data_list]
data_list += [datasets.make_hastie_10_2()]
names = ['Iris Data', 'Digits Data', 'Hastie Data']
n_gb = []
score_gb = []
time_gb = []
n_gbes = []
score_gbes = []
time_gbes = []
n_estimators = 500
for X, y in data_list:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=0)
    # We specify that if the scores don't improve by at least 0.01 for the last
    # 5 stages, stop fitting additional stages
gbes = ensemble.GradientBoostingClassifier(n_estimators=n_estimators,
validation_fraction=0.2,
n_iter_no_change=5, tol=0.01,
random_state=0)
gb = ensemble.GradientBoostingClassifier(n_estimators=n_estimators,
random_state=0)
start = time.time()
gb.fit(X_train, y_train)
time_gb.append(time.time() - start)
start = time.time()
gbes.fit(X_train, y_train)
time_gbes.append(time.time() - start)
score_gb.append(gb.score(X_test, y_test))
score_gbes.append(gbes.score(X_test, y_test))
n_gb.append(gb.n_estimators_)
n_gbes.append(gbes.n_estimators_)
bar_width = 0.2
n = len(data_list)
index = np.arange(0, n * bar_width, bar_width) * 2.5
index = index[0:n]
```
Compare scores with and without early stopping
----------------------------------------------
```
plt.figure(figsize=(9, 5))
bar1 = plt.bar(index, score_gb, bar_width, label='Without early stopping',
color='crimson')
bar2 = plt.bar(index + bar_width, score_gbes, bar_width,
label='With early stopping', color='coral')
plt.xticks(index + bar_width, names)
plt.yticks(np.arange(0, 1.3, 0.1))
def autolabel(rects, n_estimators):
"""
Attach a text label above each bar displaying n_estimators of each model
"""
for i, rect in enumerate(rects):
plt.text(rect.get_x() + rect.get_width() / 2.,
1.05 * rect.get_height(), 'n_est=%d' % n_estimators[i],
ha='center', va='bottom')
autolabel(bar1, n_gb)
autolabel(bar2, n_gbes)
plt.ylim([0, 1.3])
plt.legend(loc='best')
plt.grid(True)
plt.xlabel('Datasets')
plt.ylabel('Test score')
plt.show()
```
Compare fit times with and without early stopping
-------------------------------------------------
```
plt.figure(figsize=(9, 5))
bar1 = plt.bar(index, time_gb, bar_width, label='Without early stopping',
color='crimson')
bar2 = plt.bar(index + bar_width, time_gbes, bar_width,
label='With early stopping', color='coral')
max_y = np.amax(np.maximum(time_gb, time_gbes))
plt.xticks(index + bar_width, names)
plt.yticks(np.linspace(0, 1.3 * max_y, 13))
autolabel(bar1, n_gb)
autolabel(bar2, n_gbes)
plt.ylim([0, 1.3 * max_y])
plt.legend(loc='best')
plt.grid(True)
plt.xlabel('Datasets')
plt.ylabel('Fit Time')
plt.show()
```
# Problems
```
import math
import pandas as pd
from sklearn import preprocessing
from sklearn.neighbors import NearestNeighbors, KNeighborsClassifier, KNeighborsRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics import accuracy_score, mean_squared_error
from dmutils import classification_summary
from dmutils import regression_summary
```
**1. Calculating Distance with Categorical Predictors.**
This exercise with a tiny dataset illustrates the calculation of Euclidean distance, and the creation of binary
dummies. The online education company Statistics.com segments its customers and prospects into three main categories: IT professionals (IT), statisticians (Stat), and other (Other). It also tracks, for each customer, the number of years since first contact (years). Consider the following customers; information about whether they have taken a course or not (the outcome to be predicted) is included:
Customer 1: Stat, 1 year, did not take course
Customer 2: Other, 1.1 year, took course
**a.** Consider now the following new prospect:
Prospect 1: IT, 1 year
Using the above information on the two customers and one prospect, create one dataset for all three with the categorical predictor variable transformed into 2 binaries, and a similar dataset with the categorical predictor variable transformed into 3 binaries.
```
# dataset for all three customers with the categorical predictor (category)
# transformed into 2 binaries
tiny_two_cat_dummies_df = pd.DataFrame({"IT": [0, 0, 1], "Stat": [1, 0, 0],
"years_since_first_contact": [1, 1.1, 1],
"course": [0, 1, None]})
tiny_two_cat_dummies_df
# dataset for all three customers with the categorical predictor (category)
# transformed into 3 binaries
tiny_all_cat_dummies_df = pd.DataFrame({"IT": [0, 0, 1], "Stat": [1, 0, 0],
"Other": [0, 1, 0], "years_since_first_contact": [1, 1.1, 1],
"course": [0, 1, None]})
tiny_all_cat_dummies_df
```
**b.** For each derived dataset, calculate the Euclidean distance between the prospect and each of the other two customers. (Note: While it is typical to normalize data for k-NN, this is not an iron-clad rule and you may proceed here without normalization.)
- Two categorical dummies (IT/Stat):
```
predictors = ["IT", "Stat", "years_since_first_contact"]
pd.DataFrame(euclidean_distances(tiny_two_cat_dummies_df[predictors],
tiny_two_cat_dummies_df[predictors]),
columns=["customer_1", "customer_2", "customer_3"],
index=["customer_1", "customer_2", "customer_3"])
```
- Three categorical dummies (IT/Stat/Other):
```
predictors = ["IT", "Stat", "Other", "years_since_first_contact"]
pd.DataFrame(euclidean_distances(tiny_all_cat_dummies_df[predictors],
tiny_all_cat_dummies_df[predictors]),
columns=["customer_1", "customer_2", "customer_3"],
index=["customer_1", "customer_2", "customer_3"])
```
We can already see the effect of using two versus three dummy variables. With two dummies, the prospect (`customer_3`) is nearer to `customer_2` than to `customer_1`: once the `Other` dummy is dropped, `customer_2` is zero on both dummies, so it differs from the prospect only in the `IT` dummy (plus the small 0.1 difference in `years_since_first_contact`). With three dummies, the prospect is marginally nearer to `customer_1` than to `customer_2`, because the `Other` dummy now also separates `customer_2` from the prospect; the distances, however, are very close to one another.
In contrast to the situation with statistical models such as regression, all *m* binaries should be created and
used with *k*-NN. While mathematically this is redundant, since *m* - 1 dummies contain the same information as *m* dummies, this redundant information does not create the multicollinearity problems that it does for linear models. Moreover, in *k*-NN the use of *m* - 1 dummies can yield different classifications than the use of *m* dummies, and lead to an imbalance in the contribution of the different categories to the model.
**c.** Using k-NN with k = 1, classify the prospect as taking or not taking a course using each of the two derived datasets. Does it make a difference whether you use two or three dummies?
- Two dummies variables (IT/Stat)
```
predictors = ["IT", "Stat", "years_since_first_contact"]
# use NearestNeighbors from scikit-learn to compute knn
knn = NearestNeighbors(n_neighbors=1)
knn.fit(tiny_two_cat_dummies_df.loc[:1, predictors])
new_customer = pd.DataFrame({"IT": [1], "Stat": [0],
"years_since_first_contact": [1]})
distances, indices = knn.kneighbors(new_customer)
# indices is a list of lists, we are only interested in the first element
tiny_two_cat_dummies_df.iloc[indices[0], :]
```
- Three dummies variable(IT/Stat/Other)
```
predictors = ["IT", "Stat", "Other", "years_since_first_contact"]
# use NearestNeighbors from scikit-learn to compute knn
knn = NearestNeighbors(n_neighbors=1)
knn.fit(tiny_all_cat_dummies_df.loc[:1, predictors])
new_customer = pd.DataFrame({"IT": [1], "Stat": [0], "Other": [1],
"years_since_first_contact": [1]})
distances, indices = knn.kneighbors(new_customer)
# indices is a list of lists, we are only interested in the first element
tiny_all_cat_dummies_df.iloc[indices[0], :]
```
With *k* = 1 and two dummies, the nearest customer is `customer_2`, who took the course, so the prospect would be classified as taking the course. With three dummies, the nearest customer is `customer_1`, who did not take the course, and the classification flips. So for this example it does make a difference whether we use two or three dummies: as noted in item (**b**), in *k*-NN the use of *m* - 1 dummies can yield different classifications than the use of *m* dummies and lead to an imbalance in the contribution of the different categories to the model.
**2. Personal Loan Acceptance.** Universal Bank is a relatively young bank growing rapidly in terms of overall customer acquisition. The majority of these customers are liability customers (depositors) with varying sizes of relationship with the bank. The customer base of asset customers (borrowers) is quite small, and the bank is interested in expanding this base rapidly to bring in more loan business. In particular, it wants to explore ways of converting its liability customers to personal loan customers (while retaining them as depositors).
A campaign that the bank ran last year for liability customers showed a healthy conversion rate of over 9% success. This has encouraged the retail marketing department to devise smarter campaigns with better target marketing. The goal is to use *k*-NN to predict whether a new customer will accept a loan offer. This will serve as the basis for the design of a new campaign.
The file `UniversalBank.csv` contains data on 5000 customers. The data include customer demographic information (age, income, etc.), the customer's relationship with the bank (mortgage, securities account, etc.), and the customer response to the last personal loan campaign (Personal Loan). Among these 5000 customers, only 480 (=9.6%) accepted the personal loan that was offered to them in the earlier campaign.
Partition the data into training (60%) and validation (40%) sets.
**a.** Consider the following customer:
Age = 40, Experience = 10, Income = 84, Family = 2, CCAvg = 2, Education_1 = 0,
Education_2 = 1, Education_3 = 0, Mortgage = 0, Securities Account = 0, CDAccount = 0,
Online = 1, and Credit Card = 1.
Perform a *k*-NN classification with all predictors except ID and ZIP code using k = 1. Remember to transform categorical predictors with more than two categories into dummy variables first. Specify the success class as 1 (loan acceptance), and use the default cutoff value of 0.5. How would this customer be classified?
```
customer_df = pd.read_csv("../datasets/UniversalBank.csv")
customer_df.head()
# define predictors and the outcome for this problem
predictors = ["Age", "Experience", "Income", "Family", "CCAvg", "Education", "Mortgage",
"Securities Account", "CD Account", "Online", "CreditCard"]
outcome = "Personal Loan"
# before k-NN, we will convert 'Education' to binary dummies.
# 'Family' remains unchanged
customer_df = pd.get_dummies(customer_df, columns=["Education"], prefix_sep="_")
# update predictors to include the new dummy variables
predictors = ["Age", "Experience", "Income", "Family", "CCAvg", "Education_1",
"Education_2", "Education_3", "Mortgage",
"Securities Account", "CD Account", "Online", "CreditCard"]
# partition the data into training 60% and validation 40% sets
train_data, valid_data = train_test_split(customer_df, test_size=0.4,
random_state=26)
# equalize the scales that the various predictors have (standardization)
scaler = preprocessing.StandardScaler()
scaler.fit(train_data[predictors])
# transform the full dataset
customer_norm = pd.concat([pd.DataFrame(scaler.transform(customer_df[predictors]),
columns=["z"+col for col in predictors]),
customer_df[outcome]], axis=1)
train_norm = customer_norm.iloc[train_data.index]
valid_norm = customer_norm.iloc[valid_data.index]
# new customer
new_customer = pd.DataFrame({"Age": [40], "Experience": [10], "Income": [84], "Family": [2],
"CCAvg": [2], "Education_1": [0], "Education_2": [1],
"Education_3": [0], "Mortgage": [0], "Securities Account": [0],
"CDAccount": [0], "Online": [1], "Credit Card": [1]})
new_customer_norm = pd.DataFrame(scaler.transform(new_customer),
columns=["z"+col for col in predictors])
# use NearestNeighbors from scikit-learn to compute knn
# using all the dataset (training + validation sets) here!
knn = NearestNeighbors(n_neighbors=1)
knn.fit(customer_norm.iloc[:, 0:-1])
distances, indices = knn.kneighbors(new_customer_norm)
# indices is a list of lists, we are only interested in the first element
customer_norm.iloc[indices[0], :]
```
Since the closest customer did not accept the loan (=0), we can estimate for the new customer a probability of 1 of being a non-borrower (and 0 of being a borrower). Using a simple majority rule is equivalent to setting the cutoff value to 0.5. In the above results, we see that the algorithm assigns the class non-borrower to this record.
**b.** What is a choice of *k* that balances between overfitting and ignoring the predictor information?
First, we need to remember that a balanced choice greatly depends on the nature of the data. The more complex and irregular the structure of the data, the lower the optimum value of *k*. Typically, values of *k* fall in the range of 1-20. We will use odd numbers to avoid ties.
If we choose *k* = 1, we will classify in a way that is very sensitive to the local characteristics of the training data. On the other hand, if we choose a very large value of *k* (in the extreme, *k* equal to the number of training records), we would simply predict the most frequent class in the dataset in all cases.
To find a balance, we examine the accuracy (of predictions in the validation set) that results from different choices of *k* between 1 and 14.
```
train_X = train_norm[["z"+col for col in predictors]]
train_y = train_norm[outcome]
valid_X = valid_norm[["z"+col for col in predictors]]
valid_y = valid_norm[outcome]
# Train a classifier for different values of k
results = []
for k in range(1, 15):
knn = KNeighborsClassifier(n_neighbors=k).fit(train_X, train_y)
results.append({"k": k,
"accuracy": accuracy_score(valid_y, knn.predict(valid_X))})
# Convert results to a pandas data frame
results = pd.DataFrame(results)
results
```
Based on the above table, we would choose **k = 3** (though **k = 5** appears to be another option too), which maximizes our accuracy in the validation set. Note, however, that now the validation set is used as part of the training process (to set *k*) and does not reflect a
true holdout set as before. Ideally, we would want a third test set to evaluate the performance of the method on data that it did not see.
**c.** Show the confusion matrix for the validation data that results from using the best *k*.
- k = 3
```
knn = KNeighborsClassifier(n_neighbors=3).fit(train_X, train_y)
classification_summary(y_true=valid_y, y_pred=knn.predict(valid_X))
```
- k = 5
```
knn = KNeighborsClassifier(n_neighbors=5).fit(train_X, train_y)
classification_summary(y_true=valid_y, y_pred=knn.predict(valid_X))
```
**d.** Consider the following customer:
Age = 40, Experience = 10, Income = 84, Family = 2, CCAvg = 2, Education_1 = 0,
Education_2 = 1, Education_3 = 0, Mortgage = 0, Securities Account = 0, CD Account = 0,
Online = 1 and Credit Card = 1.
Classify the customer using the best *k*.
Note: once *k* is chosen, we rerun the algorithm on the combined training and validation sets in order to generate classifications of new records.
```
# using the same user created before :)
knn = KNeighborsClassifier(n_neighbors=3).fit(customer_norm.iloc[:, 0:-1],
customer_norm.loc[:, "Personal Loan"])
knn.predict(new_customer_norm), knn.predict_proba(new_customer_norm)
knn = KNeighborsClassifier(n_neighbors=5).fit(customer_norm.iloc[:, 0:-1],
customer_norm.loc[:, "Personal Loan"])
knn.predict(new_customer_norm), knn.predict_proba(new_customer_norm)
```
Using the best *k* (=3), the customer is classified as a **non-borrower**; the same classification is obtained with *k* = 5.
**e**. Repartition the data, this time into training, validation, and test sets (50%:30%:20%). Apply the *k*-NN method with the *k* chosen above. Compare the confusion matrix of the test set with that of the training and validation sets. Comment on the differences and their reason.
```
# using the customer_norm computed earlier
# training: 50%
# validation: 30% (0.5 * 0.6)
# test: 20% (0.5 * 0.4)
train_data, temp = train_test_split(customer_df, test_size=0.50, random_state=1)
valid_data, test_data = train_test_split(temp, test_size=0.40, random_state=1)
train_norm = customer_norm.iloc[train_data.index]
valid_norm = customer_norm.iloc[valid_data.index]
test_norm = customer_norm.iloc[test_data.index]
train_X = train_norm[["z"+col for col in predictors]]
train_y = train_norm[outcome]
valid_X = valid_norm[["z"+col for col in predictors]]
valid_y = valid_norm[outcome]
test_X = test_norm[["z"+col for col in predictors]]
test_y = test_norm[outcome]
knn = KNeighborsClassifier(n_neighbors=3).fit(train_X, train_y)
print("Training set\n" + "*" * 12)
classification_summary(y_true=train_y, y_pred=knn.predict(train_X))
print("\nValidation set\n" + "*" * 14)
classification_summary(y_true=valid_y, y_pred=knn.predict(valid_X))
print("\nTest set\n" + "*" * 8)
classification_summary(y_true=test_y, y_pred=knn.predict(test_X))
```
Based on the training, validation, and test confusion matrices, we can see an increase in the error rate from the training set to the validation and test sets. Since the model is fit on the training data, it makes intuitive sense that the classifications are most accurate there rather than on the validation/test datasets.
We can also see that there does not appear to be overfitting, given the minimal error discrepancies among all three matrices, and especially between the validation and test sets.
**3. Predicting Housing Median Prices.** The file `BostonHousing.csv` contains information on over 500 census tracts in Boston, where for each tract multiple variables are recorded. The last column (`CAT.MEDV`) was derived from `MEDV`, such that it obtains the value 1 if `MEDV` > 30 and 0 otherwise. Consider the goal of predicting the median value (`MEDV`) of a tract, given the information in the first 12 columns.
Partition the data into training (60%) and validation (40%) sets.
**a.** Perform a *k*-NN prediction with all 12 predictors (ignore the `CAT.MEDV` column), trying values of *k* from 1 to 5. Make sure to normalize the data. What is the best *k*? What does it mean?
The idea of *k*-NN can readily be extended to predicting a continuous value (as is our aim with multiple linear regression models). The first step of determining neighbors by computing distances remains unchanged. The second step, where a majority vote of the neighbors is used to determine class, is modified such that we take the average outcome value of the *k*-nearest neighbors to determine the prediction. Often, this average is a weighted average, with the weight decreasing with increasing distance from the point at which the prediction is required. In `scikit-learn`, we can use `KNeighborsRegressor` to compute *k*-NN numerical predictions for the validation set.
Another modification is in the error metric used for determining the "best k". Rather than the overall error rate used in classification, RMSE (root-mean-squared error) or another prediction error metric should be used in prediction.
```
housing_df = pd.read_csv("../datasets/BostonHousing.csv")
housing_df.head()
# define predictors and the outcome for this problem
predictors = ["CRIM", "ZN", "INDUS", "CHAS", "NOX", "RM", "AGE",
"DIS", "RAD", "TAX", "PTRATIO", "LSTAT"]
outcome = "MEDV"
# partition the data into training 60% and validation 40% sets
train_data, valid_data = train_test_split(housing_df, test_size=0.4,
random_state=26)
# equalize the scales that the various predictors have (standardization)
scaler = preprocessing.StandardScaler()
scaler.fit(train_data[predictors])
# transform the full dataset
housing_norm = pd.concat([pd.DataFrame(scaler.transform(housing_df[predictors]),
columns=["z"+col for col in predictors]),
housing_df[outcome]], axis=1)
train_norm = housing_norm.iloc[train_data.index]
valid_norm = housing_norm.iloc[valid_data.index]
# Perform a k-NN prediction with all 12 predictors
# trying values of k from 1 to 5
train_X = train_norm[["z"+col for col in predictors]]
train_y = train_norm[outcome]
valid_X = valid_norm[["z"+col for col in predictors]]
valid_y = valid_norm[outcome]
# Train a classifier for different values of k
# Using weighted average
results = []
for k in range(1, 6):
knn = KNeighborsRegressor(n_neighbors=k, weights="distance").fit(train_X, train_y)
y_pred = knn.predict(valid_X)
y_res = valid_y - y_pred
results.append({"k": k,
"mean_error": sum(y_res) / len(y_res),
"rmse": math.sqrt(mean_squared_error(valid_y, y_pred)),
"mae": sum(abs(y_res)) / len(y_res)})
# Convert results to a pandas data frame
results = pd.DataFrame(results)
results
```
Using the RMSE (root mean squared errors) as the *k* decision driver, the best *k* is 4. We choose 4 as a way to minimize the errors found in the validation set. Note, however, that now the validation set is used as part of the training process (to set *k*) and does not reflect a true holdout set as before.
Note also that performance on validation data may be overly optimistic when it comes to predicting performance on data that have not been exposed to the model at all. This is because when the validation data are used to select a final model among a set of models, we are selecting based on how well the model performs with those data and therefore may be incorporating some of the random idiosyncrasies (bias) of the validation data into the judgment about the best model.
The model still may be the best for the validation data among those considered, but it will probably not do as well with the unseen data. Therefore, it is useful to evaluate the chosen model on a new test set to get a sense of how well it will perform on new data. In addition, one must consider practical issues such as costs of collecting variables, error-proneness, and model complexity in the selection of the final model.
**b.** Predict the `MEDV` for a tract with the following information, using the best *k*:
CRIM: 0.2
ZN: 0
INDUS: 7
CHAS: 0
NOX: 0.538
RM: 6
AGE: 62
DIS: 4.7
RAD: 4
TAX: 307
PTRATIO: 21
LSTAT: 10
Once *k* is chosen, we rerun the algorithm on the combined training and validation sets in order to generate predictions for new records.
```
# new house to be predicted. Before predicting the MEDV we normalize it
new_house = pd.DataFrame({"CRIM": [0.2], "ZN": [0], "INDUS": [7], "CHAS": [0],
"NOX": [0.538], "RM": [6], "AGE": [62], "DIS": [4.7],
"RAD": [4], "TAX": [307], "PTRATIO": [21], "LSTAT": [10]})
new_house_norm = pd.DataFrame(scaler.transform(new_house),
columns=["z"+col for col in predictors])
# retrain the knn using the best k and all data
knn = KNeighborsRegressor(n_neighbors=4, weights="distance").fit(housing_norm[["z"+col for col in predictors]],
housing_norm[outcome])
knn.predict(new_house_norm)
```
The new house has a predicted value of 19.6 (in \$1000s).
**c.** If we used the above *k*-NN algorithm to score the training data, what would be the error of the training set?
It would be zero or near zero. This happens because the final model was fit on a dataset that includes these same records, and with `weights="distance"` a training record is its own nearest neighbor at zero distance, so it is predicted (almost) exactly. In other words, we are using the same data for fitting the model and for estimating its error.
```
# Using the previously trained model (all data, k=4)
y_pred = knn.predict(train_X)
y_res = train_y - y_pred
results = {"k": 4,
"mean_error": sum(y_res) / len(y_res),
"rmse": math.sqrt(mean_squared_error(train_y, y_pred)),
"mae": sum(abs(y_res)) / len(y_res)}
# Convert results to a pandas data frame
results = pd.DataFrame(results, index=[0])
results
```
**d.** Why is the validation data error overly optimistic compared to the error rate when applying this *k*-NN predictor to new data?
When we use the validation data to assess multiple models and then choose the model that performs best with the validation data, we again encounter another (lesser) facet of the overfitting problem - chance aspects of the validation data that happen to match the chosen model better than they match other models. In other words, by using the validation data to choose one of several models, the performance of the chosen model on the validation data will be overly optimistic.
In other words, chances are that the training/validation sets can be biased, so cross-validation would give a better approximation in this scenario.
**e.** If the purpose is to predict `MEDV` for several thousands of new tracts, what would be the disadvantage of using *k*-NN prediction? List the operations that the algorithm goes through in order to produce each prediction.
The disadvantage of *k*-NN in this case is its laziness: there is no pre-computed model to apply, so all of the computation happens at prediction time, which makes scoring thousands of new tracts slow.
Basically, the algorithm would need to perform the following operations repeatedly for each new case in order to predict its `MEDV` value:
- Normalize each predictor of the new record using the means and standard deviations of the training data;
- Calculate the distance of this record from all the training records;
- Sort the training records by the calculated distances;
- Average (here, a distance-weighted average) the `MEDV` values of the *k* nearest neighbors to produce the prediction.
As mentioned, this whole process is repeated for each of the thousands of new cases, which is computationally expensive and time-consuming.
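A minimal sketch of that per-query work (a hypothetical helper written for illustration, not part of the solution code above) makes the cost explicit:
```
import numpy as np

def knn_predict_one(x_new, train_X, train_y, scaler, k=4):
    """Naive k-NN prediction for a single new record."""
    # 1. normalize the new record with the training-set mean/std
    x_norm = scaler.transform(np.asarray(x_new).reshape(1, -1))
    # 2. compute the distance to every training record
    dists = np.sqrt(((np.asarray(train_X) - x_norm) ** 2).sum(axis=1))
    # 3. sort the training records by distance and keep the k nearest
    nearest = np.argsort(dists)[:k]
    # 4. average the outcomes of the k nearest neighbors (unweighted here)
    return np.asarray(train_y)[nearest].mean()
```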
```
%matplotlib inline
import control
from control.matlab import *
import numpy as np
import matplotlib.pyplot as plt
def pole_plot(poles, title='Pole Map'):
plt.title(title)
plt.scatter(np.real(poles), np.imag(poles), s=50, marker='x')
plt.axhline(y=0, color='black');
plt.axvline(x=0, color='black');
plt.xlabel('Re');
plt.ylabel('Im');
```
# Tune Those Gains!
State space control requires that you fill in up to three gain matrices (K, L & I), each potentially containing a number of elements. Given the amount of heuristic tuning that goes into selecting PID gains (of which there are only three), tuning a state space controller can seem a bit daunting. Fortunately, the state space control framework includes a formal way to calculate gains to arrive at what is called a Linear Quadratic Regulator as well as a Linear Quadratic Estimator.
The goal of this notebook will be to walk you through the steps necessary to formulate the gains for an LQR and LQE. Once we've arrived at some acceptable gains, you can cut and paste them directly into your arduino sketch and start controlling some cool, complex dynamic systems with hopefully less guesswork and gain tweaking than if you were use a PID controller.
We'll be working from the model for the cart pole system from the examples folder. This system has multiple outputs and is inherently unstable, so it's a nice way of showing the power of state space control. Having said that, **this analysis can apply to any state space model**, so to adapt it to your system, just modify the system matrices ($A,B,C,D$) and the cost function weighting matrices ($ Q_{ctrl}, R_{ctrl}, Q_{est}, R_{est} $) and rerun the notebook.
## Cart Pole
You can find the details on the system modelling for the inverted pendulum [here](http://ctms.engin.umich.edu/CTMS/index.php?example=InvertedPendulum§ion=SystemModeling) but by way of a quick introduction, the physical system consists of a pendulum mounted on top of a moving cart shown below. The cart is fitted with two sensors that measure the angle of the stick and the displacement of the cart. The cart also has an actuator to apply a horizontal force on the cart to drive it forwards and backwards. The aim of the controller is to manipulate the force on the cart in order to balance the stick upright.

The state for this system is defined as:
\begin{equation}
\mathbf{x} = [ cart \;displacement, \; cart \;velocity, \; stick \;angle, \; stick \;angular\; velocity ]^T
\end{equation}
and the state space model that describes this system is as follows:
```
A = [[0.0, 1.0, 0.0, 0.0 ],
[0.0, -0.18, 2.68, 0.0 ],
[0.00, 0.00, 0.00, 1.00],
[0.00, -0.45, 31.21, 0.00]]
B = [[0.00],
[1.82],
[0.00],
[4.55]]
C = [[1, 0, 0, 0],[0,0,1,0]]
D = [[0],[0]]
```
## Take a look at the Open Loop System Poles
To get an idea of how this system behaves before we add any feedback control we can look at the poles of the open loop (uncontrolled) system. Poles are the roots of a characteristic equation derived from the system model. They're often complex numbers (having a real and an imaginary component) and are useful in understanding how the output of a system will respond to changes in its input.
There's quite a bit of interesting information that can be gleaned from looking at system poles but for now we'll focus on stability; which is determined by the pole with the largest real component (the x-axis in the plot below). If a system has any poles with positive real components then that system will be inherently unstable (i.e some or all of the state will shoot off to infinity if the system is left to its own devices).
The inverted pendulum has a pole at Re(+5.56) which makes sense when you consider that if the stick is stood up on its end and let go it'd fall over (obviously it'd stop short of infinity when it hits the side of the cart, but this isn't taken into account by the model). Using a feedback controller we'll move this pole over to the left of the imaginary axis in the plot below and in so doing, stabilise the system.
```
plant = ss(A, B, C, D)
open_loop_poles = pole(plant)
print('\nThe poles of the open loop system are:\n')
print(open_loop_poles)
pole_plot(open_loop_poles, 'Open Loop Poles')
```
# Design a Control Law
With a model defined, we can get started on calculating the gain matrix K (the control law), which determines the control input necessary to regulate the system state to $ \boldsymbol{0} $ (all zeros; you might want to control it to other set points, but doing so just requires offsetting the control law, which can be calculated on the Arduino).
## Check for Controllability
For it to be possible to control a system defined by a given state space model, that model needs to be controllable. Being controllable simply means that the available set of control inputs are capable of driving the entire state to a desired set point. If there's a part of the system that is totally decoupled from the actuators that are manipulated by the controller then a system won't be controllable.
A system is controllable if the rank of the controllability matrix is the same as the number of states in the system model.
```
controllability = ctrb(A, B)
print('The controllability matrix is:\n')
print(controllability)
if np.linalg.matrix_rank(controllability) == np.array(B).shape[0]:
    print('\nThe system is controllable!')
else:
    print('\nThe system is not controllable, double-check your modelling and that you entered the system matrices correctly')
```
## Fill out the Quadratic Cost Function
Assuming the system is controllable, we can get started on calculating the control gains. The approach we take here is to calculate a Linear Quadratic Regulator. An LQR is basically a control law (K matrix) that minimises the quadratic cost function:
\begin{equation}
J = \int_0^\infty (\boldsymbol{x}' Q \boldsymbol{x} + \boldsymbol{u}' R \boldsymbol{u})\; dt
\end{equation}
The best way to think about this cost function is to realise that whenever we switch on the state space controller, it'll expend some amount of control effort to bring the state to $ \boldsymbol{0} $ (all zeros). Ideally it'll do that as quickly and with as little overshoot as possible. We can represent that with the expression $ \int_0^\infty \boldsymbol{x}' \;\boldsymbol{x} \; dt $. Similarly it's probably a good idea to keep the control effort to a minimum such that the controller is energy efficient and doesn't damage the system with overly aggressive control inputs. This total control effort can be represented with $ \int_0^\infty \boldsymbol{u}' \;\boldsymbol{u} \; dt $.
Inevitably, there'll be some parts of the state and some control inputs that we care about minimising more than others. To reflect that in the cost function we specify two matrices; $ Q $ and $ R $
$ Q_{ctrl} \in \mathbf{R}^{X \;\times\; X} $ is the state weight matrix; the elements on its diagonal represent how important it is to tighly control the corresponding state element (as in Q[0,0] corresponds to x[0]).
$ R_{ctrl} \in \mathbf{R}^{U \;\times\; U} $ is the input weight matrix; the elements on its diagonal represent how important it is to minimise the use of the corresponding control input (as in R[0,0] corresponds to u[0]).
```
Q_ctrl = [[5000, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 100, 0],
[0, 0, 0, 0]]
R_ctrl = [1]
```
## Calculate the Gain Matrix for a Linear Quadratic Regulator
With a cost function defined, the cell below will calculate the gain matrix K for an LQR. Bear in mind it usually takes a bit of tweaking of the cost function to arrive at a good K matrix. Also note that it's the relative value of each weight that's important, not their absolute values. You can multiply both $ Q_{ctrl} $ and $ R_{ctrl} $ by 1e20 and you'll still wind up with the same gains.
To guide your tweaking it's helpful to see the effect that different weightings have on the closed loop system poles. Remember that the further the dominant pole (largest real component) is to the left of the Im axis, the more stable your system will be. That said, don't get carried away and set ridiculously high gains; your actual system might not behave in exactly the same way as the model and an overly aggressive controller might just end up destabilising the system under realistic conditions.
```
K, _, _ = lqr(A, B, Q_ctrl, R_ctrl)
print('The control gain is:\n')
print('K = ', K)
plant_w_full_state_feedback = ss(A,
B,
np.identity(plant.states),
np.zeros([plant.states, plant.inputs]))
controller = ss(np.zeros([plant.states, plant.states]),
np.zeros([plant.states, plant.states]),
np.zeros([plant.inputs, plant.states]),
K)
closed_loop_system = feedback(plant_w_full_state_feedback, controller)
closed_loop_poles = pole(closed_loop_system)
print('\nThe poles of the closed loop system are:\n')
print(closed_loop_poles)
pole_plot(closed_loop_poles, 'Closed Loop Poles')
```
# Design an Estimator
If you're lucky enough to be able to directly observe the system's entire state (in which case the $ C $ matrix will be an identity matrix) then you're done!
This obviously isn't the case for the cart pole since, given our sensors, we aren't able to directly observe the cart velocity or the stick angular velocity (we could differentiate the sensor readings of course, but doing so is a bad idea if the sensors are even a little bit noisy). Because of this, we'll need to introduce an estimator into the feedback controller to reconstruct those parts of the state based on our sensor readings $ \mathbf{y} $.
There's a nice duality between the estimator and the controller, so the basic approach we take to calculate the estimator gain (the $ L $ matrix) is very similar to the one used for the control law above.
## Check for Observability
Observability tells us whether the sensors we've attached to our system (as defined by the C matrix) are sufficient to derive an estimate of the state to feed into our control law. If for example, a part of the state was completely decoupled from all of the sensor measurements we take, then the system won't be observable and it'll be impossible to estimate and ultimately, to control.
Similar to controllability, a system is observable if the rank of the observability matrix is the same as the number of states in the model.
```
observability = obsv(A, C)
print('The observability matrix is:\n')
print(observability)
if np.linalg.matrix_rank(observability) == plant.states:
    print('\nThe system is observable!')
else:
    print('\nThe system is not observable, double-check your modelling and that you entered the matrices correctly')
```
## Fill out the Noise Covariance Matrices
To calculate the estimator gain L, we can use the same algorithm as that used to calculate the control law. Again we define two matrices, $ Q $ and $ R $ however in this case their interpretations are slightly different.
$ Q_{est} \in \mathbf{R}^{X \;\times\; X} $ is referred to as the process noise covariance, it represents the accuracy of the state space model in being able to predict the next state based on the last state and the control input. It's assumed that the actual system is subject to some unknown noise which throws out the estimate of the state and in cases where that noise is very high, it's best to rely more heavily on the sensor readings.
$ R_{est} \in \mathbf{R}^{Y \;\times\; Y} $ is referred to as the sensor noise covariance, it represents the accuracy of the sensor readings in being able to observe the state. Here again, it's assumed that the actual sensors are subject to some unknown noise which throws out their measurements. In cases where this noise is very high, it's best to be less reliant on sensor readings.
```
Q_est = [[100, 0, 0, 0 ],
[0, 1000, 0, 0 ],
[0, 0, 100, 0 ],
[0, 0, 0, 10000]]
R_est = [[1,0],[0,1]]
```
## Calculate the Gain Matrix for a Linear Quadratic Estimator (aka Kalman Filter)
Ideally, the estimator's covariance matrices can be calculated empirically using data collected from the system's actual sensors and its model. Doing so is a bit outside of the scope of this notebook, so instead we can just tweak the noise values to come up with an estimator that converges on the actual state with a reasonable settling time based on the closed loop poles.
```
L, _, _ = lqr(np.array(A).T, np.array(C).T, Q_est, R_est)
L = L.T
print('The estimator gain is:\n')
print('L = ', L)
controller_w_estimator = ss(A - np.matmul(B , K) - np.matmul(L , C),
L,
K,
np.zeros([plant.inputs, plant.outputs]))
closed_loop_system_w_estimator = feedback(plant, controller_w_estimator)
closed_loop_estimator_poles = pole(closed_loop_system_w_estimator)
print('\nThe poles of the closed loop system are:\n')
print(closed_loop_estimator_poles)
pole_plot(closed_loop_estimator_poles, 'Closed Loop Poles with Estimation')
```
# And You're Done!
Congratulations! You've tuned an LQR and an LQE to suit your system model and you can now cut and paste the gains into your arduino sketch.
## Coming soon, Integral Gain selection!
# Team Surface Velocity
### **Members**: Grace Barcheck, Canyon Breyer, Rodrigo Gómez-Fell, Trevor Hillebrand, Ben Hills, Lynn Kaluzienski, Joseph Martin, David Polashenski
### **Science Advisor**: Daniel Shapero
### **Special Thanks**: Ben Smith, David Shean
### Motivation
**Speaker: Canyon Breyer**
Previous work by Marsh and Rack (2012) and Lee and others (2012) demonstrated the value of using satellite altimetry as a method of calculating ice surface velocity, utilizing the Geoscience Laser Altimeter System (GLAS) on board ICESat. This altimetry method has several advantages over more traditional techniques due to its high pointing accuracy for geo-location and its ability to measure velocity in regions that lack visible surface features (Marsh and Rack, 2012). The method also has the added benefit of dealing with tidal fluctuations without the need for a tidal correction model. The motivation for this project was to expand the methodology outlined in Marsh and Rack (2012) to the ICESat-2 dataset. The smaller footprint of the ICESat-2 mission will likely improve the overall accuracy of velocity measurements, and the nature of its precise repeat passes provides an avenue for studying temporal variations of glacier velocities.
### Project Objective:
**Speaker: Rodrigo Gómez-Fell**
Extract surface ice velocity on polar regions from ICESat-2 along track measurements
##### Goals:
- Compare the capabilities of ICESat-2 to extract surface ice velocity from ice shelves and ice streams
- Compare ICESat GLAS methodology (along track) to ICESat-2 across track
- Use crossovers for calculating velocities and determine how the measurements compare with simple along track and across track.
  - Does this resolve different directions of ice flow?
- Can a surface velocity product be extracted from ATL06, or is ATL03 the more suitable product?
### Study Area:
When looking for a study region to test our ICESat-2 velocity derivation method, we prioritized regions that **1)** included both grounded and floating ice and **2)** had a good alignment between satellite track position and overall flow direction. We found Foundation Ice Stream, a large ice stream draining into the Filchner-Ronne Ice Shelf, to meet both of these criteria.

### Data Selection
We used the ICESat-2 Land Ice Height (ATL06) product, and the MEaSUREs Antarctic Velocity Map V2 (Rignot, 2017) for validation of our derived velocities.
### Method
**Speaker: Ben Hills**
Following Marsh and Rack (2012), we used the along-track slope of the elevation for the analysis; this amplifies differences in the ice profile between repeat measurements and also removes the influence of tidal effects. This is portrayed in the figure below.


Fig.2: From Marsh and Rack 2012. Schematic of the method used to reduce the effect of oblique surface features and ice flow which is non-parallel to ICESat tracks. Black lines indicate satellite tracks, grey ticks indicate the orientation of surface features, and ⍺ is the feature-track angle. Bottom right profile illustrates that after adjustment there is no relative feature displacement due to cross-track separation, therefore all displacement is due to ice movement in the track direction.
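A minimal sketch of that pre-processing step (using made-up arrays; the real along-track data are loaded further below) is just a numerical derivative of elevation with respect to along-track distance:
```
import numpy as np

x_atc = np.arange(0, 1000, 20.0)               # along-track distance (m)
h = 100 + 0.01 * x_atc + np.sin(x_atc / 50.0)  # made-up elevation profile (m)
slope = np.gradient(h, x_atc)                  # dh/dx, used in place of h itself
```
The repeat profiles are then compared by cross-correlating their slopes rather than their absolute heights, as demonstrated in the cells that follow.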
### Our Methods:
**Cross-correlation background**
**Speaker: Grace Barcheck**
##### Test scipy.signal.correlate on some ATL06 data from Foundation Ice Stream (FIS)
```
import numpy as np
import scipy, sys, os, pyproj, glob, re, h5py
import matplotlib.pyplot as plt
from scipy.signal import correlate
from astropy.time import Time
%matplotlib widget
%load_ext autoreload
%autoreload 2
```
##### Test scipy.signal.correlate
Generate test data
```
dx = 0.1
x = np.arange(0,10,dx)
y = np.zeros(np.shape(x))
ix0 = 30
ix1 = 30 + 15
y[ix0:ix1] = 1
fig,axs = plt.subplots(1,2)
axs[0].plot(x,y,'k')
axs[0].set_xlabel('distance (m)')
axs[0].set_ylabel('value')
axs[1].plot(np.arange(len(x)), y,'k')
axs[1].set_xlabel('index')
```
Next, we generate a signal to correlate the test data with
```
imposed_offset = int(14/dx) # 14 meters, in units of samples
x_noise = np.arange(0,50,dx) # make the vector we're comparing with much longer
y_noise = np.zeros(np.shape(x_noise))
y_noise[ix0 + imposed_offset : ix1 + imposed_offset] = 1
# uncomment the line below to add noise
# y_noise = y_noise * np.random.random(np.shape(y_noise))
fig,axs = plt.subplots(1,2)
axs[0].plot(x,y,'k')
axs[0].set_xlabel('distance (m)')
axs[0].set_ylabel('value')
axs[1].plot(np.arange(len(x)), y, 'k')
axs[1].set_xlabel('index')
axs[0].plot(x_noise,y_noise, 'b')
axs[0].set_xlabel('distance (m)')
axs[0].set_ylabel('value')
axs[1].plot(np.arange(len(x_noise)), y_noise,'b')
axs[1].set_xlabel('index')
fig.suptitle('black = original, blue = shifted')
```
##### Try scipy.signal.correlate:
mode='full' returns the entire cross-correlation; 'valid' would return only the part computed without zero padding
method='direct' computes the correlation directly rather than via FFT
```
corr = correlate(y_noise,y, mode = 'full', method = 'direct')
norm_val = np.sqrt(np.sum(y_noise**2)*np.sum(y**2))
corr = corr / norm_val
```
Let's look at the dimensions of corr
```
print('corr: ', np.shape(corr))
print('x: ', np.shape(x))
print('x: ', np.shape(x_noise))
```
##### Look at the correlation visualized in the plots below
```
# lagvec = np.arange(0,len(x_noise) - len(x) + 1)
lagvec = np.arange( -(len(x) - 1), len(x_noise), 1)
shift_vec = lagvec * dx
ix_peak = np.arange(len(corr))[corr == np.nanmax(corr)][0]
best_lag = lagvec[ix_peak]
best_shift = shift_vec[ix_peak]
fig,axs = plt.subplots(3,1)
axs[0].plot(lagvec,corr)
axs[0].plot(lagvec[ix_peak],corr[ix_peak], 'r*')
axs[0].set_xlabel('lag (samples)')
axs[0].set_ylabel('correlation coefficient')
axs[1].plot(shift_vec,corr)
axs[1].plot(shift_vec[ix_peak],corr[ix_peak], 'r*')
axs[1].set_xlabel('shift (m)')
axs[1].set_ylabel('correlation coefficient')
axs[2].plot(x + best_shift, y,'k')
axs[2].plot(x_noise, y_noise, 'b--')
axs[2].set_xlabel('shift (m)')
fig.suptitle(' '.join(['Shift ', str(best_lag), ' samples, or ', str(best_shift), ' m to line up signals']))
```
### A little Background on cross-correlation...

### Applying our method to ATL06 data
**Speaker: Ben Hills**
Load repeat data:
Import readers, etc.
```
# ! cd ..; [ -d pointCollection ] || git clone https://www.github.com/smithB/pointCollection.git
# sys.path.append(os.path.join(os.getcwd(), '..'))
#!python3 -m pip install --user git+https://github.com/tsutterley/pointCollection.git@pip
import pointCollection as pc
moa_datapath = '/srv/tutorial-data/land_ice_applications/'
datapath = '/home/jovyan/shared/surface_velocity/FIS_ATL06/'
```
#### **Geographic setting: Foundation Ice Stream**
```
print(pc.__file__)
spatial_extent = np.array([-65, -86, -55, -81])
lat=spatial_extent[[1, 3, 3, 1, 1]]
lon=spatial_extent[[2, 2, 0, 0, 2]]
print(lat)
print(lon)
# project the coordinates to Antarctic polar stereographic
xy=np.array(pyproj.Proj(3031)(lon, lat))
# get the bounds of the projected coordinates
XR=[np.nanmin(xy[0,:]), np.nanmax(xy[0,:])]
YR=[np.nanmin(xy[1,:]), np.nanmax(xy[1,:])]
MOA=pc.grid.data().from_geotif(os.path.join(moa_datapath, 'MOA','moa_2009_1km.tif'), bounds=[XR, YR])
# show the mosaic:
plt.figure()
MOA.show(cmap='gray', clim=[14000, 17000])
plt.plot(xy[0,:], xy[1,:])
plt.title('Mosaic of Antarctica for Foundation Ice Stream')
```
##### Load the repeat track data
ATL06 reader
```
def atl06_to_dict(filename, beam, field_dict=None, index=None, epsg=None):
"""
Read selected datasets from an ATL06 file
Input arguments:
filename: ATl06 file to read
beam: a string specifying which beam is to be read (ex: gt1l, gt1r, gt2l, etc)
field_dict: A dictionary describing the fields to be read
keys give the group names to be read,
entries are lists of datasets within the groups
index: which entries in each field to read
epsg: an EPSG code specifying a projection (see www.epsg.org). Good choices are:
for Greenland, 3413 (polar stereographic projection, with Greenland along the Y axis)
for Antarctica, 3031 (polar stereographic projection, centered on the South Pole)
Output argument:
D6: dictionary containing ATL06 data. Each dataset in
dataset_dict has its own entry in D6. Each dataset
in D6 contains a numpy array containing the
data
"""
if field_dict is None:
field_dict={None:['latitude','longitude','h_li', 'atl06_quality_summary'],\
'ground_track':['x_atc','y_atc'],\
'fit_statistics':['dh_fit_dx', 'dh_fit_dy']}
D={}
# below: file_re = regular expression, it will pull apart the regular expression to get the information from the filename
file_re=re.compile('ATL06_(?P<date>\d+)_(?P<rgt>\d\d\d\d)(?P<cycle>\d\d)(?P<region>\d\d)_(?P<release>\d\d\d)_(?P<version>\d\d).h5')
with h5py.File(filename,'r') as h5f:
for key in field_dict:
for ds in field_dict[key]:
if key is not None:
ds_name=beam+'/land_ice_segments/'+key+'/'+ds
else:
ds_name=beam+'/land_ice_segments/'+ds
if index is not None:
D[ds]=np.array(h5f[ds_name][index])
else:
D[ds]=np.array(h5f[ds_name])
if '_FillValue' in h5f[ds_name].attrs:
bad_vals=D[ds]==h5f[ds_name].attrs['_FillValue']
D[ds]=D[ds].astype(float)
D[ds][bad_vals]=np.NaN
D['data_start_utc'] = h5f['/ancillary_data/data_start_utc'][:]
D['delta_time'] = h5f['/' + beam + '/land_ice_segments/delta_time'][:]
D['segment_id'] = h5f['/' + beam + '/land_ice_segments/segment_id'][:]
if epsg is not None:
xy=np.array(pyproj.proj.Proj(epsg)(D['longitude'], D['latitude']))
D['x']=xy[0,:].reshape(D['latitude'].shape)
D['y']=xy[1,:].reshape(D['latitude'].shape)
temp=file_re.search(filename)
D['rgt']=int(temp['rgt'])
D['cycle']=int(temp['cycle'])
D['beam']=beam
return D
```
##### Next we will read in the files
```
# find all the files in the directory:
# ATL06_files=glob.glob(os.path.join(datapath, 'PIG_ATL06', '*.h5'))
rgt = '0848'
ATL06_files=glob.glob(os.path.join(datapath, '*' + rgt + '*.h5'))
D_dict={}
error_count=0
for file in ATL06_files[:10]:
try:
D_dict[file]=atl06_to_dict(file, '/gt2l', index=slice(0, -1, 25), epsg=3031)
except KeyError as e:
print(f'file {file} encountered error {e}')
error_count += 1
print(f"read {len(D_dict)} data files of which {error_count} gave errors")
# find all the files in the directory:
# ATL06_files=glob.glob(os.path.join(datapath, 'PIG_ATL06', '*.h5'))
rgt = '0537'
ATL06_files=glob.glob(os.path.join(datapath, '*' + rgt + '*.h5'))
#D_dict={}
error_count=0
for file in ATL06_files[:10]:
try:
D_dict[file]=atl06_to_dict(file, '/gt2l', index=slice(0, -1, 25), epsg=3031)
except KeyError as e:
print(f'file {file} encountered error {e}')
error_count += 1
print(f"read {len(D_dict)} data files of which {error_count} gave errors")
```
##### Then, we will plot the ground tracks
```
plt.figure(figsize=[8,8])
hax0=plt.gcf().add_subplot(211, aspect='equal')
MOA.show(ax=hax0, cmap='gray', clim=[14000, 17000]);
hax1=plt.gcf().add_subplot(212, aspect='equal', sharex=hax0, sharey=hax0)
MOA.show(ax=hax1, cmap='gray', clim=[14000, 17000]);
for fname, Di in D_dict.items():
cycle=Di['cycle']
if cycle <= 2:
ax=hax0
else:
ax=hax1
#print(fname)
#print(f'\t{rgt}, {cycle}, {region}')
ax.plot(Di['x'], Di['y'])
if True:
try:
if cycle < 3:
ax.text(Di['x'][0], Di['y'][0], f"rgt={Di['rgt']}, cyc={cycle}", clip_on=True)
elif cycle==3:
ax.text(Di['x'][0], Di['y'][0], f"rgt={Di['rgt']}, cyc={cycle}+", clip_on=True)
except IndexError:
pass
hax0.set_title('cycles 1 and 2');
hax1.set_title('cycle 3+');
# find all the files in the directory:
# ATL06_files=glob.glob(os.path.join(datapath, 'PIG_ATL06', '*.h5'))
rgt = '0848'
ATL06_files=glob.glob(os.path.join(datapath, '*' + rgt + '*.h5'))
D_dict={}
error_count=0
for file in ATL06_files[:10]:
try:
D_dict[file]=atl06_to_dict(file, '/gt2l', index=slice(0, -1, 25), epsg=3031)
except KeyError as e:
print(f'file {file} encountered error {e}')
error_count += 1
print(f"read {len(D_dict)} data files of which {error_count} gave errors")
```
##### Repeat track elevation profile
```
# A revised code to plot the elevations of segment midpoints (h_li):
def plot_elevation(D6, ind=None, **kwargs):
"""
Plot midpoint elevation for each ATL06 segment
"""
if ind is None:
ind=np.ones_like(D6['h_li'], dtype=bool)
# pull out heights of segment midpoints
h_li = D6['h_li'][ind]
# pull out along track x coordinates of segment midpoints
x_atc = D6['x_atc'][ind]
plt.plot(x_atc, h_li, **kwargs)
```
**Data Visualization**
```
D_2l={}
D_2r={}
# specify the rgt here:
rgt="0027"
rgt="0848" #Ben's suggestion
# iterate over the repeat cycles
for cycle in ['03','04','05','06','07']:
for filename in glob.glob(os.path.join(datapath, f'*ATL06_*_{rgt}{cycle}*_003*.h5')):
try:
# read the left-beam data
D_2l[filename]=atl06_to_dict(filename,'/gt2l', index=None, epsg=3031)
# read the right-beam data
D_2r[filename]=atl06_to_dict(filename,'/gt2r', index=None, epsg=3031)
# plot the locations in the previous plot
map_ax.plot(D_2r[filename]['x'], D_2r[filename]['y'],'k');
map_ax.plot(D_2l[filename]['x'], D_2l[filename]['y'],'k');
except Exception as e:
print(f'filename={filename}, exception={e}')
plt.figure();
for filename, Di in D_2l.items():
#Plot only points that have ATL06_quality_summary==0 (good points)
hl=plot_elevation(Di, ind=Di['atl06_quality_summary']==0, label=f"cycle={Di['cycle']}")
#hl=plt.plot(Di['x_atc'][Di['atl06_quality_summary']==0], Di['h_li'][Di['atl06_quality_summary']==0], '.', label=f"cycle={Di['cycle']}")
plt.legend()
plt.xlabel('x_atc')
plt.ylabel('elevation');
```
##### Now, we need to pull out a segment and cross correlate:
Let's try 2.93e7 through x_atc=2.935e7
```
cycles = [] # names of cycles with data
for filename, Di in D_2l.items():
cycles += [str(Di['cycle']).zfill(2)]
cycles.sort()
# x1 = 2.93e7
# x2 = 2.935e7
beams = ['gt1l','gt1r','gt2l','gt2r','gt3l','gt3r']
# try and smooth without filling nans
dx = 20 # meters between ATL06 x_atc points (overrides the dx=0.1 used in the toy example above)
smoothing_window_size = int(np.round(60 / dx)) # 60 m running-average window in samples; odd multiples of 20 m only, otherwise it will break
filt = np.ones(smoothing_window_size)
smoothed = True
### extract and plot data from all available cycles
fig, axs = plt.subplots(4,1)
x_atc = {}
h_li_raw = {}
h_li = {}
h_li_diff = {}
times = {}
for cycle in cycles:
# find Di that matches cycle:
Di = {}
x_atc[cycle] = {}
h_li_raw[cycle] = {}
h_li[cycle] = {}
h_li_diff[cycle] = {}
times[cycle] = {}
filenames = glob.glob(os.path.join(datapath, f'*ATL06_*_{rgt}{cycle}*_003*.h5'))
for filename in filenames:
try:
for beam in beams:
Di[filename]=atl06_to_dict(filename,'/'+ beam, index=None, epsg=3031)
times[cycle][beam] = Di[filename]['data_start_utc']
# extract h_li and x_atc for that section
x_atc_tmp = Di[filename]['x_atc']
h_li_tmp = Di[filename]['h_li']#[ixs]
# segment ids:
seg_ids = Di[filename]['segment_id']
# print(len(seg_ids), len(x_atc_tmp))
# make a monotonically increasing x vector
# assumes dx = 20 exactly, so be careful referencing back
ind = seg_ids - np.nanmin(seg_ids) # indices starting at zero, using the segment_id field, so any skipped segment will be kept in correct location
x_full = np.arange(np.max(ind)+1) * 20 + x_atc_tmp[0]
h_full = np.zeros(np.max(ind)+1) + np.NaN
h_full[ind] = h_li_tmp
x_atc[cycle][beam] = x_full
h_li_raw[cycle][beam] = h_full
# running average smoother /filter
if smoothed == True:
h_smoothed = (1/smoothing_window_size) * np.convolve(filt, h_full, mode="same")
#h_smoothed = h_smoothed[int(np.floor(smoothing_window_size/2)):int(-np.floor(smoothing_window_size/2))] # cut off ends
h_li[cycle][beam] = h_smoothed
# # differentiate that section of data
h_diff = (h_smoothed[1:] - h_smoothed[0:-1]) / (x_full[1:] - x_full[0:-1])
else:
h_li[cycle][beam] = h_full
h_diff = (h_full[1:] - h_full[0:-1]) / (x_full[1:] - x_full[0:-1])
h_li_diff[cycle][beam] = h_diff
# plot
axs[0].plot(x_full, h_full)
axs[1].plot(x_full[1:], h_diff)
# axs[2].plot(x_atc_tmp[1:] - x_atc_tmp[:-1])
axs[2].plot(np.isnan(h_full))
axs[3].plot(seg_ids[1:]- seg_ids[:-1])
except Exception as e:
print(f'filename={filename}, exception={e}')
```
**Speaker: Grace Barcheck**
```
n_veloc = len(cycles) - 1
segment_length = 3000 # m
x1 = 2.935e7# 2.925e7#x_atc[cycles[0]][beams[0]][1000] <-- the very first x value in a file; doesn't work, I think b/c nans # 2.93e7
#x1=2.917e7
search_width = 800 # m
dx = 20 # meters between x_atc points
for veloc_number in range(n_veloc):
cycle1 = cycles[veloc_number]
cycle2 = cycles[veloc_number+1]
t1_string = times[cycle1]['gt1l'][0].astype(str) # figure out later if just picking the first one is ok
t1 = Time(t1_string)
t2_string = times[cycle2]['gt1l'][0].astype(str) # figure out later if just picking the first one is ok
t2 = Time(t2_string)
dt = (t2 - t1).jd # difference in julian days
velocities = {}
for beam in beams:
fig1, axs = plt.subplots(4,1)
# cut out small chunk of data at time t1 (first cycle)
x_full_t1 = x_atc[cycle1][beam]
ix_x1 = np.arange(len(x_full_t1))[x_full_t1 >= x1][0]
ix_x2 = ix_x1 + int(np.round(segment_length/dx))
x_t1 = x_full_t1[ix_x1:ix_x2]
h_li1 = h_li_diff[cycle1][beam][ix_x1-1:ix_x2-1] # start 1 index earlier because the data are differentiated
# cut out a wider chunk of data at time t2 (second cycle)
x_full_t2 = x_atc[cycle2][beam]
ix_x3 = ix_x1 - int(np.round(search_width/dx)) # offset on earlier end by # indices in search_width
ix_x4 = ix_x2 + int(np.round(search_width/dx)) # offset on later end by # indices in search_width
x_t2 = x_full_t2[ix_x3:ix_x4]
h_li2 = h_li_diff[cycle2][beam][ix_x3:ix_x4]
# plot data
axs[0].plot(x_t2, h_li2, 'r')
axs[0].plot(x_t1, h_li1, 'k')
axs[0].set_xlabel('x_atc (m)')
# correlate old with newer data
corr = correlate(h_li1, h_li2, mode = 'valid', method = 'direct')
norm_val = np.sqrt(np.sum(h_li1**2)*np.sum(h_li2**2)) # normalize so values range between 0 and 1
corr = corr / norm_val
# lagvec = np.arange( -(len(h_li1) - 1), len(h_li2), 1)# for mode = 'full'
# lagvec = np.arange( -int(search_width/dx) - 1, int(search_width/dx) +1, 1) # for mode = 'valid'
lagvec = np.arange(- int(np.round(search_width/dx)), int(search_width/dx) +1,1)# for mode = 'valid'
shift_vec = lagvec * dx
ix_peak = np.arange(len(corr))[corr == np.nanmax(corr)][0]
best_lag = lagvec[ix_peak]
best_shift = shift_vec[ix_peak]
velocities[beam] = best_shift/(dt/365)
axs[1].plot(lagvec,corr)
axs[1].plot(lagvec[ix_peak],corr[ix_peak], 'r*')
axs[1].set_xlabel('lag (samples)')
axs[2].plot(shift_vec,corr)
axs[2].plot(shift_vec[ix_peak],corr[ix_peak], 'r*')
axs[2].set_xlabel('shift (m)')
# plot shifted data
axs[3].plot(x_t2, h_li2, 'r')
axs[3].plot(x_t1 - best_shift, h_li1, 'k')
axs[3].set_xlabel('x_atc (m)')
axs[0].text(x_t2[100], 0.6*np.nanmax(h_li2), beam)
axs[1].text(lagvec[5], 0.6*np.nanmax(corr), 'best lag: ' + str(best_lag) + '; corr val: ' + str(np.round(corr[ix_peak],3)))
axs[2].text(shift_vec[5], 0.6*np.nanmax(corr), 'best shift: ' + str(best_shift) + ' m'+ '; corr val: ' + str(np.round(corr[ix_peak],3)))
axs[2].text(shift_vec[5], 0.3*np.nanmax(corr), 'veloc of ' + str(np.round(best_shift/(dt/365),1)) + ' m/yr')
plt.tight_layout()
fig1.suptitle('black = older cycle data, red = newer cycle data to search across')
n_veloc = len(cycles) - 1
segment_length = 2000 # m
search_width = 800 # m
dx = 20 # meters between x_atc points
correlation_threshold = 0.65
x1 = 2.915e7#x_atc[cycles[0]][beams[0]][1000] <-- the very first x value in a file; doesn't work, I think b/c nans # 2.93e7
x1s = x_atc[cycles[veloc_number]][beams[0]][search_width:-segment_length-2*search_width:10]
velocities = {}
correlations = {}
for beam in beams:
velocities[beam] = np.empty_like(x1s)
correlations[beam] = np.empty_like(x1s)
for xi,x1 in enumerate(x1s):
for veloc_number in range(n_veloc):
cycle1 = cycles[veloc_number]
cycle2 = cycles[veloc_number+1]
t1_string = times[cycle1]['gt1l'][0].astype(str) # figure out later if just picking the first one is ok
t1 = Time(t1_string)
t2_string = times[cycle2]['gt1l'][0].astype(str) # figure out later if just picking the first one is ok
t2 = Time(t2_string)
dt = (t2 - t1).jd # difference in julian days
for beam in beams:
# cut out small chunk of data at time t1 (first cycle)
x_full_t1 = x_atc[cycle1][beam]
ix_x1 = np.arange(len(x_full_t1))[x_full_t1 >= x1][0]
ix_x2 = ix_x1 + int(np.round(segment_length/dx))
x_t1 = x_full_t1[ix_x1:ix_x2]
h_li1 = h_li_diff[cycle1][beam][ix_x1-1:ix_x2-1] # start 1 index earlier because the data are differentiated
# cut out a wider chunk of data at time t2 (second cycle)
x_full_t2 = x_atc[cycle2][beam]
ix_x3 = ix_x1 - int(np.round(search_width/dx)) # offset on earlier end by # indices in search_width
ix_x4 = ix_x2 + int(np.round(search_width/dx)) # offset on later end by # indices in search_width
x_t2 = x_full_t2[ix_x3:ix_x4]
h_li2 = h_li_diff[cycle2][beam][ix_x3:ix_x4]
# correlate old with newer data
corr = correlate(h_li1, h_li2, mode = 'valid', method = 'direct')
norm_val = np.sqrt(np.sum(h_li1**2)*np.sum(h_li2**2)) # normalize so values range between 0 and 1
corr = corr / norm_val
# lagvec = np.arange( -(len(h_li1) - 1), len(h_li2), 1)# for mode = 'full'
# lagvec = np.arange( -int(search_width/dx) - 1, int(search_width/dx) +1, 1) # for mode = 'valid'
lagvec = np.arange(- int(np.round(search_width/dx)), int(search_width/dx) +1,1)# for mode = 'valid'
shift_vec = lagvec * dx
if all(np.isnan(corr)):
velocities[beam][xi] = np.nan
correlations[beam][xi] = np.nan
else:
correlation_value = np.nanmax(corr)
if correlation_value >= correlation_threshold:
ix_peak = np.arange(len(corr))[corr == correlation_value][0]
best_lag = lagvec[ix_peak]
best_shift = shift_vec[ix_peak]
velocities[beam][xi] = best_shift/(dt/365)
correlations[beam][xi] = correlation_value
else:
velocities[beam][xi] = np.nan
correlations[beam][xi] = correlation_value
plt.figure()
ax1 = plt.subplot(211)
for filename, Di in D_2l.items():
#Plot only points that have ATL06_quality_summary==0 (good points)
hl=plot_elevation(Di, ind=Di['atl06_quality_summary']==0, label=f"cycle={Di['cycle']}")
#hl=plt.plot(Di['x_atc'][Di['atl06_quality_summary']==0], Di['h_li'][Di['atl06_quality_summary']==0], '.', label=f"cycle={Di['cycle']}")
plt.legend()
plt.ylabel('elevation');
ax2 = plt.subplot(212,sharex=ax1)
for beam in beams:
plt.plot(x1s+dx*(segment_length/2),velocities[beam],'.',alpha=0.2,ms=3,label=beam)
plt.ylabel('velocity (m/yr)')
plt.xlabel('x_atc')
plt.ylim(0,1500)
plt.legend()
plt.suptitle('Along track velocity: all beams')
```
#### **Median velocity for all 6 beams:**
**Above a cross-correlation threshold of 0.65**
```
plt.figure()
ax1 = plt.subplot(211)
for filename, Di in D_2l.items():
#Plot only points that have ATL06_quality_summary==0 (good points)
hl=plot_elevation(Di, ind=Di['atl06_quality_summary']==0, label=f"cycle={Di['cycle']}")
#hl=plt.plot(Di['x_atc'][Di['atl06_quality_summary']==0], Di['h_li'][Di['atl06_quality_summary']==0], '.', label=f"cycle={Di['cycle']}")
plt.legend()
plt.ylabel('elevation');
ax2 = plt.subplot(212,sharex=ax1)
medians = np.empty(len(x1s))
stds = np.empty(len(x1s))
for xi, x1 in enumerate(x1s):
corr_vals = []
velocs = []
for beam in beams:
corr_vals += [correlations[beam][xi]]
velocs += [velocities[beam][xi]]
n_obs = len(velocs)
if n_obs >0:
corr_mask = np.array(corr_vals) >= correlation_threshold
veloc_mask = np.abs(np.array(velocs)) < 0.67*segment_length # get rid of segments that are nailed against one edge for some reason
mask = corr_mask * veloc_mask
median_veloc = np.nanmedian(np.array(velocs)[mask])
std_veloc = np.nanstd(np.array(velocs)[mask])
medians[xi] = median_veloc
stds[xi] = std_veloc
ax2.plot([x1,x1], [median_veloc - std_veloc, median_veloc +std_veloc], '-', color= [0.7, 0.7, 0.7])
ax2.plot(x1s, medians, 'k.', markersize=2)
# for beam in beams:
# plt.plot(x1s+dx*(segment_length/2),velocities[beam],'.',alpha=0.2,ms=3,label=beam)
plt.ylabel('velocity (m/yr)')
plt.xlabel('x_atc')
plt.ylim(0,1500)
plt.legend()
plt.suptitle('Median along track velocity')
plt.figure()
ax1 = plt.subplot(211)
for beam in beams:
xvals = x1s+dx*(segment_length/2)
corrs = correlations[beam]
ixs = corrs >= correlation_threshold
ax1.plot(xvals[ixs], corrs[ixs],'.',alpha=0.2,ms=3,label=beam)
plt.ylabel('correlation values, 0->1')
plt.xlabel('x_atc')
plt.legend()
plt.suptitle('Correlation values > threshold, all beams')
ax1 = plt.subplot(212)
for beam in beams:
ax1.plot(x1s+dx*(segment_length/2),correlations[beam],'.',alpha=0.2,ms=3,label=beam)
plt.ylabel('correlation values, 0->1')
plt.xlabel('x_atc')
plt.legend()
plt.suptitle('Correlation values, all beams')
```
Comparison between measures
### Results:
**848**
**Speaker: Lynn Kaluzienski**


**537**
**Speaker: Joseph Martin**



### Future Work for the Surface Velocity Team:
**Speaker: David Polashenski**
- Calculating correlation uncertainty
- Considering larger, more complex areas
- Pending objectives
- Develop methodology to extract Across Track velocities and test efficacy
- Compare ICESat GLAS methodology (Along Track) to ICESat-2 methodology (Across Track)
- Compare the capabilities of ICESat-2 to extract surface ice velocity from ice shelves and ice streams
# Train-Eval
---
## Import Libraries
```
import os
import sys
from pathlib import Path
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.data import BucketIterator
sys.path.append("../")
from meta_infomax.datasets.fudan_reviews import prepare_data, get_data
```
## Global Constants
```
BSIZE = 16
ENCODER_DIM = 100
CLASSIFIER_DIM = 100
NUM_TASKS = 14
EPOCHS = 1
DATASETS = ['apparel', 'baby', 'books', 'camera_photo', 'electronics',
'health_personal_care', 'imdb', 'kitchen_housewares', 'magazines',
'music', 'software', 'sports_outdoors', 'toys_games', 'video']
```
# Load Data
```
from torchtext.vocab import GloVe
# prepare_data()
train_set, dev_set, test_set, vocab = get_data()
train_iter, dev_iter, test_iter = BucketIterator.splits((train_set, dev_set, test_set),
batch_sizes=(BSIZE, BSIZE*2, BSIZE*2),
sort_within_batch=False,
sort_key=lambda x: len(x.text))
batch = next(iter(train_iter))
batch
batch.text[0].shape, batch.label.shape, batch.task.shape
vocab.stoi["<pad>"]
```
# Baseline Model
```
class Encoder(nn.Module):
def __init__(self,emb_dim, hidden_dim, num_layers):
super().__init__()
self.lstm = nn.LSTM(emb_dim, hidden_dim, num_layers, batch_first=True, bidirectional=True)
def forward(self, x):
# hidden and cell states default to zeros when they are not passed explicitly
out, _ = self.lstm(x)
return out
class Classifier(nn.Module):
def __init__(self, in_dim, hidden_dim, out_dim):
super().__init__()
self.layers = nn.Sequential(
nn.Linear(in_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, out_dim)
)
def forward(self, x):
return self.layers(x)
class MultiTaskInfoMax(nn.Module):
def __init__(self, shared_encoder, embeddings, vocab, encoder_dim, encoder_layers, classifier_dim, out_dim):
super().__init__()
self.emb = nn.Embedding.from_pretrained(embeddings, freeze=True, padding_idx=vocab.stoi["<pad>"])
self.shared_encoder = shared_encoder
self.private_encoder = Encoder(embeddings.shape[-1], encoder_dim, encoder_layers)
self.classifier = Classifier(encoder_dim*4, classifier_dim, out_dim)
def forward(self, sentences, lengths):
sent_embed = self.emb(sentences)
shared_out = self.shared_encoder(sent_embed)
private_out = self.private_encoder(sent_embed)
h = torch.cat((shared_out, private_out), dim=-1)  # concatenate along the feature dimension (2*encoder_dim from each encoder)
out = self.classifier(h)
return out, shared_out, private_out
```
# Train
## Overfit Batch
```
vocab.vectors.shape
shared_encoder = Encoder(vocab.vectors.shape[1], ENCODER_DIM, 1)
shared_encoder
multitask_models = [MultiTaskInfoMax(shared_encoder=shared_encoder, embeddings=vocab.vectors, vocab=vocab,
encoder_dim=ENCODER_DIM,encoder_layers=1, classifier_dim=CLASSIFIER_DIM, out_dim=2)
for i in range(len(DATASETS))]
multitask_models[1]
multitask_models[int(batch.task[0])]  # model corresponding to the task of the first example in the batch
```
# Challenge
In this challenge, we will practice dimensionality reduction with PCA and variable selection with RFE. We will use the [Fifa 2019](https://www.kaggle.com/karangadiya/fifa19) _data set_, originally containing 89 variables for over 18 thousand players of the _game_ FIFA 2019.
## _Setup_
```
from math import sqrt
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as sct
import seaborn as sns
import statsmodels.api as sm
import statsmodels.stats as st
from sklearn.decomposition import PCA
from loguru import logger
from IPython import get_ipython
%matplotlib inline
from IPython.core.pylabtools import figsize
figsize(12, 8)
sns.set()
fifa = pd.read_csv("fifa.csv")
columns_to_drop = ["Unnamed: 0", "ID", "Name", "Photo", "Nationality", "Flag",
"Club", "Club Logo", "Value", "Wage", "Special", "Preferred Foot",
"International Reputation", "Weak Foot", "Skill Moves", "Work Rate",
"Body Type", "Real Face", "Position", "Jersey Number", "Joined",
"Loaned From", "Contract Valid Until", "Height", "Weight", "LS",
"ST", "RS", "LW", "LF", "CF", "RF", "RW", "LAM", "CAM", "RAM", "LM",
"LCM", "CM", "RCM", "RM", "LWB", "LDM", "CDM", "RDM", "RWB", "LB", "LCB",
"CB", "RCB", "RB", "Release Clause"
]
try:
fifa.drop(columns_to_drop, axis=1, inplace=True)
except KeyError:
logger.warning(f"Columns already dropped")
```
## Initial analysis
```
fifa.head()
fifa.shape
fifa.info()
fifa.isna().sum()
fifa = fifa.dropna()
fifa.isna().sum()
```
## Question 1
What fraction of the variance can be explained by the first principal component of `fifa`? Respond as a single float (between 0 and 1) rounded to three decimal places.
```
def q1():
pca = PCA(n_components = 1).fit(fifa)
return round(float(pca.explained_variance_ratio_), 3)
q1()
```
## Question 2
How many principal components do we need to explain 95% of the total variance? Answer as a single integer scalar.
```
def q2():
pca_095 = PCA(n_components=0.95)
X_reduced = pca_095.fit_transform(fifa)
return X_reduced.shape[1]
q2()
```
## Question 3
What are the coordinates (first and second principal components) of the point `x` below? The vector below is already centered. Be careful __not__ to center the vector again (for example, by invoking `PCA.transform()` on it). Respond as a float tuple rounded to three decimal places.
```
x = [0.87747123, -1.24990363, -1.3191255, -36.7341814,
-35.55091139, -37.29814417, -28.68671182, -30.90902583,
-42.37100061, -32.17082438, -28.86315326, -22.71193348,
-38.36945867, -20.61407566, -22.72696734, -25.50360703,
2.16339005, -27.96657305, -33.46004736, -5.08943224,
-30.21994603, 3.68803348, -36.10997302, -30.86899058,
-22.69827634, -37.95847789, -22.40090313, -30.54859849,
-26.64827358, -19.28162344, -34.69783578, -34.6614351,
48.38377664, 47.60840355, 45.76793876, 44.61110193,
49.28911284
]
def q3():
pca_q3 = PCA(n_components = 2)
pca_q3.fit(fifa)
return tuple(np.round(pca_q3.components_.dot(x),3))
q3()
```
## Question 4
Perform RFE with a linear regression estimator to select five variables, eliminating them one by one. What are the selected variables? Respond as a list of variable names.
```
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import RFE
def q4():
x = fifa.drop('Overall', axis=1)
y = fifa['Overall']
reg = LinearRegression().fit(x,y)
rfe = RFE(reg, n_features_to_select=5).fit(x, y)
nom_var = x.loc[:,rfe.get_support()].columns
return list(nom_var)
q4()
```
```
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
```
# Loading a pre-trained model in inference mode
In this tutorial, we will show how to instantiate a model pre-trained with VISSL to use it in inference mode to extract features from its trunk.
We will concentrate on loading a model pre-trained via SimCLR and using it in inference mode to extract features from an image, but the same workflow applies to any other pre-training method (MoCo, SimSiam, SwAV, etc.).
Through it, we will show:
1. How to instantiate a model associated to a pre-training configuration
2. Load the weights of the pre-trained model (taking the weights from our Model Zoo)
3. Use it to extract features associated to the VISSL Logo
**NOTE:** For a tutorial focused on how to use VISSL to schedule a feature extraction job, please refer to [the dedicated tutorial](https://colab.research.google.com/github/facebookresearch/vissl/blob/stable/tutorials/Feature_Extraction.ipynb)
**NOTE:** Please ensure your Collab Notebook has GPU available: `Edit -> Notebook Settings -> select GPU`.
**NOTE:** You can make a copy of this tutorial by `File -> Open in playground mode` and make changes there. DO NOT request access to this tutorial.
## Install VISSL
We will start this tutorial by installing VISSL, following the instructions [here](https://github.com/facebookresearch/vissl/blob/master/INSTALL.md#install-vissl-pip-package).
```
# Install: PyTorch (we assume 1.5.1 but VISSL works with all PyTorch versions >=1.4)
!pip install torch==1.5.1+cu101 torchvision==0.6.1+cu101 -f https://download.pytorch.org/whl/torch_stable.html
# install opencv
!pip install opencv-python
# install apex by checking system settings: cuda version, pytorch version, python version
import sys
import torch
version_str="".join([
f"py3{sys.version_info.minor}_cu",
torch.version.cuda.replace(".",""),
f"_pyt{torch.__version__[0:5:2]}"
])
print(version_str)
# install apex (pre-compiled with optimizer C++ extensions and CUDA kernels)
!pip install -f https://dl.fbaipublicfiles.com/vissl/packaging/apexwheels/{version_str}/download.html apex
# install VISSL
!pip install vissl
```
VISSL should be successfully installed by now and all the dependencies should be available.
```
import vissl
import tensorboard
import apex
import torch
```
## Loading a VISSL SimCLR pre-trained model
## Download the configuration
VISSL provides yaml configuration files for training a SimCLR model [here](https://github.com/facebookresearch/vissl/tree/master/configs/config/pretrain/simclr). We will start by fetching the configuration files we need.
```
!mkdir -p configs/config/simclr
!mkdir -p vissl/config
!wget -q -O configs/config/simclr/simclr_8node_resnet.yaml https://raw.githubusercontent.com/facebookresearch/vissl/master/configs/config/pretrain/simclr/simclr_8node_resnet.yaml
!wget -q -O vissl/config/defaults.yaml https://raw.githubusercontent.com/facebookresearch/vissl/master/vissl/config/defaults.yaml
```
## Download the ResNet-50 weights from the Model Zoo
```
!wget -q -O resnet_simclr.torch https://dl.fbaipublicfiles.com/vissl/model_zoo/simclr_rn101_1000ep_simclr_8node_resnet_16_07_20.35063cea/model_final_checkpoint_phase999.torch
```
## Create the model associated to the configuration
Load the configuration and merge it with the default configuration.
```
from omegaconf import OmegaConf
from vissl.utils.hydra_config import AttrDict
config = OmegaConf.load("configs/config/simclr/simclr_8node_resnet.yaml")
default_config = OmegaConf.load("vissl/config/defaults.yaml")
cfg = OmegaConf.merge(default_config, config)
```
Edit the configuration to freeze the trunk (inference mode) and ask for the extraction of the last layer feature.
```
cfg = AttrDict(cfg)
cfg.config.MODEL.WEIGHTS_INIT.PARAMS_FILE = "resnet_simclr.torch"
cfg.config.MODEL.FEATURE_EVAL_SETTINGS.EVAL_MODE_ON = True
cfg.config.MODEL.FEATURE_EVAL_SETTINGS.FREEZE_TRUNK_ONLY = True
cfg.config.MODEL.FEATURE_EVAL_SETTINGS.EXTRACT_TRUNK_FEATURES_ONLY = True
cfg.config.MODEL.FEATURE_EVAL_SETTINGS.SHOULD_FLATTEN_FEATS = False
cfg.config.MODEL.FEATURE_EVAL_SETTINGS.LINEAR_EVAL_FEAT_POOL_OPS_MAP = [["res5avg", ["Identity", []]]]
```
And then build the model:
```
from vissl.models import build_model
model = build_model(cfg.config.MODEL, cfg.config.OPTIMIZER)
```
## Loading the pre-trained weights
```
from classy_vision.generic.util import load_checkpoint
from vissl.utils.checkpoint import init_model_from_weights
weights = load_checkpoint(checkpoint_path=cfg.config.MODEL.WEIGHTS_INIT.PARAMS_FILE)
init_model_from_weights(
config=cfg.config,
model=model,
state_dict=weights,
state_dict_key_name="classy_state_dict",
skip_layers=[], # Use this if you do not want to load all layers
)
print("Loaded...")
```
## Trying the model on the VISSL Logo
```
!wget -q -O test_image.jpg https://raw.githubusercontent.com/facebookresearch/vissl/master/.github/logo/Logo_Color_Light_BG.png
from PIL import Image
import torchvision.transforms as transforms
image = Image.open("test_image.jpg")
image = image.convert("RGB")
pipeline = transforms.Compose([
transforms.CenterCrop(224),
transforms.ToTensor(),
])
x = pipeline(image)
features = model(x.unsqueeze(0))
```
The output is a list with as many representation layers as required in the configuration (in our case, `cfg.config.MODEL.FEATURE_EVAL_SETTINGS.LINEAR_EVAL_FEAT_POOL_OPS_MAP` asks for one representation layer, so we have just one output).
```
features[0].shape
```
Define the network:
```
import torch # PyTorch base
from torch.autograd import Variable # Tensor class w gradients
import torch.nn as nn # modules, layers, loss fns
import torch.nn.functional as F # Conv,Pool,Loss,Actvn,Nrmlz fns from here
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# 1 input image channel, 6 output channels, 5x5 square convolution
# Kernel
self.conv1 = nn.Conv2d(1, 6, 5)
self.conv2 = nn.Conv2d(6, 16, 5)
# an Affine Operation: y = Wx + b
self.fc1 = nn.Linear(16*5*5, 120) # Linear is Dense/Fully-Connected
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
# Max pooling over a (2, 2) window
# x = torch.nn.functional.max_pool2d(torch.nn.functional.relu(self.conv1(x)), (2,2))
x = F.max_pool2d(F.relu(self.conv1(x)), (2,2))
# If size is a square you can only specify a single number
x = F.max_pool2d(F.relu(self.conv2(x)), 2)
x = x.view(-1, self.num_flat_features(x))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
net = Net()
print(net)
```
You just have to define the `forward` function, and the `backward` function (where the gradients are computed) is automatically defined for you using `autograd`. You can use any of the Tensor operations in the `forward` function.
The learnable parameters of a model are returned by `net.parameters()`.
```
pars = list(net.parameters())
print(len(pars))
print(pars[0].size()) # conv1's .weight
```
The input to the forward is an `autograd.Variable`, and so is the output. **NOTE**: The expected input size for this net (LeNet) is 32x32. To use this net on the MNIST dataset, please resize the images from the dataset to 32x32.
```
input = Variable(torch.randn(1, 1, 32, 32))
out = net(input)
print(out)
```
Zero the gradient buffers of all parameters and backprops with random gradients:
```
net.zero_grad()
out.backward(torch.randn(1, 10))
```
**NOTE**:
`torch.nn` only supports mini-batches. The entire `torch.nn` package only supports inputs that are a mini-batch of samples, and not a single sample.
For example, `nn.Conv2d` will take in a 4D Tensor of `nSamples x nChannels x Height x Width`.
If you have a single sample, just use `input.unsqueeze(0)` to add a fake batch dimension.
Before proceeding further, let's recap all the classes you've seen so far.
**Recap**:
* `torch.Tensor` - A *multi-dimensional array*.
* `autograd.Variable` - *Wraps a Tensor and records the history of operations* applied to it. Has the same API as a `Tensor`, with some additions like `backward()`. Also *holds the gradient* wrt the tensor.
* `nn.Module` - Neural network module. *Convenient way of encapsulating parameters*, with helpers for moving them to GPU, exporting, loading, etc.
* `nn.Parameter` - A kind of Variable, that is *automatically registered as a parameter when assigned as an attribute to a* `Module`.
* `autograd.Function` - Implements *forward and backward definitions of an autograd operation*. Every `Variable` operation creates at least a single `Function` node that connects to functions that created a `Variable` and *encodes its history*.
**At this point, we covered:**
* Defining a neural network
* Processing inputs and calling backward.
**Still Left:**
* Computing the loss
* Updating the weights of the network
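As a preview of those two remaining steps, here is a minimal sketch (the loss function, dummy target and learning rate below are illustrative choices, not prescribed by this tutorial):
```
import torch.optim as optim

output = net(input)                    # forward pass
target = Variable(torch.randn(1, 10))  # a dummy target with the same shape as the output
criterion = nn.MSELoss()               # mean-squared error loss
loss = criterion(output, target)

optimizer = optim.SGD(net.parameters(), lr=0.01)  # illustrative learning rate
optimizer.zero_grad()   # zero the gradient buffers
loss.backward()         # backpropagate
optimizer.step()        # update the weights
```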
# EuroSciPy 2018: NumPy tutorial (https://github.com/gertingold/euroscipy-numpy-tutorial)
## Let's do some slicing
```
mylist = list(range(10))
print(mylist)
```
Use slicing to produce the following outputs:
[2, 3, 4, 5]
[0, 1, 2, 3, 4]
[6, 7, 8, 9]
[0, 2, 4, 6, 8]
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
[7, 5, 3]
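One possible set of slices that produces the outputs above:
```
mylist[2:6]       # [2, 3, 4, 5]
mylist[:5]        # [0, 1, 2, 3, 4]
mylist[6:]        # [6, 7, 8, 9]
mylist[::2]       # [0, 2, 4, 6, 8]
mylist[::-1]      # [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
mylist[7:2:-2]    # [7, 5, 3]
```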
## Matrices and lists of lists
```
matrix = [[0, 1, 2],
[3, 4, 5],
[6, 7, 8]]
```
Get the second row by slicing twice
Try to get the second column by slicing. ~~Do not use a list comprehension!~~
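Possible answers (note that a plain list of lists offers no way to slice out a column, which is part of the motivation for NumPy):
```
matrix[1][:]                 # slicing twice: the second row, [3, 4, 5]
[row[1] for row in matrix]   # the second column needs a comprehension: [1, 4, 7]
```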
## Getting started
Import the NumPy package
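The conventional import, assumed by all the cells below:
```
import numpy as np
```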
## Create an array
```
np.lookfor('create array')
help(np.array) # remember: Shift + Tab gives a pop-up help
```
Press Shift + Tab when your cursor is in a Code cell. This will open a pop-up with some helping text.
What happens when Shift + Tab is pressed a second time?
The variable `matrix` contains a list of lists. Turn it into an `ndarray` and assign it to the variable `myarray`. Verify that its type is correct.
For practicing purposes, arrays can conveniently be created with the `arange` method.
```
myarray1 = np.arange(6)
myarray1
def array_attributes(a):
for attr in ('ndim', 'size', 'itemsize', 'dtype', 'shape', 'strides'):
print('{:8s}: {}'.format(attr, getattr(a, attr)))
array_attributes(myarray1)
```
## Data types
Use `np.array()` to create arrays containing
* floats
* complex numbers
* booleans
* strings
and check the `dtype` attribute.
Do you understand what is happening in the following statement?
```
np.arange(1, 160, 10, dtype=np.int8)
```
## Strides/Reshape
```
myarray2 = myarray1.reshape(2, 3)
myarray2
array_attributes(myarray2)
myarray3 = myarray1.reshape(3, 2)
array_attributes(myarray3)
```
## Views
Set the first entry of `myarray1` to a new value, e.g. 42.
What happened to `myarray2`?
What happens when a matrix is transposed?
```
a = np.arange(9).reshape(3, 3)
a
a.T
```
Check the strides!
```
a.strides
a.T.strides
```
## View versus copy
identical object
```
a = np.arange(4)
b = a
id(a), id(b)
```
view: a different object working on the same data
```
b = a[:]
id(a), id(b)
a[0] = 42
a, b
```
an independent copy
```
a = np.arange(4)
b = np.copy(a)
id(a), id(b)
a[0] = 42
a, b
```
## Some array creation routines
### numerical ranges
`arange(`*start*, *stop*, *step*`)`, *stop* is not included in the array
```
np.arange(5, 30, 5)
```
`arange` resembles `range`, but also works for floats
Create the array [1, 1.1, 1.2, 1.3, 1.4, 1.5]
`linspace(`*start*, *stop*, *num*`)` determines the step to produce *num* equally spaced values, *stop* is included by default
Create the array [1., 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.]
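Possible solutions for the two exercises above (note that `linspace` avoids the floating-point endpoint issues that `arange` can have):
```
np.arange(1, 1.6, 0.1)     # [1. , 1.1, 1.2, 1.3, 1.4, 1.5]
np.linspace(1, 1.5, 6)     # same values, with the endpoint included explicitly
np.linspace(1, 2, 11)      # [1. , 1.1, ..., 1.9, 2. ]
```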
For equally spaced values on a logarithmic scale, use `logspace`.
```
np.logspace(-2, 2, 5)
np.logspace(0, 4, 9, base=2)
```
### Application
```
import matplotlib.pyplot as plt
%matplotlib inline
x = np.linspace(0, 10, 100)
y = np.cos(x)
plt.plot(x, y)
```
### Homogeneous data
```
np.zeros((4, 4))
```
Create a 4x4 array with integer zeros
```
np.ones((2, 3, 3))
```
Create a 3x3 array filled with tens
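Possible solutions:
```
np.zeros((4, 4), dtype=int)    # 4x4 integer zeros
10 * np.ones((3, 3))           # 3x3 filled with tens (or np.full((3, 3), 10))
```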
### Diagonal elements
```
np.diag([1, 2, 3, 4])
```
`diag` has an optional argument `k`. Try to find out what its effect is.
Replace the 1d array by a 2d array. What does `diag` do?
```
np.info(np.eye)
```
Create the 3x3 array
```
[[2, 1, 0],
 [1, 2, 1],
 [0, 1, 2]]
```
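One way to build it with `np.eye` and its `k` argument:
```
2*np.eye(3) + np.eye(3, k=1) + np.eye(3, k=-1)
```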
### Random numbers
What is the effect of np.random.seed?
```
np.random.seed()
np.random.rand(5, 2)
np.random.seed(1234)
np.random.rand(5, 2)
data = np.random.rand(20, 20)
plt.imshow(data, cmap=plt.cm.hot, interpolation='none')
plt.colorbar()
casts = np.random.randint(1, 7, (100, 3))
plt.hist(casts, np.linspace(0.5, 6.5, 7))
```
## Indexing and slicing
### 1d arrays
```
a = np.arange(10)
```
Create the array [7, 8, 9]
Create the array [2, 4, 6, 8]
Create the array [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
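Possible solutions:
```
a[7:]      # [7, 8, 9]
a[2::2]    # [2, 4, 6, 8]
a[::-1]    # [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
```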
### Higher dimensions
```
a = np.arange(40).reshape(5, 8)
```
Create the array [[21, 22, 23], [29, 30, 31], [37, 38, 39]]
Create the array [ 3, 11, 19, 27, 35]
Create the array [11, 12, 13]
Create the array [[ 8, 11, 14], [24, 27, 30]]
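Possible solutions for the four arrays above:
```
a[2:, 5:]        # [[21, 22, 23], [29, 30, 31], [37, 38, 39]]
a[:, 3]          # [ 3, 11, 19, 27, 35]
a[1, 3:6]        # [11, 12, 13]
a[1::2, ::3]     # [[ 8, 11, 14], [24, 27, 30]]
```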
## Fancy indexing ‒ Boolean mask
```
a = np.arange(40).reshape(5, 8)
a % 3 == 0
a[a % 3 == 0]
a[(1, 1, 2, 2, 3, 3), (3, 4, 2, 5, 3, 4)]
```
## Axes
Create an array and calculate the sum over all elements
Now calculate the sum along axis 0 ...
and now along axis 1
Identify the axis in the following array
```
a = np.arange(24).reshape(2, 3, 4)
a
```
## Axes in more than two dimensions
Create a three-dimensional array
Produce a two-dimensional array by cutting along axis 0 ...
and axis 1 ...
and axis 2
What do you get by simply using the index `[0]`?
What do you get by using `[..., 0]`?
## Exploring numerical operations
```
a = np.arange(4)
b = np.arange(4, 8)
a, b
a+b
a*b
```
Operations are elementwise. Check this by multiplying two 2d arrays...
... and now do a real matrix multiplication
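For example:
```
A = np.arange(4).reshape(2, 2)
B = np.arange(4, 8).reshape(2, 2)
A * B          # elementwise product
A @ B          # matrix product (equivalently np.dot(A, B))
```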
## Application: Random walk
```
length_of_walk = 10000
realizations = 5
angles = 2*np.pi*np.random.rand(length_of_walk, realizations)
x = np.cumsum(np.cos(angles), axis=0)
y = np.cumsum(np.sin(angles), axis=0)
plt.plot(x, y)
plt.axis('scaled')
plt.plot(np.hypot(x, y))
plt.plot(np.mean(x**2+y**2, axis=1))
plt.axis('scaled')
```
## Let's check the speed
```
%%timeit a = np.arange(1000000)
a**2
%%timeit xvals = range(1000000)
[xval**2 for xval in xvals]
%%timeit a = np.arange(100000)
np.sin(a)
import math
%%timeit xvals = range(100000)
[math.sin(xval) for xval in xvals]
```
## Broadcasting
```
a = np.arange(12).reshape(3, 4)
a
a+1
a+np.arange(4)
a+np.arange(3)
np.arange(3)
np.arange(3).reshape(3, 1)
a+np.arange(3).reshape(3, 1)
```
Create a multiplication table for the numbers from 1 to 10 starting from two appropriately chosen 1d arrays.
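One possible solution, broadcasting a column vector against a row vector:
```
n = np.arange(1, 11)
n[:, np.newaxis] * n     # 10x10 multiplication table
```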
As an alternative to `reshape` one can add additional axes with `np.newaxis` or `None`:
```
a = np.arange(5)
b = a[:, np.newaxis]
```
Check the shapes.
## Functions of two variables
```
x = np.linspace(-40, 40, 200)
y = x[:, np.newaxis]
z = np.sin(np.hypot(x-10, y))+np.sin(np.hypot(x+10, y))
plt.imshow(z, cmap='viridis')
x, y = np.mgrid[-10:10:0.1, -10:10:0.1]
x
y
plt.imshow(np.sin(x*y))
x, y = np.mgrid[-10:10:50j, -10:10:50j]
x
y
plt.imshow(np.arctan2(x, y))
```
It is natural to use broadcasting. Check out what happens when you replace `mgrid` by `ogrid`.
## Linear Algebra in NumPy
```
a = np.arange(4).reshape(2, 2)
eigenvalues, eigenvectors = np.linalg.eig(a)
eigenvalues
eigenvectors
```
Explore whether the eigenvectors are the rows or the columns.
Try out `eigvals` and other methods offered by `linalg` that you are interested in.
## Application: identify entry closest to ½
Create a 2d array containing random numbers and generate a vector containing for each row the entry closest to one-half.
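A possible solution using `argmin` along axis 1 together with fancy indexing:
```
data = np.random.rand(6, 4)
rows = np.arange(data.shape[0])
cols = np.argmin(np.abs(data - 0.5), axis=1)
closest = data[rows, cols]   # for each row, the entry closest to 0.5
```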
*Accompanying code examples of the book "Introduction to Artificial Neural Networks and Deep Learning: A Practical Guide with Applications in Python" by [Sebastian Raschka](https://sebastianraschka.com). All code examples are released under the [MIT license](https://github.com/rasbt/deep-learning-book/blob/master/LICENSE). If you find this content useful, please consider supporting the work by buying a [copy of the book](https://leanpub.com/ann-and-deeplearning).*
Other code examples and content are available on [GitHub](https://github.com/rasbt/deep-learning-book). The PDF and ebook versions of the book are available through [Leanpub](https://leanpub.com/ann-and-deeplearning).
```
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
```
# Model Zoo -- Using PyTorch Dataset Loading Utilities for Custom Datasets (MNIST)
This notebook provides an example for how to load an image dataset, stored as individual PNG files, using PyTorch's data loading utilities. For a more in-depth discussion, please see the official
- [Data Loading and Processing Tutorial](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html)
- [torch.utils.data](http://pytorch.org/docs/master/data.html) API documentation
In this example, we are using the cropped version of the **Street View House Numbers (SVHN) Dataset**, which is available at http://ufldl.stanford.edu/housenumbers/.
To execute the following examples, you need to download the 2 ".mat" files
- [train_32x32.mat](http://ufldl.stanford.edu/housenumbers/train_32x32.mat) (ca. 182 Mb, 73,257 images)
- [test_32x32.mat](http://ufldl.stanford.edu/housenumbers/test_32x32.mat) (ca. 65 Mb, 26,032 images)
## Imports
```
import pandas as pd
import numpy as np
import os
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchvision import transforms
import matplotlib.pyplot as plt
from PIL import Image
import scipy.io as sio
import imageio
```
## Dataset
The following function will convert the images from ".mat" into individual ".png" files. In addition, we will create CSV files containing the image paths and associated class labels.
```
def make_pngs(main_dir, mat_file, label):
if not os.path.exists(main_dir):
os.mkdir(main_dir)
sub_dir = os.path.join(main_dir, label)
if not os.path.exists(sub_dir):
os.mkdir(sub_dir)
data = sio.loadmat(mat_file)
X = np.transpose(data['X'], (3, 0, 1, 2))
y = data['y'].flatten()
with open(os.path.join(main_dir, '%s_labels.csv' % label), 'w') as out_f:
for i, img in enumerate(X):
file_path = os.path.join(sub_dir, str(i) + '.png')
imageio.imwrite(os.path.join(file_path),
img)
out_f.write("%d.png,%d\n" % (i, y[i]))
make_pngs(main_dir='svhn_cropped',
mat_file='train_32x32.mat',
label='train')
make_pngs(main_dir='svhn_cropped',
mat_file='test_32x32.mat',
label='test')
df = pd.read_csv('svhn_cropped/train_labels.csv', header=None, index_col=0)
df.head()
df = pd.read_csv('svhn_cropped/test_labels.csv', header=None, index_col=0)
df.head()
```
## Implementing a Custom Dataset Class
Now, we implement a custom `Dataset` for reading the images. The `__getitem__` method will
1. read a single image from disk based on an `index` (more on batching later)
2. perform a custom image transformation (if a `transform` argument is provided in the `__init__` construtor)
3. return a single image and its corresponding label
```
class SVHNDataset(Dataset):
"""Custom Dataset for loading cropped SVHN images"""
def __init__(self, csv_path, img_dir, transform=None):
df = pd.read_csv(csv_path, index_col=0, header=None)
self.img_dir = img_dir
self.csv_path = csv_path
self.img_names = df.index.values
self.y = df[1].values
self.transform = transform
def __getitem__(self, index):
img = Image.open(os.path.join(self.img_dir,
self.img_names[index]))
if self.transform is not None:
img = self.transform(img)
label = self.y[index]
return img, label
def __len__(self):
return self.y.shape[0]
```
Now that we have created our custom Dataset class, let us add some custom transformations via the `transforms` utilities from `torchvision`, we
1. normalize the images (here: dividing by 255)
2. converting the image arrays into PyTorch tensors
Then, we initialize Dataset instances for the training and test images using the label CSV files we created above.
Finally, we initialize a `DataLoader` that allows us to read from the dataset.
```
# Note that transforms.ToTensor()
# already divides pixels by 255. internally
custom_transform = transforms.Compose([#transforms.Grayscale(),
#transforms.Lambda(lambda x: x/255.),
transforms.ToTensor()])
train_dataset = SVHNDataset(csv_path='svhn_cropped/train_labels.csv',
img_dir='svhn_cropped/train',
transform=custom_transform)
test_dataset = SVHNDataset(csv_path='svhn_cropped/test_labels.csv',
img_dir='svhn_cropped/test',
transform=custom_transform)
BATCH_SIZE=128
train_loader = DataLoader(dataset=train_dataset,
batch_size=BATCH_SIZE,
shuffle=True,
num_workers=4)
test_loader = DataLoader(dataset=test_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=4)
```
That's it, now we can iterate over an epoch using the train_loader as an iterator and use the features and labels from the training dataset for model training:
## Iterating Through the Custom Dataset
```
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
torch.manual_seed(0)
num_epochs = 2
for epoch in range(num_epochs):
for batch_idx, (x, y) in enumerate(train_loader):
print('Epoch:', epoch+1, end='')
print(' | Batch index:', batch_idx, end='')
print(' | Batch size:', y.size()[0])
x = x.to(device)
y = y.to(device)
break
```
Just to make sure that the batches are being loaded correctly, let's print out the dimensions of the last batch:
```
x.shape
```
As we can see, each batch consists of 128 images, just as specified. However, one thing to keep in mind is that
PyTorch uses a different image layout (which is more efficient when working with CUDA); here, the image axes are "num_images x channels x height x width" (NCHW) instead of "num_images x height x width x channels" (NHWC):
To visually check that the images that coming of the data loader are intact, let's swap the axes to NHWC and convert an image from a Torch Tensor to a NumPy array so that we can visualize the image via `imshow`:
```
one_image = x[99].permute(1, 2, 0)
one_image.shape
# note that imshow also works fine with scaled
# images in [0, 1] range.
plt.imshow(one_image.to(torch.device('cpu')));
%watermark -iv
```

---
## 01. Interpolation of Functions
Eduard Larrañaga ([email protected])
---
## Interpolation
### Summary
This notebook presents some of the techniques used to interpolate a function.
---
## Interpolation
Astrophysical data (experimental and synthetic) usually consist of a set of discrete values of the form $(x_j, f_j)$, representing the value of a function $f(x)$ for a finite set of arguments $\{ x_0, x_1, x_2, ..., x_{n} \}$. However, on many occasions we need the value of the function at additional points (not belonging to the given set). **Interpolation** is the method that provides these values.
By **interpolation** we mean defining a function $g(x)$, using the known discrete information, such that $g(x_j) = f(x_j)$ and such that it approximates the value of the function $f$ at any point $x \in [x_{min}, x_{max}]$, where $x_{min} = \min \{ x_j \}$ and $x_{max} = \max \{ x_j \}$.
On the other hand, **extrapolation** would correspond to approximating the value of the function $f$ at a point $x \notin [x_{min}, x_{max}]$. However, that case will not be analyzed here.
---
## Simple Linear Interpolation
The simplest interpolation method is called **Polynomial Interpolation** and consists of finding a polynomial $p_n(x)$ of degree $n$ that passes through the $N = n+1$ points $x_j$, taking the values $p(x_j) = f(x_j)$, where $j=0,1,2,...,n$.
The polynomial is written in the general form
$p_n(x) = a_0 + a_1 x + a_2 x^2 + \cdots + a_n x^n$
where the $a_i$ are $n+1$ real constants determined by the conditions
$\left(
\begin{array}{ccccc}
1&x_0^1&x_0^2&\cdots&x_0^n\\
\vdots&\vdots&\vdots&\vdots&\vdots\\
\vdots&\vdots&\vdots&\vdots&\vdots\\
1&x_n^1&x_n^2&\cdots&x_n^n\\
\end{array}
\right)
\left(\begin{array}{c}
a_0\\
\vdots\\
\vdots\\
a_n
\end{array}\right)
=
\left(\begin{array}{c}
f(x_0)\\
\vdots\\
\vdots\\
f(x_n)
\end{array}\right)$
The solution of this system is easy to obtain in the cases of linear ($n=1$) and quadratic ($n=2$) interpolation, but it can be difficult to find for large values of $n$.
---
### Linear Interpolation
The linear interpolation ($n=1$) of a function $f(x)$ on an interval
$[x_i,x_{i+1}]$ requires knowing only two points.
Solving the resulting linear system gives the interpolated polynomial
\begin{equation}
p_1(x) = f(x_i) + \frac{f(x_{i+1}) - f(x_i)}{x_{i+1} - x_i} (x-x_i) + \mathcal{O}(\Delta x^2)
\end{equation}
where $\Delta x = x_{i+1} - x_i$.
The linear interpolation method provides a polynomial with second-order accuracy that can be differentiated once, but this derivative is not continuous at the endpoints of the interpolation interval, $x_i$ and $x_{i+1}$.
#### Example. Piecewise Linear Interpolation
Next we read a set of data points from a .txt file and interpolate linearly between each pair of points (*piecewise interpolation*).
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Reading the data
data = np.loadtxt('data_points.txt', comments='#', delimiter=',')
x = data[:,0]
f = data[:,1]
plt.figure()
plt.scatter(x,f)
plt.xlabel(r'$x$')
plt.ylabel(r'$f(x)$')
plt.show()
data.shape
def linearInterpolation(x1, x2, f1, f2, x):
p1 = f1 + ((f2-f1)/(x2-x1))*(x-x1)
return p1
N = len(x)
plt.figure(figsize=(7,5))
plt.scatter(x, f, color='black')
for i in range(N-1):
x_interval = np.linspace(x[i],x[i+1],3)
# Note that the number 3 in the line above indicates the number of
# points interpolated in each interval !
# (including the extreme points of the interval)
y_interval = linearInterpolation(x[i], x[i+1], f[i], f[i+1], x_interval)
plt.plot(x_interval, y_interval,'r')
plt.title(r'Linear Piecewise Interpolation')
plt.xlabel(r'$x$')
plt.ylabel(r'$p_1(x)$')
plt.show()
```
---
### Quadratic Interpolation
Quadratic interpolation ($n=2$) requires information from three points.
For example, one can take the three points $x_i$, $x_{i+1}$ and $x_{i+2}$ to interpolate the function $f(x)$ in the range $[x_{i},x_{i+1}]$. Solving the corresponding system of linear equations gives the polynomial
$p_2(x) = \frac{(x-x_{i+1})(x-x_{i+2})}{(x_i - x_{i+1})(x_i - x_{i+2})} f(x_i)
+ \frac{(x-x_{i})(x-x_{i+2})}{(x_{i+1} - x_{i})(x_{i+1} - x_{i+2})} f(x_{i+1})
+ \frac{(x-x_i)(x-x_{i+1})}{(x_{i+2} - x_i)(x_{i+2} - x_{i+1})} f(x_{i+2}) + \mathcal{O}(\Delta x^3)$,
where $\Delta x = \max \{ x_{i+2}-x_{i+1},x_{i+1}-x_i \}$.
In this case, the interpolated polynomial can be differentiated twice but, although its first derivative is continuous, the second derivative is not continuous at the endpoints of the interval.
#### Example. Piecewise Quadratic Interpolation
Next we read a set of data points from a .txt file and interpolate quadratically on sub-intervals (*quadratic piecewise interpolation*).
```
import numpy as np
import matplotlib.pyplot as plt
# Reading the data
data = np.loadtxt('data_points.txt', comments='#', delimiter=',')
x = data[:,0]
f = data[:,1]
def quadraticInterpolation(x1, x2, x3, f1, f2, f3, x):
p2 = (((x-x2)*(x-x3))/((x1-x2)*(x1-x3)))*f1 +\
(((x-x1)*(x-x3))/((x2-x1)*(x2-x3)))*f2 +\
(((x-x1)*(x-x2))/((x3-x1)*(x3-x2)))*f3
return p2
N = len(x)
plt.figure(figsize=(7,5))
plt.scatter(x, f, color='black')
for i in range(N-2):
x_interval = np.linspace(x[i],x[i+1],6) # 6 interpolate points in each interval
y_interval = quadraticInterpolation(x[i], x[i+1], x[i+2], f[i], f[i+1], f[i+2], x_interval)
plt.plot(x_interval, y_interval,'r')
plt.title(r' Quadratic Polynomial Piecewise Interpolation')
plt.xlabel(r'$x$')
plt.ylabel(r'$p_2(x)$')
plt.show()
```
**Note:** Because of the way the quadratic interpolation is performed, the last interval is left without information. In this region one can extend the interpolation from the next-to-last interval, or alternatively interpolate a linear polynomial.
---
## Lagrange Interpolation
**Lagrange interpolation** also seeks a polynomial of degree $n$ using $n+1$ points, but it uses an alternative method to find the coefficients. To understand this idea, we rewrite the linear polynomial found above in the form
\begin{equation}
p_1(x) = \frac{x-x_{i+1}}{x_i - x_{i+1}} f(x_i) + \frac{x-x_i}{x_{i+1}-x_i} f(x_{i+1}) + \mathcal{O}(\Delta x^2),
\end{equation}
or equivalently,
\begin{equation}
p_1(x) = \sum_{j=i}^{i+1} f(x_j) L_{1j}(x) + \mathcal{O}(\Delta x^2)
\end{equation}
where we have introduced the *Lagrange coefficients*
\begin{equation}
L_{1j}(x) = \frac{x-x_k}{x_j-x_k}\bigg|_{k\ne j}.
\end{equation}
Note that these coefficients ensure that the polynomial passes through the known points, i.e. $p_1(x_i) = f(x_i)$ and $p_1(x_{i+1}) = f(x_{i+1})$.
**Lagrange interpolation** generalizes these expressions to a polynomial of degree $n$ that passes through the $n+1$ known points,
\begin{equation}
p_n (x) = \sum_{j=0}^{n} f(x_j) L_{nj}(x) + \mathcal{O}(\Delta x^{n+1})\,, \label{eq:LagrangeInterpolation}
\end{equation}
where the Lagrange coefficients generalize to
\begin{equation}
L_{nj}(x) = \prod_{k\ne j}^{n} \frac{x-x_k}{x_j - x_k}\,.
\end{equation}
Again, note that these coefficients ensure that the polynomial passes through the known points, $p(x_j) = f(x_j)$.
```
# %load lagrangeInterpolation
'''
Eduard Larrañaga
Computational Astrophysics
2020
Lagrange Interpolation Method
'''
import numpy as np
#Lagrange Coefficients
def L(x, xi, j):
'''
------------------------------------------
L(x, xi, j)
------------------------------------------
Returns the Lagrange coefficient for the
interpolation evaluated at points x
Receives as arguments:
x : array of points where the interpolated
polynomial will be evaluated
xi : array of N data points
j : index of the coefficient to be
calculated
------------------------------------------
'''
# Number of points
N = len(xi)
prod = 1
for k in range(N):
if (k != j):
prod = prod * (x - xi[k])/(xi[j] - xi[k])
return prod
# Interpolated Polynomial
def p(x, xi, fi):
'''
------------------------------------------
p(x, xi, fi)
------------------------------------------
Returns the values of the Lagrange
interpolated polynomial in a set of points
defined by x
x : array of points where the interpolated
polynomial will be evaluated
xi : array of N data points points
fi : values of the function to be
interpolated
------------------------------------------
'''
# Number of points
N = len(xi)
summ = 0
for j in range(N):
summ = summ + fi[j]*L(x, xi, j)
return summ
import numpy as np
import matplotlib.pyplot as plt
#import lagrangeInterpolation as lagi
import sys
# Reading the data
data = np.loadtxt('data_points.txt', comments='#', delimiter=',')
x = data[:,0]
f = data[:,1]
N = len(x)
# Degree of the polynomial to be interpolated piecewise
n = 3
# Check if the number of point is enough to interpolate such a polynomial
if n>=N:
print('\nThere are not enough points to interpolate this polynomial.')
print(f'Using {N:.0f} points it is possible to interpolate polynomials up to order n={N-1:.0f}')
sys.exit()
plt.figure(figsize=(7,5))
plt.title(f'Lagrange Polynomial Piecewise Interpolation n={n:.0f}')
plt.scatter(x, f, color='black')
# Piecewise Interpolation Loop
for i in range(N-n):
xi = x[i:i+n+1]
fi = f[i:i+n+1]
x_interval = np.linspace(x[i],x[i+1],3*n)
y_interval = p(x_interval,xi,fi)
plt.plot(x_interval, y_interval,'r')
plt.xlabel(r'$x$')
plt.ylabel(r'$p_n(x)$')
plt.show()
```
Note that the last $n$ points are not interpolated. What can be done about it?
```
import numpy as np
import matplotlib.pyplot as plt
#import lagrangeInterpolation as lagi
import sys
# Reading the data
data = np.loadtxt('data_points.txt', comments='#', delimiter=',')
x = data[:,0]
f = data[:,1]
N = len(x)
# Degree of the polynomial to be interpolated piecewise
n = 6
# Check if the number of point is enough to interpolate such a polynomial
if n>=N:
print('\nThere are not enough points to interpolate this polynomial.')
print(f'Using {N:.0f} points it is possible to interpolate polynomials up to order n={N-1:.0f}')
sys.exit()
plt.figure(figsize=(7,5))
plt.title(f'Lagrange Polynomial Piecewise Interpolation n={n:.0f}')
plt.scatter(x, f, color='black')
# Piecewise Interpolation Loop
for i in range(N-n):
xi = x[i:i+n+1]
fi = f[i:i+n+1]
x_interval = np.linspace(x[i],x[i+1],3*n)
y_interval = p(x_interval,xi,fi)
plt.plot(x_interval, y_interval,'r')
# Piecewise Interpolation for the final N-n points,
# using a lower degree polynomial
while n>1:
m = n-1
for i in range(N-n,N-m):
xi = x[i:i+m+1]
fi = f[i:i+m+1]
x_interval = np.linspace(x[i],x[i+1],3*m)
y_interval = p(x_interval,xi,fi)
plt.plot(x_interval, y_interval,'r')
n=n-1
plt.xlabel(r'$x$')
plt.ylabel(r'$p_n(x)$')
plt.show()
```
### Runge's Phenomenon
Why interpolate piecewise? When a large number of known points is available, it is possible to interpolate a polynomial of high degree. However, the behavior of the interpolated polynomial may not be what is expected (especially near the ends of the interpolation interval) because of uncontrolled oscillations. This behavior is called Runge's phenomenon.
For example, for a dataset with $20$ points it is possible to interpolate a polynomial of order $n=19$,
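The effect can also be reproduced without the data file, using the classic Runge function $f(x) = 1/(1+25x^2)$ on equally spaced nodes. In the sketch below the node count and plotting grid are arbitrary illustrative choices, and a compact Lagrange evaluator is included so the snippet is self-contained.
```
# Self-contained illustration of Runge's phenomenon with the Runge function
import numpy as np
import matplotlib.pyplot as plt

def runge(x):
    return 1.0/(1.0 + 25.0*x**2)

def lagrange_eval(x, xi, fi):
    # Evaluate the Lagrange interpolating polynomial through (xi, fi) at x
    total = np.zeros_like(x)
    for j in range(len(xi)):
        Lj = np.ones_like(x)
        for k in range(len(xi)):
            if k != j:
                Lj = Lj*(x - xi[k])/(xi[j] - xi[k])
        total = total + fi[j]*Lj
    return total

xi = np.linspace(-1, 1, 11)   # 11 equally spaced nodes -> polynomial of degree 10
x = np.linspace(-1, 1, 400)
plt.plot(x, runge(x), 'k', label='$f(x)$')
plt.plot(x, lagrange_eval(x, xi, runge(xi)), 'r', label='$p_{10}(x)$')
plt.scatter(xi, runge(xi), color='black')
plt.legend()
plt.show()
```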
```
import numpy as np
import matplotlib.pyplot as plt
import lagrangeInterpolation as lagi
import sys
# Reading the data
data = np.loadtxt('data_points.txt', comments='#', delimiter=',')
x = data[:,0]
f = data[:,1]
N = len(x)
# Higher Degree polynomial to be interpolated
n = N-1
plt.figure(figsize=(7,5))
plt.title(f'Lagrange Polynomial Piecewise Interpolation n={n:.0f}')
plt.scatter(x, f, color='black')
#Interpolation of the higher degree polynomial
x_int = np.linspace(x[0],x[N-1],3*n)
y_int = lagi.p(x_int,x,f)
plt.plot(x_int, y_int,'r')
plt.xlabel(r'$x$')
plt.ylabel(r'$p_n(x)$')
plt.show()
```
However, it is clear that the behavior of the interpolated polynomial is not good near the ends of the considered interval. For this reason, it is highly advisable to use a piecewise interpolation with polynomials of low degree.
---
## Piecewise Cubic Hermite Interpolation
Hermite interpolation is a particular case of polynomial interpolation that uses a set of known points at which both the value of the function $f(x_j)$ and its derivative $f'(x_j)$ are known. By incorporating the first derivative, polynomials of higher degree can be interpolated while controlling the unwanted oscillations. In addition, since the first derivative is known, fewer points are needed to perform the interpolation.
Within this type of interpolation, the most widely used is the third-order polynomial. Thus, on an interval $[x_i , x_{i+1}]$, the values of $f(x_i)$, $f(x_{i+1})$, $f'(x_i)$ and $f'(x_{i+1})$ must be known (or evaluated) to obtain the cubic Hermite interpolated polynomial,
\begin{equation}
H_3(x) = f(x_i)\psi_0(z) + f(x_{i+1})\psi_0(1-z)+ f'(x_i)(x_{i+1} - x_{i})\psi_1(z) - f'(x_{i+1})(x_{i+1}-x_i)\psi_1 (1-z),
\end{equation}
donde
\begin{equation}
z = \frac{x-x_i}{x_{i+1}-x_i}
\end{equation}
y
\begin{align}
\psi_0(z) =&2z^3 - 3z^2 + 1 \\
\psi_1(z) =&z^3-2z^2+z\,\,.
\end{align}
Note that with this formulation it is possible to interpolate a third-order polynomial on an interval with only two points. Thus, when working with a set of many points, a cubic polynomial can be interpolated between each pair of data points, even on the last sub-interval!
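As a quick consistency check, at $x = x_i$ (i.e. $z=0$) the basis functions give $\psi_0(0)=1$, $\psi_0(1)=0$, $\psi_1(0)=0$ and $\psi_1(1)=0$, so
\begin{equation}
H_3(x_i) = f(x_i)\psi_0(0) + f(x_{i+1})\psi_0(1) + f'(x_i)(x_{i+1}-x_i)\psi_1(0) - f'(x_{i+1})(x_{i+1}-x_i)\psi_1(1) = f(x_i),
\end{equation}
and, in the same way, $H_3(x_{i+1}) = f(x_{i+1})$ at $z=1$.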
```
# %load HermiteInterpolation
'''
Eduard Larrañaga
Computational Astrophysics
2020
Hermite Interpolation Method
'''
import numpy as np
#Hermite Coefficients
def psi0(z):
'''
------------------------------------------
psi0(z)
------------------------------------------
Returns the Hermite coefficients Psi_0
for the interpolation
Receives as arguments: z
------------------------------------------
'''
psi_0 = 2*z**3 - 3*z**2 + 1
return psi_0
def psi1(z):
'''
------------------------------------------
psi1(z)
------------------------------------------
Returns the Hermite coefficients Psi_1 for
the interpolation
Receives as arguments: z
------------------------------------------
'''
psi_1 = z**3 - 2*z**2 + z
return psi_1
# Interpolated Polynomial
def H3(x, xi, fi, dfidx):
'''
------------------------------------------
H3(x, xi, fi, dfidx)
------------------------------------------
Returns the values of the Cubic Hermite
interpolated polynomial in a set of points
defined by x
x : array of points where the interpolated
polynomial will be evaluated
xi : array of 2 data points
fi : array of values of the function at xi
dfidx : array of values of the derivative
of the function at xi
------------------------------------------
'''
# variable z in the interpolation
    z = (x - xi[0])/(xi[1] - xi[0])
h1 = psi0(z) * fi[0]
h2 = psi0(1-z)*fi[1]
h3 = psi1(z)*(xi[1] - xi[0])*dfidx[0]
h4 = psi1(1-z)*(xi[1] - xi[0])*dfidx[1]
H = h1 + h2 + h3 - h4
return H
import numpy as np
import matplotlib.pyplot as plt
import HermiteInterpolation as heri
def Derivative(x, f):
'''
------------------------------------------
Derivative(x, f)
------------------------------------------
This function returns the numerical
derivative of a discretely-sample function
using one-side derivatives in the extreme
points of the interval and second order
accurate derivative in the middle points.
The data points may be evenly or unevenly
spaced.
------------------------------------------
'''
# Number of points
N = len(x)
dfdx = np.zeros([N, 2])
dfdx[:,0] = x
# Derivative at the extreme points
dfdx[0,1] = (f[1] - f[0])/(x[1] - x[0])
dfdx[N-1,1] = (f[N-1] - f[N-2])/(x[N-1] - x[N-2])
#Derivative at the middle points
for i in range(1,N-1):
h1 = x[i] - x[i-1]
h2 = x[i+1] - x[i]
dfdx[i,1] = h1*f[i+1]/(h2*(h1+h2)) - (h1-h2)*f[i]/(h1*h2) -\
h2*f[i-1]/(h1*(h1+h2))
return dfdx
# Loading the data
data = np.loadtxt('data_points.txt', comments='#', delimiter=',')
x = data[:,0]
f = data[:,1]
N = len(x)
# Calling the derivative function and chosing only the second column
dfdx = Derivative(x,f)[:,1]
plt.figure(figsize=(7,5))
plt.title(f'Cubic Hermite Polynomial Piecewise Interpolation')
plt.scatter(x, f, color='black')
# Piecewise Hermite Interpolation Loop
for i in range(N-1):
xi = x[i:i+2]
fi = f[i:i+2]
dfidx = dfdx[i:i+2]
x_interval = np.linspace(x[i],x[i+1],4)
y_interval = heri.H3(x_interval, xi, fi, dfidx)
plt.plot(x_interval, y_interval,'r')
plt.xlabel(r'$x$')
plt.ylabel(r'$H_3(x)$')
plt.show()
```
## Deploy an ONNX model to an IoT Edge device using ONNX Runtime and Azure Machine Learning

```
!python -m pip install --upgrade pip
!pip install azureml-core azureml-contrib-iot azure-mgmt-containerregistry azure-cli
!az extension add --name azure-cli-iot-ext
import os
print(os.__file__)
# Check core SDK version number
import azureml.core as azcore
print("SDK version:", azcore.VERSION)
```
## 1. Setup the Azure Machine Learning Environment
### 1.1 AML Workspace: using existing config
```
#Initialize Workspace
from azureml.core import Workspace
ws = Workspace.from_config()
```
### 1.2 AML Workspace : create a new workspace
```
#Initialize Workspace
from azureml.core import Workspace
### Change this cell from markdown to code and run this if you need to create a workspace
### Update the values for your workspace below
ws=Workspace.create(subscription_id="<subscription-id goes here>",
resource_group="<resource group goes here>",
name="<name of the AML workspace>",
location="<location>")
ws.write_config()
```
### 1.3 AML Workspace : initialize an existing workspace
Download the `config.json` file for your AML Workspace from the Azure portal
```
#Initialize Workspace
from azureml.core import Workspace
## existing AML Workspace in config.json
ws = Workspace.from_config('config.json')
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
```
## 2. Setup the trained model to use in this example
### 2.1 Register the trained model in workspace from the ONNX Model Zoo
```
import urllib.request
onnx_model_url = "https://onnxzoo.blob.core.windows.net/models/opset_8/tiny_yolov2/tiny_yolov2.tar.gz"
urllib.request.urlretrieve(onnx_model_url, filename="tiny_yolov2.tar.gz")
!tar xvzf tiny_yolov2.tar.gz
from azureml.core.model import Model
model = Model.register(workspace = ws,
model_path = "./tiny_yolov2/Model.onnx",
model_name = "Model.onnx",
tags = {"data": "Imagenet", "model": "object_detection", "type": "TinyYolo"},
description = "real-time object detection model from ONNX model zoo")
```
### 2.2 Load the model from your workspace model registry
For example, this could be the ONNX model exported from your training experiment.
```
from azureml.core.model import Model
model = Model(name='Model.onnx', workspace=ws)
```
## 3. Create the application container image
This container is the IoT Edge module that will be deployed on the UP<sup>2</sup> device.
1. This container uses a pre-built base image for ONNX Runtime.
2. It includes a `score.py` script, which must contain a `run()` and an `init()` function. `init()` is the entry point that reads camera frames from `/dev/video0`; `run()` is a dummy function to satisfy AML SDK checks.
3. It includes an `amlpackage_inference.py` script, which pre-processes the input frame and runs the inference session (a sketch of this helper is shown below), and
4. the ONNX model and label file used by ONNX Runtime.
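The `amlpackage_inference.py` helper itself is not included in this notebook. The sketch below is only a guess at what its `run_onnx` function could look like: the function name and argument order are taken from the call in `score.py`, but the body (input naming, output decoding, and the returned object format) is an assumption, not the actual implementation.
```
# Hypothetical sketch of amlpackage_inference.run_onnx (not the original code).
# It only shows how the ONNX Runtime session could be driven; decoding the
# Tiny YOLOv2 output grid into boxes and labels is omitted for brevity.
def run_onnx(frame, ort_session, draw_frame, labels, local_display="OFF"):
    # frame is expected to be a (1, 3, 416, 416) float32 array, as produced by
    # get_tinyyolo_frame_from_encode() in score.py
    input_name = ort_session.get_inputs()[0].name
    raw_output = ort_session.run(None, {input_name: frame})[0]
    # raw_output is typically of shape (1, 125, 13, 13) for Tiny YOLOv2; a real
    # implementation would decode the anchor boxes, apply non-max suppression,
    # optionally draw on draw_frame when local_display == "ON", and return the
    # detected objects.
    detections = []  # e.g. [{"label": ..., "confidence": ..., "bbox": ...}, ...]
    return detections
```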
```
%%writefile score.py
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for
# full license information.
import sys
import time
import io
import csv
# Imports for inferencing
import onnxruntime as rt
from amlpackage_inference import run_onnx
import numpy as np
import cv2
# Imports for communication w/IOT Hub
from iothub_client import IoTHubModuleClient, IoTHubClientError, IoTHubTransportProvider
from iothub_client import IoTHubMessage, IoTHubMessageDispositionResult, IoTHubError
from azureml.core.model import Model
# Imports for the http server
from flask import Flask, request
import json
# Imports for storage
import os
# from azure.storage.blob import BlockBlobService, PublicAccess, AppendBlobService
import random
import string
import csv
from datetime import datetime
from pytz import timezone
import time
import json
class HubManager(object):
def __init__(
self,
protocol=IoTHubTransportProvider.MQTT):
self.client_protocol = protocol
self.client = IoTHubModuleClient()
self.client.create_from_environment(protocol)
# set the time until a message times out
self.client.set_option("messageTimeout", MESSAGE_TIMEOUT)
# Forwards the message received onto the next stage in the process.
def forward_event_to_output(self, outputQueueName, event, send_context):
self.client.send_event_async(
outputQueueName, event, send_confirmation_callback, send_context)
def send_confirmation_callback(message, result, user_context):
"""
Callback received when the message that we're forwarding is processed.
"""
print("Confirmation[%d] received for message with result = %s" % (user_context, result))
def get_tinyyolo_frame_from_encode(msg):
"""
Formats jpeg encoded msg to frame that can be processed by tiny_yolov2
"""
#inp = np.array(msg).reshape((len(msg),1))
#frame = cv2.imdecode(inp.astype(np.uint8), 1)
frame = cv2.cvtColor(msg, cv2.COLOR_BGR2RGB)
# resize and pad to keep input frame aspect ratio
h, w = frame.shape[:2]
tw = 416 if w > h else int(np.round(416.0 * w / h))
th = 416 if h > w else int(np.round(416.0 * h / w))
frame = cv2.resize(frame, (tw, th))
pad_value=114
top = int(max(0, np.round((416.0 - th) / 2)))
left = int(max(0, np.round((416.0 - tw) / 2)))
bottom = 416 - top - th
right = 416 - left - tw
frame = cv2.copyMakeBorder(frame, top, bottom, left, right,
cv2.BORDER_CONSTANT, value=[pad_value, pad_value, pad_value])
frame = np.ascontiguousarray(np.array(frame, dtype=np.float32).transpose(2, 0, 1)) # HWC -> CHW
frame = np.expand_dims(frame, axis=0)
return frame
def run(msg):
# this is a dummy function required to satisfy AML-SDK requirements.
return msg
def init():
# Choose HTTP, AMQP or MQTT as transport protocol. Currently only MQTT is supported.
PROTOCOL = IoTHubTransportProvider.MQTT
DEVICE = 0 # when device is /dev/video0
LABEL_FILE = "labels.txt"
MODEL_FILE = "Model.onnx"
global MESSAGE_TIMEOUT # setting for IoT Hub Manager
MESSAGE_TIMEOUT = 1000
LOCAL_DISPLAY = "OFF" # flag for local display on/off, default OFF
# Create the IoT Hub Manager to send message to IoT Hub
print("trying to make IOT Hub manager")
hub_manager = HubManager(PROTOCOL)
if not hub_manager:
print("Took too long to make hub_manager, exiting program.")
print("Try restarting IotEdge or this module.")
sys.exit(1)
# Get Labels from labels file
labels_file = open(LABEL_FILE)
labels_string = labels_file.read()
labels = labels_string.split(",")
labels_file.close()
label_lookup = {}
for i, val in enumerate(labels):
label_lookup[val] = i
# get model path from within the container image
model_path=Model.get_model_path(MODEL_FILE)
# Loading ONNX model
print("loading model to ONNX Runtime...")
start_time = time.time()
ort_session = rt.InferenceSession(model_path)
print("loaded after", time.time()-start_time,"s")
# start reading frames from video endpoint
cap = cv2.VideoCapture(DEVICE)
while cap.isOpened():
_, _ = cap.read()
ret, img_frame = cap.read()
if not ret:
print('no video RESETTING FRAMES TO 0 TO RUN IN LOOP')
cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
continue
"""
Handles incoming inference calls for each fames. Gets frame from request and calls inferencing function on frame.
Sends result to IOT Hub.
"""
try:
draw_frame = img_frame
start_time = time.time()
# pre-process the frame to flatten, scale for tiny-yolo
frame = get_tinyyolo_frame_from_encode(img_frame)
# run the inference session for the given input frame
objects = run_onnx(frame, ort_session, draw_frame, labels, LOCAL_DISPLAY)
# LOOK AT OBJECTS AND CHECK PREVIOUS STATUS TO APPEND
num_objects = len(objects)
print("NUMBER OBJECTS DETECTED:", num_objects)
print("PROCESSED IN:",time.time()-start_time,"s")
if num_objects > 0:
output_IOT = IoTHubMessage(json.dumps(objects))
hub_manager.forward_event_to_output("inferenceoutput", output_IOT, 0)
continue
except Exception as e:
print('EXCEPTION:', str(e))
continue
```
### 3.1 Include the dependent packages required by the application scripts
```
from azureml.core.conda_dependencies import CondaDependencies
myenv = CondaDependencies()
myenv.add_pip_package("azure-iothub-device-client")
myenv.add_pip_package("numpy")
myenv.add_pip_package("opencv-python")
myenv.add_pip_package("requests")
myenv.add_pip_package("pytz")
myenv.add_pip_package("onnx")
with open("myenv.yml", "w") as f:
f.write(myenv.serialize_to_string())
```
### 3.2 Build the custom container image with the ONNX Runtime + OpenVINO base image
This step uses pre-built container images with ONNX Runtime and the different HW execution providers. A complete list of base images are located [here](https://github.com/microsoft/onnxruntime/tree/master/dockerfiles#docker-containers-for-onnx-runtime).
```
from azureml.core.image import ContainerImage
from azureml.core.model import Model
openvino_image_config = ContainerImage.image_configuration(execution_script = "score.py",
runtime = "python",
dependencies=["labels.txt", "amlpackage_inference.py"],
conda_file = "myenv.yml",
description = "TinyYolo ONNX Runtime inference container",
tags = {"demo": "onnx"})
# Use the ONNX Runtime + OpenVINO base image for Intel MovidiusTM USB sticks
openvino_image_config.base_image = "mcr.microsoft.com/azureml/onnxruntime:latest-openvino-myriad"
# For the Intel Movidius VAD-M PCIe card use this:
# openvino_image_config.base_image = "mcr.microsoft.com/azureml/onnxruntime:latest-openvino-vadm"
openvino_image = ContainerImage.create(name = "name-of-image",
# this is the model object
models = [model],
image_config = openvino_image_config,
workspace = ws)
# Alternative: Re-use an image that you have already built from the workspace image registry
# openvino_image = ContainerImage(name = "<name-of-image>", workspace = ws)
openvino_image.wait_for_creation(show_output = True)
if openvino_image.creation_state == 'Failed':
print("Image build log at: " + openvino_image.image_build_log_uri)
if openvino_image.creation_state != 'Failed':
print("Image URI at: " +openvino_image.image_location)
```
## 4. Deploy to the UP<sup>2</sup> device using Azure IoT Edge
### 4.1 Login with the Azure subscription to provision the IoT Hub and the IoT Edge device
```
!az login
!az account set --subscription $ws.subscription_id
# confirm the account
!az account show
```
### 4.2 Specify the IoT Edge device details
```
# Parameter list to configure the IoT Hub and the IoT Edge device
# Pick a name for what you want to call the module you deploy to the camera
module_name = "module-name-here"
# Resource group in Azure
resource_group_name= ws.resource_group
iot_rg=resource_group_name
# Azure region where your services will be provisioned
iot_location="location-here"
# Azure IoT Hub name
iot_hub_name="name-of-IoT-Hub"
# Pick a name for your camera
iot_device_id="name-of-IoT-Edge-device"
# Pick a name for the deployment configuration
iot_deployment_id="Inference Module from AML"
```
### 4.2a Optional: Provision the IoT Hub, create the IoT Edge device and Setup the Intel UP<sup>2</sup> AI Vision Developer Kit
```
!az iot hub create --resource-group $resource_group_name --name $iot_hub_name --sku S1
# Register an IoT Edge device (create a new entry in the Iot Hub)
!az iot hub device-identity create --hub-name $iot_hub_name --device-id $iot_device_id --edge-enabled
!az iot hub device-identity show-connection-string --hub-name $iot_hub_name --device-id $iot_device_id
```
The following steps need to be executed in the device terminal
1. Open the IoT edge configuration file in UP<sup>2</sup> device to update the IoT Edge device *connection string*
`sudo nano /etc/iotedge/config.yaml`
provisioning:
source: "manual"
device_connection_string: "<ADD DEVICE CONNECTION STRING HERE>"
2. To update the DPS TPM provisioning configuration:
provisioning:
source: "dps"
global_endpoint: "https://global.azure-devices-provisioning.net"
scope_id: "{scope_id}"
attestation:
method: "tpm"
registration_id: "{registration_id}"
3. Save and close the file (`CTRL + X`, `Y`, `Enter`).
4. After entering the provisioning information in the configuration file, restart the *iotedge* daemon
`sudo systemctl restart iotedge`
5. We will show the object detection results from the camera (`/dev/video0`) connected to the UP<sup>2</sup> on the display. Update your `.profile` file:
`nano ~/.profile`
add the following line to the end of the file:
__xhost +__
### 4.3 Construct the deployment file
```
# create the registry uri
container_reg = ws.get_details()["containerRegistry"]
reg_name=container_reg.split("/")[-1]
container_url = "\"" + openvino_image.image_location + "\","
subscription_id = ws.subscription_id
print('{}'.format(openvino_image.image_location), "<-- this is the URI configured in the IoT Hub for the device")
print('{}'.format(reg_name))
print('{}'.format(subscription_id))
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.mgmt import containerregistry
client = ContainerRegistryManagementClient(ws._auth,subscription_id)
result= client.registries.list_credentials(resource_group_name, reg_name, custom_headers=None, raw=False)
username = result.username
password = result.passwords[0].value
```
#### Create the `deployment.json` with the AML image registry details
We have provided a sample deployment template for this reference implementation.
```
file = open('./AML-deployment.template.json')
contents = file.read()
contents = contents.replace('__AML_MODULE_NAME', module_name)
contents = contents.replace('__AML_REGISTRY_NAME', reg_name)
contents = contents.replace('__AML_REGISTRY_USER_NAME', username)
contents = contents.replace('__AML_REGISTRY_PASSWORD', password)
contents = contents.replace('__AML_REGISTRY_IMAGE_LOCATION', openvino_image.image_location)
with open('./deployment.json', 'wt', encoding='utf-8') as output_file:
output_file.write(contents)
```
### 4.4 Push the *deployment* to the IoT Edge device
```
print("Pushing deployment to IoT Edge device")
print ("Set the deployement")
!az iot edge set-modules --device-id $iot_device_id --hub-name $iot_hub_name --content deployment.json
```
### 4.5 Monitor IoT Hub Messages
```
!az iot hub monitor-events --hub-name $iot_hub_name -y
```
## 5. CLEANUP
```
!rm score.py deployment.json myenv.yml
```
```
# While in argo environment: Import necessary packages for this notebook
import numpy as np
from matplotlib import pyplot as plt
import xarray as xr
import pandas as pd
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
%matplotlib inline
import glob
```
```
!python -m pip install "dask[complete]"
```
```
float_id = '9094' # '9094' '9099' '7652' '9125'
rootdir = '../data/raw/LowRes/'
fd = xr.open_mfdataset(rootdir + float_id + 'SOOCNQC.nc')
JULD = pd.to_datetime(fd.JULD.values)
```
```
#reads float data
file_folder = "../data/raw/WGfloats/"
#file_folder = "../../data/raw/LowRes"
float_number = "5904468" #7900918 #9094
files = sorted(glob.glob(file_folder+"/*"+float_number+"*.nc"))
print(files)
#files = sorted(glob.glob(file_folder+"/*.nc"))
fd = xr.open_mfdataset(file_folder+"/*"+float_number+"*.nc")
JULD = pd.to_datetime(fd.JULD.values)
```
```
fd
```
```
#help(xr.open_mfdataset)
rootdir + float_id + 'SOOCNQC.nc'
#Data/LowRes/9099SOOCNQC.nc
#fd
```
```
# HELPER FUNCTIONS
#define a function that smooths using a boxcar filter (running mean)
# not sure this function is actually used in the notebook??
def smooth(y, box_pts):
box = np.ones(box_pts)/box_pts
y_smooth = np.convolve(y, box, mode='same')
return y_smooth
#interpolate the data onto the standard depth grid given by x_int
def interpolate(x_int, xvals, yvals):
yvals_int = []
for n in range(0, len(yvals)): # len(yvals) = profile number
yvals_int.append(np.interp(x_int, xvals[n, :], yvals[n, :]))
#convert the interpolated data from a list to numpy array
return np.asarray(yvals_int)
# calculate the vertically integrated data column inventory using the composite trapezoidal rule
def integrate(zi, data, depth_range):
n_profs = len(data)
zi_start = abs(zi - depth_range[0]).argmin() # find location of start depth
zi_end = abs(zi - depth_range[1]).argmin() # find location of end depth
zi_struct = np.ones((n_profs, 1)) * zi[zi_start : zi_end] # add +1 to get the 200m value
data = data[:, zi_start : zi_end] # add +1 to get the 200m value
col_inv = []
for n in range(0, len(data)):
col_inv.append(np.trapz(data[n,:][~np.isnan(data[n,:])], zi_struct[n,:][~np.isnan(data[n,:])]))
return col_inv
#fd #(float data)
#fd.Pressure.isel(N_PROF=0).values
# Interpolate nitrate and poc
zi = np.arange(0, 1600, 5) # 5 = 320 depth intervals between 0m to 1595m
nitr_int = interpolate(zi, fd.Pressure[:, ::-1], fd.Nitrate[:, ::-1]) # interpolate nitrate values across zi depth intervals for all 188 profiles
# Integrate nitrate and poc - total nitrate in upper 200m
upperlim=25
lowerlim=200
nitr = np.array(integrate(zi, nitr_int, [upperlim, lowerlim])) # integrate interpolated nitrate values between 25m-200m
print(nitr)
#nitr.shape
# Find winter maximum and summer minimum upper ocean nitrate levels
def find_extrema(data, date_range, find_func):
# Find indices of float profiles in the date range
date_mask = (JULD > date_range[0]) & (JULD < date_range[1])
# Get the index where the data is closest to the find_func
index = np.where(data[date_mask] == find_func(data[date_mask]))[0][0]
# Get the average data for the month of the extrema
month_start = JULD[date_mask][index].replace(day = 1) # .replace just changes the day of max/min to 1
month_dates = (JULD > month_start) & (JULD < month_start + pd.Timedelta(days = 30))
# ^ not sure why this is needed? or what it does? - it is not used later on...
#month_avg = np.mean(data[date_mask]) #average whole winter or summer values
# ^ but it should be just the month of max/min nitrate,
# not the average for the whole season?...
month_mask = (JULD.month[date_mask] == month_start.month)
month_avg = np.mean(data[date_mask][month_mask])
return month_avg, JULD[date_mask][index], data[date_mask][index]
years = [2015, 2016, 2017, 2018]
nitr_extrema = []
nitr_ancp = []
for y in years:
winter_range = [pd.datetime(y, 8, 1), pd.datetime(y, 12, 1)] #4 months
summer_range = [pd.datetime(y, 12, 1), pd.datetime(y + 1, 4, 1)] #4 months
# Find maximum winter and minimum summer nitrate
avg_max_nitr, max_nitr_date, max_nitr = find_extrema(nitr, winter_range, np.max)
avg_min_nitr, min_nitr_date, min_nitr = find_extrema(nitr, summer_range, np.min)
# Convert to annual nitrate drawdown
redfield_ratio = 106.0/16.0 #106C:16NO3-
# Nitrate units: umol/kg --> divide by 1000 to convert to mol/kg
nitr_drawdown = (avg_max_nitr - avg_min_nitr)/1000.0 * redfield_ratio
nitr_ancp.append(nitr_drawdown)
nitr_extrema.append(((max_nitr, max_nitr_date), (min_nitr, min_nitr_date)))
print(y, max_nitr_date, max_nitr, avg_max_nitr)
print(y, min_nitr_date, min_nitr, avg_min_nitr)
# plot ANCP for chosen float over specified time period
fig, ax = plt.subplots(figsize = (10, 5))
ax.plot(years, nitr_ancp)
ax.set_ylabel('ANCP [mol/m$^2$]', size = 12)
ax.set_xticks(years)
ax.set_xticklabels(['2015', '2016', '2017', '2018'])
ax.set_title('ANCP for Float ' + float_id)
# Find winter maximum and summer minimum upper ocean nitrate levels
def find_extrema(data, date_range, find_func):
# Find indices of float profiles in the date range
date_mask = (JULD > date_range[0]) & (JULD < date_range[1])
# Get the index where the data is closest to the find_func
index = np.where(data[date_mask] == find_func(data[date_mask]))[0][0]
# Get the average data for the month of the extrema
month_start = JULD[date_mask][index].replace(day = 1) # .replace just changes the day of max/min to 1
month_dates = (JULD > month_start) & (JULD < month_start + pd.Timedelta(days = 30))
# ^ not sure why this is needed? or what it does? - it is not used later on...
month_avg = np.mean(data[date_mask]) #average whole winter or summer values
# ^ but it should be just the month of max/min nitrate,
# not the average for the whole season?...
# month_mask = (JULD.month[date_mask] == month_start.month)
# month_avg = np.mean(data[date_mask][month_mask])
return month_avg, JULD[date_mask][index], data[date_mask][index]
years = [2015, 2016, 2017, 2018]
nitr_extrema = []
nitr_ancp = []
for y in years:
winter_range = [pd.datetime(y, 8, 1), pd.datetime(y, 12, 1)]
summer_range = [pd.datetime(y, 12, 1), pd.datetime(y + 1, 4, 1)]
# Find maximum winter and minimum summer nitrate
avg_max_nitr, max_nitr_date, max_nitr = find_extrema(nitr, winter_range, np.max)
avg_min_nitr, min_nitr_date, min_nitr = find_extrema(nitr, summer_range, np.min)
# Convert to annual nitrate drawdown
redfield_ratio = 106.0/16.0 #106C:16NO3-
# Nitrate units: umol/kg --> divide by 1000 to convert to mol/kg
nitr_drawdown = (avg_max_nitr - avg_min_nitr)/1000.0 * redfield_ratio
nitr_ancp.append(nitr_drawdown)
nitr_extrema.append(((max_nitr, max_nitr_date), (min_nitr, min_nitr_date)))
print(y, max_nitr_date, max_nitr, avg_max_nitr)
print(y, min_nitr_date, min_nitr, avg_min_nitr)
# plot ANCP for chosen float over specified time period
fig, ax = plt.subplots(figsize = (10, 5))
ax.plot(years, nitr_ancp)
ax.set_ylabel('ANCP [mol/m$^2$]', size = 12)
ax.set_xticks(years)
ax.set_xticklabels(['2015', '2016', '2017', '2018'])
ax.set_title('ANCP for Float ' + float_id)
# Plot values of integrated nitrate (mol/m2)
fig, ax = plt.subplots(figsize = (20, 5))
# Integrate nitrate and poc between given depth range
zi_range = [25, 200]
nitr_v = np.array(integrate(zi, nitr_int, zi_range))/1000.0
# Function to mark the maximum/minimum values of the data for summer and winter
def add_extrema(ax, ydata, extrema):
for i in range(len(years)):
y = years[i]
winter_range = [pd.datetime(y, 8, 1), pd.datetime(y, 12, 1)]
summer_range = [pd.datetime(y, 12, 1), pd.datetime(y + 1, 4, 1)]
plt.axvspan(winter_range[0], winter_range[1], color='grey', alpha=0.1)
plt.axvspan(summer_range[0], summer_range[1], color='y', alpha=0.1)
(nmax, dmax), (nmin, dmin) = extrema[i]
nitr_vmax = ydata[JULD == dmax]
nitr_vmin = ydata[JULD == dmin]
ax.plot([dmax], nitr_vmax, color = 'g', marker='o', markersize=8)
ax.plot([dmin], nitr_vmin, color = 'r', marker='o', markersize=8)
return ax
#ax = plt.subplot(2, 1, 1)
ax.plot(JULD, nitr_v)
add_extrema(ax, nitr_v, nitr_extrema)
ax.set_ylabel('Nitrate [mol/$m^2$]')
ax.set_title('Integrated Nitrate (' + str(zi_range[0]) + '-' + str(zi_range[1]) + 'm)')
ax.set_ylim([4.5, ax.get_ylim()[1]])
```
# Monte Carlo Simulations with Python (Part 1)
[Patrick Hanbury](https://towardsdatascience.com/monte-carlo-simulations-with-python-part-1-f5627b7d60b0)
- Notebook author: Israel Oliveira [\[e-mail\]](mailto:'Israel%20Oliveira%20'<[email protected]>)
```
%load_ext watermark
import numpy as np
import math
import random
from matplotlib import pyplot as plt
from IPython.display import clear_output, display, Markdown, Latex, Math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import scipy.integrate as integrate
from decimal import Decimal
import pandas as pd
PI = math.pi
e = math.e
# Run this cell before close.
%watermark
%watermark --iversion
%watermark -b -r -g
```
We want:
$I_{ab} = \int\limits_{a}^{b} f(x) dx ~~~(1)$
so we could achieve that with an average value of $f$:
$\hat{f}_{ab} = \frac{1}{b-a} \int\limits_{a}^{b} f(x) dx ~~~(2)$
```
def func(x):
return (x - 3) * (x - 5) * (x - 7) + 85
a, b = 2, 9 # integral limits
x = np.linspace(0, 10)
y = func(x)
fig, ax = plt.subplots()
ax.plot(x, y, 'r', linewidth=2)
ax.set_ylim(bottom=0)
# Make the shaded region
ix = np.linspace(a, b)
iy = func(ix)
verts = [(a, 0), *zip(ix, iy), (b, 0)]
poly = Polygon(verts, facecolor='0.9', edgecolor='0.5')
ax.add_patch(poly)
ax.text(0.5 * (a + b), 30, r"$\int_a^b f(x)\mathrm{d}x$",
horizontalalignment='center', fontsize=20)
fig.text(0.9, 0.05, '$x$')
fig.text(0.1, 0.9, '$y$')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks((a, b))
ax.set_xticklabels(('$a$', '$b$'))
ax.set_yticks([])
plt.axhline(90, xmin=0.225, xmax=0.87)
ax.text(0.5 * (a + b), 30, r"$\int_a^b f(x)\mathrm{d}x$",
horizontalalignment='center', fontsize=20)
plt.show()
```
With $(1)$ and $(2)$:
$\hat{f}_{ab} = \frac{1}{b-a} I $
$I = (b-a)\hat{f}_{ab} ~~~(3)$
Sampling $f(\cdot)$, it is possible to calculate an approximate value for $\hat{f}_{ab}$ (with a random variable $\mathbf{x}$):
$\mathbf{F}_{ab} = \{ f(\mathbf{x}) ~|~ \mathbf{x} ~\in~ [a, b] \}$
The expectation for $\mathbf{F}_{ab}$ is:
$E[\mathbf{F}_{ab}] = \hat{f}_{ab}$
and concluding with
$I = E[\mathbf{F}_{ab}](b-a)$
So, how can we calculate $E[\mathbf{F}_{ab}]$? With $N$ uniform samples of $x~\in~[a, b]$. If $N$ is large enough and $\mathbf{x}$ is uniform on $[a, b]$:
$ E[\mathbf{F}_{ab}] = \frac{1}{N} \sum\limits_{i}^N f(\mathbf{x}_i) ~|~ \mathbf{x}_i ~\in~ [a, b]$
and
$I = E[\mathbf{F}_{ab}](b-a) = \lim\limits_{N \rightarrow \infty} \frac{b-a}{N} \sum\limits_{i}^N f(\mathbf{x}_i) ~|~ \mathbf{x}_i ~\in~ [a, b] ~~~(4)$
This is the *Crude Monte Carlo*.
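A vectorized version of estimator $(4)$ can be written with NumPy; the function, bounds and sample size below are just placeholders for illustration.
```
import numpy as np

def crude_mc(f, a, b, n=100_000):
    # draw n uniform samples in [a, b] and average f over them
    x = np.random.uniform(a, b, n)
    return (b - a) * np.mean(f(x))

# sanity check: the integral of sin(x) over [0, pi] is exactly 2
print(crude_mc(np.sin, 0.0, np.pi))
```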
#### Example 1:
Calculate:
$I = \int\limits_{0}^{+\infty} \frac{e^{-x}}{(x-1)^2 + 1} dx ~~~(5)$
```
def get_rand_number(min_value, max_value):
"""
This function gets a random number from a uniform distribution between
the two input values [min_value, max_value] inclusively
Args:
- min_value (float)
- max_value (float)
Return:
- Random number between this range (float)
"""
range = max_value - min_value
choice = random.uniform(0,1)
return min_value + range*choice
def f_of_x(x):
"""
This is the main function we want to integrate over.
Args:
- x (float) : input to function; must be in radians
Return:
- output of function f(x) (float)
"""
return (e**(-1*x))/(1+(x-1)**2)
lower_bound = 0
upper_bound = 5
def crude_monte_carlo(num_samples=10000, lower_bound = 0, upper_bound = 5):
"""
This function performs the Crude Monte Carlo for our
specific function f(x) on the range x=0 to x=5.
Notice that this bound is sufficient because f(x)
approaches 0 at around PI.
Args:
- num_samples (float) : number of samples
Return:
- Crude Monte Carlo estimation (float)
"""
sum_of_samples = 0
for i in range(num_samples):
x = get_rand_number(lower_bound, upper_bound)
sum_of_samples += f_of_x(x)
return (upper_bound - lower_bound) * float(sum_of_samples/num_samples)
display(Math(r'I \approx {:.4f}, ~N = 10^4'.format(crude_monte_carlo())))
display(Math(r'\left . f(a) \right |_{a=0} \approx '+r'{:.4f} '.format(f_of_x(lower_bound))))
display(Math(r'\left . f(b) \right |_{b=5} \approx '+r'{:.4f} '.format(f_of_x(upper_bound))))
```
Why $b=5$?
$ \lim\limits_{x \rightarrow +\infty} \frac{e^{-x}}{(x-1)^2 + 1} \rightarrow 0 $
We could consider $0.0004 ~\approx~ 0$.
What if $b = 10$?
```
upper_bound = 10
display(Math(r'\left . f(b) \right |_{b=10} \approx '+r'{:.6f}'.format(f_of_x(upper_bound))+'= 10^{-6}'))
display(Math(r'I \approx {:.4f}, ~N = 10^5 '.format(crude_monte_carlo(num_samples=100000, upper_bound = 10))))
plt.figure()
def func(x):
return f_of_x(x)
a, b = 0, 5 # integral limits
x = np.linspace(0, 6)
y = func(x)
fig, ax = plt.subplots()
ax.plot(x, y, 'r', linewidth=2)
ax.set_ylim(bottom=0)
# Make the shaded region
ix = np.linspace(a, b)
iy = func(ix)
verts = [(a, 0), *zip(ix, iy), (b, 0)]
poly = Polygon(verts, facecolor='0.9', edgecolor='0.5')
ax.add_patch(poly)
ax.text(0.2 * (a + b), 0.05, r"$\int_a^b f(x)\mathrm{d}x$",
horizontalalignment='center', fontsize=20)
fig.text(0.9, 0.05, '$x$')
fig.text(0.1, 0.9, '$y$')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks((a, b))
ax.set_xticklabels(('$a$', '$b$'))
ax.set_yticks([])
ax.axhline(0.2*crude_monte_carlo(),color='b', xmin=0.051, xmax=0.81)
iy = iy*0+0.2*crude_monte_carlo()
verts = [(a, 0), *zip(ix, iy), (b, 0)]
poly = Polygon(verts, facecolor='0.94', edgecolor='0.99')
ax.add_patch(poly)
ax.text(5,0.2*crude_monte_carlo()+0.03 , r"$\hat{f}_{ab}$",
horizontalalignment='center', fontsize=20)
plt.show()
```
Comparing with [Integration (scipy.integrate)](https://docs.scipy.org/doc/scipy/reference/tutorial/integrate.html).
```
results = integrate.quad(lambda x: f_of_x(x), lower_bound, upper_bound)
tmp = (Decimal(results[1]).as_tuple().digits[0], Decimal(results[1]).as_tuple().exponent + len(Decimal(results[1]).as_tuple().digits) -1)
display(Math(r'I_{\text{SciPy}} = '+r'{:.4f}, ~e \approx {}'.format(results[0],tmp[0])+r'\cdot 10^{'+'{}'.format(tmp[1])+r'}'))
diff = []
for _ in range(100):
diff.append(crude_monte_carlo(num_samples=100000, upper_bound = 10)-results[0])
df = pd.DataFrame([abs(x) for x in diff], columns=['$I- I_{\text{SciPy}}$'])
display(df.describe())
df.plot(grid = True)
df = pd.DataFrame([abs(x)/results[0] for x in diff], columns=['$(I- I_{\text{SciPy}})/I_{\text{SciPy}}$'])
display(df.describe())
df.plot(grid = True)
```
Confirm the estimated error with variance.
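The error of a Monte Carlo estimate is usually quoted as
$\epsilon \approx \sqrt{\dfrac{\mathrm{Var}(\mathbf{F}_{ab})}{N}}$
which is what `math.sqrt(s1 / 10000)` evaluates below for $N = 10^4$ samples.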
```
def get_crude_MC_variance(num_samples = 10000, upper_bound = 5):
"""
This function returns the variance fo the Crude Monte Carlo.
Note that the inputed number of samples does not neccissarily
need to correspond to number of samples used in the Monte
Carlo Simulation.
Args:
- num_samples (int)
Return:
- Variance for Crude Monte Carlo approximation of f(x) (float)
"""
int_max = upper_bound # this is the max of our integration range
# get the average of squares
running_total = 0
for i in range(num_samples):
x = get_rand_number(0, int_max)
running_total += f_of_x(x)**2
sum_of_sqs = running_total*int_max / num_samples
# get square of average
running_total = 0
for i in range(num_samples):
x = get_rand_number(0, int_max)
        running_total += f_of_x(x)
sq_ave = (int_max*running_total/num_samples)**2
return sum_of_sqs - sq_ave
s1 = get_crude_MC_variance()
"{:.4f}".format(s1)
s2 = get_crude_MC_variance(100000,10)
"{:.4f}".format(s2)
math.sqrt(s1 / 10000)
df.describe().loc['mean'].to_list()[0]
```
### Importance Sampling
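The idea is to rewrite the integral in terms of a weight function $g(x)$ that is easy to sample from and roughly follows the shape of $f(x)$:
$I = \int\limits_{a}^{b} f(x) dx = \int\limits_{a}^{b} \frac{f(x)}{g(x)} g(x) dx = E_g\left[ \frac{f(\mathbf{x})}{g(\mathbf{x})} \right]$
Here $g(x) = A e^{-\lambda x}$ with $A = \lambda$, so that $g$ is a normalized exponential density on $[0, \infty)$. Samples from $g$ are obtained by inverse-transform sampling: if $r$ is uniform on $(0, 1]$, then $x = G^{-1}(r) = -\ln(r)/\lambda$ is distributed according to $g$, which is what `inverse_G_of_r` implements below. The value of $\lambda$ is then chosen to minimize the variance of $f/g$.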
```
# this is the template of our weight function g(x)
def g_of_x(x, A, lamda):
e = 2.71828
return A*math.pow(e, -1*lamda*x)
def inverse_G_of_r(r, lamda):
return (-1 * math.log(float(r)))/lamda
def get_IS_variance(lamda, num_samples):
"""
This function calculates the variance if a Monte Carlo
using importance sampling.
Args:
- lamda (float) : lamdba value of g(x) being tested
Return:
- Variance
"""
A = lamda
int_max = 5
# get sum of squares
running_total = 0
for i in range(num_samples):
x = get_rand_number(0, int_max)
running_total += (f_of_x(x)/g_of_x(x, A, lamda))**2
sum_of_sqs = running_total / num_samples
# get squared average
running_total = 0
for i in range(num_samples):
x = get_rand_number(0, int_max)
running_total += f_of_x(x)/g_of_x(x, A, lamda)
sq_ave = (running_total/num_samples)**2
return sum_of_sqs - sq_ave
# get variance as a function of lambda by testing many
# different lambdas
test_lamdas = [i*0.05 for i in range(1, 61)]
variances = []
for i, lamda in enumerate(test_lamdas):
print(f"lambda {i+1}/{len(test_lamdas)}: {lamda}")
A = lamda
variances.append(get_IS_variance(lamda, 10000))
clear_output(wait=True)
optimal_lamda = test_lamdas[np.argmin(np.asarray(variances))]
IS_variance = variances[np.argmin(np.asarray(variances))]
print(f"Optimal Lambda: {optimal_lamda}")
print(f"Optimal Variance: {IS_variance}")
print(f"Error: {(IS_variance/10000)**0.5}")
def importance_sampling_MC(lamda, num_samples):
A = lamda
running_total = 0
for i in range(num_samples):
r = get_rand_number(0,1)
running_total += f_of_x(inverse_G_of_r(r, lamda=lamda))/g_of_x(inverse_G_of_r(r, lamda=lamda), A, lamda)
approximation = float(running_total/num_samples)
return approximation
# run simulation
num_samples = 10000
approx = importance_sampling_MC(optimal_lamda, num_samples)
variance = get_IS_variance(optimal_lamda, num_samples)
error = (variance/num_samples)**0.5
# display results
print(f"Importance Sampling Approximation: {approx}")
print(f"Variance: {variance}")
print(f"Error: {error}")
display(Math(r'(I_{IS} - I_{\text{SciPy}})/I_{\text{SciPy}} = '+'{:.4}\%'.format(100*abs((approx-results[0])/results[0]))))
```
```
import sys; sys.path.append('../rrr')
from multilayer_perceptron import *
from figure_grid import *
from local_linear_explanation import *
from toy_colors import generate_dataset, imgshape, ignore_rule1, ignore_rule2, rule1_score, rule2_score
import lime
import lime.lime_tabular
```
# Toy Color Dataset
This is a simple, two-class image classification dataset with two independent ways a model could learn to distinguish between classes. The first is whether all four corner pixels are the same color, and the second is whether the top-middle three pixels are all different colors. Images in class 1 satisfy both conditions and images in class 2 satisfy neither. See `color_dataset_generator` for more details.
We will train a multilayer perceptron to classify these images, explore which rule(s) it implicitly learns, and constrain it to use only one rule (or neither).
Let's first load our dataset:
```
X, Xt, y, yt = generate_dataset(cachefile='../data/toy-colors.npz')
E1 = np.array([ignore_rule2 for _ in range(len(y))])
E2 = np.array([ignore_rule1 for _ in range(len(y))])
print(X.shape, Xt.shape, y.shape, yt.shape, E1.shape, E2.shape)
```
## Understanding the Dataset
Let's just examine images from each class quickly and verify that in class 1, the corners are all the same color and the top-middle three pixels are all different (none of which should hold true in class 2):
```
plt.subplot(121)
plt.title('Class 1')
image_grid(X[np.argwhere(y == 0)[:9]], (5,5,3), 3)
plt.subplot(122)
plt.title('Class 2')
image_grid(X[np.argwhere(y == 1)[:9]], (5,5,3), 3)
plt.show()
```
Great.
## Explaining and learning diverse classifiers
Now let's see if we can train our model to implicitly learn each rule:
```
def explain(model, title='', length=4):
plt.title(title)
explanation_grid(model.grad_explain(Xt[:length*length]), imgshape, length)
# Train a model without any constraints
mlp_plain = MultilayerPerceptron()
mlp_plain.fit(X, y)
mlp_plain.score(Xt, yt)
# Train a model constrained to use the first rule
mlp_rule1 = MultilayerPerceptron(l2_grads=1000)
mlp_rule1.fit(X, y, E1)
mlp_rule1.score(Xt, yt)
# Train a model constrained to use the second rule
mlp_rule2 = MultilayerPerceptron(l2_grads=1000)
mlp_rule2.fit(X, y, E2)
mlp_rule2.score(Xt, yt)
# Visualize largest weights
with figure_grid(1,3, rowwidth=8) as g:
g.next()
explain(mlp_plain, 'No annotations')
g.next()
explain(mlp_rule1, '$A$ penalizing top middle')
g.next()
explain(mlp_rule2, '$A$ penalizing corners')
```
Notice that when we explicitly penalize corners or the top middle, the model appears to learn the _other_ rule perfectly. We haven't identified the pixels it does treat as significant in any way, but they are significant, so the fact that they show up in the explanations means that the explanation is probably an accurate reflection of the model's implicit logic.
When we don't have any annotations, the model does identify the top-middle pixels occasionally, suggesting it defaults to learning a heavily but not completely corner-weighted combination of the rules.
What happens when we forbid it from using either rule?
```
mlp_neither = MultilayerPerceptron(l2_grads=1e6)
mlp_neither.fit(X, y, E1 + E2)
mlp_neither.score(Xt, yt)
explain(mlp_neither, '$A$ biased against all relevant features')
plt.show()
```
As we might expect, accuracy goes down and we start identifying random pixels as significant.
## Find-another-explanation
Let's now pretend we have no knowledge of what $A$ _should_ be for this dataset. Can we still train models that use diverse rules just by examining explanations?
```
A1 = mlp_plain.largest_gradient_mask(X)
mlp_fae1 = MultilayerPerceptron(l2_grads=1000)
mlp_fae1.fit(X, y, A1)
mlp_fae1.score(Xt, yt)
explain(mlp_fae1, '$A$ biased against first model')
plt.show()
```
Excellent. When we train a model to have small gradients where the $A=0$ model has large ones, we reproduce the top middle rule, though in some cases we learn a hybrid of the two. Now let's train another model to be different from either one. Note: I'm going to iteratively increase the L2 penalty until I get explanation divergence. I'm doing this manually now but it could easily be automated.
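A minimal sketch of how that manual search could be automated is shown below; the starting penalty, the growth factor, and the mask-overlap threshold used as a stopping criterion are all arbitrary choices, and it only reuses calls that already appear in this notebook (`MultilayerPerceptron`, `fit`, `largest_gradient_mask`).
```
import numpy as np

# Hypothetical automation of the manual penalty search: keep increasing l2_grads
# until the new model's large-gradient mask barely overlaps the masks of the
# previously found models.
def find_another_explanation(X, y, prior_masks, l2=1000, max_l2=1e7, max_overlap=0.1):
    A = sum(prior_masks)  # forbid everything the earlier models relied on
    while True:
        mlp = MultilayerPerceptron(l2_grads=l2)
        mlp.fit(X, y, A)
        new_mask = mlp.largest_gradient_mask(X)
        overlap = np.mean([np.logical_and(new_mask, m).mean() for m in prior_masks])
        if overlap <= max_overlap or l2 >= max_l2:
            return mlp, new_mask
        l2 *= 10
```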
```
A2 = mlp_fae1.largest_gradient_mask(X)
mlp_fae2 = MultilayerPerceptron(l2_grads=1e6)
mlp_fae2.fit(X, y, A1 + A2)
mlp_fae2.score(Xt, yt)
explain(mlp_fae2, '$A$ biased against models 1 and 2')
plt.show()
```
When we run this twice, we get low accuracy and random gradient placement. Let's visualize this all together:
```
gridsize = (2,3)
plt.subplot2grid(gridsize, (0,0))
explain(mlp_plain, r'$M_{0.67}\left[ f_X|\theta_0 \right]$', 4)
plt.subplot2grid(gridsize, (0,1))
explain(mlp_fae1, r'$M_{0.67}\left[ f_X|\theta_1 \right]$', 4)
plt.subplot2grid(gridsize, (0,2))
explain(mlp_fae2, r'$M_{0.67}\left[ f_X|\theta_2 \right]$', 4)
plt.subplot2grid(gridsize, (1,0), colspan=3)
plt.axhline(1, color='red', ls='--')
test_scores = [mlp_plain.score(Xt, yt), mlp_fae1.score(Xt, yt), mlp_fae2.score(Xt, yt)]
train_scores = [mlp_plain.score(X, y), mlp_fae1.score(X, y), mlp_fae2.score(X, y)]
plt.plot([0,1,2], train_scores, marker='^', label='Train', alpha=0.5, color='blue', markersize=10)
plt.plot([0,1,2], test_scores, marker='o', label='Test', color='blue')
plt.xlim(-0.5, 2.5)
plt.ylim(0.4, 1.05)
plt.ylabel(' Accuracy')
plt.xlabel('Find-another-explanation iteration')
plt.legend(loc='best', fontsize=10)
plt.xticks([0,1,2])
plt.show()
```
So this more or less demonstrates the find-another-explanation method on the toy color dataset.
## Transitions between rules
Separately, I ran a script to train many MLPs on this dataset, all biased against using corners, but with varying numbers of annotations in $A$ and varying L2 penalties. Let's see if we can find any transition behavior between these two rules:
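The sweep script itself is not shown here; a rough sketch of its L2-penalty half might look like the following (the penalty grid is illustrative, and the original script additionally serialized the trained parameters into the pickle files loaded below).
```
# Hypothetical sketch of the sweep: train MLPs biased against the corner rule
# (annotations E2) for a range of explanation L2 penalties.
sweep_l2_vals = [1, 10, 100, 1000, 10000]
sweep_mlps = []
for l2 in sweep_l2_vals:
    mlp = MultilayerPerceptron(l2_grads=l2)
    mlp.fit(X, y, E2)
    sweep_mlps.append(mlp)
```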
```
import pickle
n_vals = pickle.load(open('../data/color_n_vals.pkl', 'rb'))
n_mlps = pickle.load(open('../data/color_n_mlps.pkl', 'rb'))
l2_vals = pickle.load(open('../data/color_l2_vals.pkl', 'rb'))
l2_mlps = pickle.load(open('../data/color_l2_mlps.pkl', 'rb'))
def realize(mlp_params):
return [MultilayerPerceptron.from_params(p) for p in mlp_params]
l2_rule1_scores = [rule1_score(mlp, Xt[:1000]) for mlp in realize(l2_mlps)]
l2_rule2_scores = [rule2_score(mlp, Xt[:1000]) for mlp in realize(l2_mlps)]
l2_acc_scores = [mlp.score(Xt[:1000], yt[:1000]) for mlp in realize(l2_mlps)]
n_rule1_scores = [rule1_score(mlp, Xt[:1000]) for mlp in realize(n_mlps)]
n_rule2_scores = [rule2_score(mlp, Xt[:1000]) for mlp in realize(n_mlps)]
n_acc_scores = [mlp.score(Xt[:1000], yt[:1000]) for mlp in realize(n_mlps)]
plt.figure(figsize=(8,4))
plt.subplot(121)
plt.plot(l2_vals, l2_rule1_scores, 'o', label='Corners', marker='^')
plt.plot(l2_vals, l2_rule2_scores, 'o', label='Top mid.')
plt.plot(l2_vals, l2_acc_scores, label='Accuracy')
plt.title('Effect of $\lambda_1$ on implicit rule (full $A$)')
plt.ylabel(r'Mean % $M_{0.67}\left[f_X\right]$ in corners / top middle')
plt.ylim(0,1.1)
plt.xscale("log")
plt.yticks([])
plt.xlim(0,1000)
plt.legend(loc='best', fontsize=10)
plt.xlabel(r'$\lambda_1$ (explanation L2 penalty)')
plt.subplot(122)
plt.plot(n_vals, n_rule1_scores, 'o', label='Corners', marker='^')
plt.plot(n_vals, n_rule2_scores, 'o', label='Top mid.')
plt.plot(n_vals, n_acc_scores, label='Accuracy')
plt.xscale('log')
plt.ylim(0,1.1)
plt.xlim(0,10000)
plt.legend(loc='best', fontsize=10)
plt.title('Effect of $A$ on implicit rule ($\lambda_1=1000$)')
plt.xlabel('Number of annotations (nonzero rows of $A$)')
plt.tight_layout()
plt.show()
```
Cool. So we can definitely see a clear transition effect between rules.
## Comparison with LIME
Although we have some pretty clear evidence that gradient explanations are descriptive for our MLP on this simple dataset, let's make sure LIME produces similar results. We'll also do a very basic benchmark to see how long each of the respective methods take.
```
explainer = lime.lime_tabular.LimeTabularExplainer(
Xt,
feature_names=list(range(len(Xt[0]))),
class_names=[0,1])
import time
t1 = time.perf_counter()
lime_explanations = [
explainer.explain_instance(Xt[i], mlp_plain.predict_proba, top_labels=1)
for i in range(25)
]
t2 = time.perf_counter()
input_grads = mlp_plain.input_gradients(Xt[:25])
t3 = time.perf_counter()
print('LIME took {:.6f}s/example'.format((t2-t1)/25.))
print('grads took {:.6f}s/example, which is {:.0f}x faster'.format((t3-t2)/25., (t2-t1)/float(t3-t2)))
preds = mlp_plain.predict(Xt[:25])
lime_exps = [LocalLinearExplanation.from_lime(Xt[i], preds[i], lime_explanations[i]) for i in range(25)]
grad_exps = [LocalLinearExplanation(Xt[i], preds[i], input_grads[i]) for i in range(25)]
plt.subplot(121)
plt.title('LIME', fontsize=16)
explanation_grid(lime_exps, imgshape, 3)
plt.subplot(122)
plt.title(r'$M_{0.67}\left[f_X\right]$', fontsize=16)
explanation_grid(grad_exps, imgshape, 3)
plt.show()
```
So our explanation methods agree somewhat closely, which is good to see. Also, gradients are significantly faster.
## Learning from less data
Do explanations allow our model to learn with less data? Separately, we trained many models on increasing fractions of the dataset with different annotations; some penalizing the corners/top-middle and some penalizing everything but the corners/top-middle. Let's see how each version of the model performs:
```
import pickle
data_counts = pickle.load(open('../data/color_data_counts.pkl', 'rb'))
normals_by_count = pickle.load(open('../data/color_normals_by_count.pkl', 'rb'))
pro_r1s_by_count = pickle.load(open('../data/color_pro_r1s_by_count.pkl', 'rb'))
pro_r2s_by_count = pickle.load(open('../data/color_pro_r2s_by_count.pkl', 'rb'))
anti_r1s_by_count = pickle.load(open('../data/color_anti_r1s_by_count.pkl', 'rb'))
anti_r2s_by_count = pickle.load(open('../data/color_anti_r2s_by_count.pkl', 'rb'))
def score_all(ms):
return [m.score(Xt,yt) for m in realize(ms)]
def realize(mlp_params):
return [MultilayerPerceptron.from_params(p) for p in mlp_params]
sc_normal = score_all(normals_by_count)
sc_pro_r1 = score_all(pro_r1s_by_count)
sc_pro_r2 = score_all(pro_r2s_by_count)
sc_anti_r1 = score_all(anti_r1s_by_count)
sc_anti_r2 = score_all(anti_r2s_by_count)
from matplotlib import ticker
def plot_A(A):
plt.gca().set_xticks([])
plt.gca().set_yticks([])
plt.imshow((A[0].reshape(5,5,3) * 255).astype(np.uint8), interpolation='none')
for i in range(5):
for j in range(5):
if A[0].reshape(5,5,3)[i][j][0]:
plt.text(j,i+0.025,'1',ha='center',va='center',fontsize=8)
else:
plt.text(j,i+0.025,'0',ha='center',va='center',color='white',fontsize=8)
gridsize = (4,9)
plt.figure(figsize=(10,5))
cs=3
plt.subplot2grid(gridsize, (0,2*cs))
plt.title('Pro-Rule 1')
plot_A(~E2)
plt.subplot2grid(gridsize, (1,2*cs))
plt.title('Pro-Rule 2')
plot_A(~E1)
plt.subplot2grid(gridsize, (2,2*cs))
plt.title('Anti-Rule 1')
plot_A(E2)
plt.subplot2grid(gridsize, (3,2*cs))
plt.title('Anti-Rule 2')
plot_A(E1)
plt.subplot2grid(gridsize, (0,0), rowspan=4, colspan=cs)
plt.title('Learning Rule 1 with $A$')
plt.errorbar(data_counts, sc_normal, label=r'Normal', lw=2)
plt.errorbar(data_counts, sc_pro_r1, label=r'Pro-Rule 1', marker='H')
plt.errorbar(data_counts, sc_anti_r2, label=r'Anti-Rule 2', marker='^')
plt.xscale('log')
plt.ylim(0.5,1)
plt.ylabel('Test Accuracy')
plt.xlabel('# Training Examples')
plt.legend(loc='best', fontsize=10)
plt.gca().xaxis.set_major_formatter(ticker.FormatStrFormatter("%d"))
plt.gca().set_xticks([10,100,1000])
plt.subplot2grid(gridsize, (0,cs), rowspan=4, colspan=cs)
plt.title('Learning Rule 2 with $A$')
plt.gca().set_yticklabels([])
plt.errorbar(data_counts, sc_normal, label=r'Normal', lw=2)
plt.errorbar(data_counts, sc_pro_r2, label=r'Pro-Rule 2', marker='H')
plt.errorbar(data_counts, sc_anti_r1, label=r'Anti-Rule 1', marker='^')
plt.xscale('log')
plt.ylim(0.5,1)
plt.xlabel('# Training Examples')
plt.legend(loc='best', fontsize=10)
plt.gca().xaxis.set_major_formatter(ticker.FormatStrFormatter("%d"))
plt.show()
def improvement_over_normal(scores, cutoff):
norm = data_counts[next(i for i,val in enumerate(sc_normal) if val > cutoff)]
comp = data_counts[next(i for i,val in enumerate(scores) if val > cutoff)]
return norm / float(comp)
def print_improvement(name, scores, cutoff):
print('Extra data for normal model to reach {:.2f} accuracy vs. {}: {:.2f}'.format(
cutoff, name, improvement_over_normal(scores, cutoff)))
print_improvement('Anti-Rule 2', sc_anti_r2, 0.8)
print_improvement('Anti-Rule 2', sc_anti_r2, 0.9)
print_improvement('Anti-Rule 2', sc_anti_r2, 0.95)
print_improvement('Anti-Rule 2', sc_anti_r2, 0.99)
print('')
print_improvement('Pro-Rule 1', sc_pro_r1, 0.8)
print_improvement('Pro-Rule 1', sc_pro_r1, 0.9)
print_improvement('Pro-Rule 1', sc_pro_r1, 0.95)
print_improvement('Pro-Rule 1', sc_pro_r1, 0.99)
print('')
print_improvement('Pro-Rule 2', sc_pro_r2, 0.9)
print_improvement('Pro-Rule 2', sc_pro_r2, 0.95)
print_improvement('Pro-Rule 2', sc_pro_r2, 0.97)
print('')
print_improvement('Anti-Rule 1', sc_anti_r1, 0.7)
print_improvement('Anti-Rule 1', sc_anti_r1, 0.8)
print_improvement('Anti-Rule 1', sc_anti_r1, 0.9)
```
Generally, we learn better classifiers with less data using explanations (especially in the Pro-Rule 1 case, where we provide the most information). Biasing against the top-middle or against everything but the corners / top-middle tends to give us more accurate classifiers. Biasing against the corners, however, gives us _lower_ accuracy until we obtain more examples. This may be because it's an inherently harder rule to learn; there are only 4 ways that all corners can match, but $4*3*2=24$ ways the top-middle pixels can differ.
## Investigating cutoffs
We chose a 0.67 cutoff for most of our training a bit arbitrarily, so let's just investigate that briefly:
```
def M(input_gradients, cutoff=0.67):
return np.array([np.abs(e) > cutoff*np.abs(e).max() for e in input_gradients]).astype(int).ravel()
grads = mlp_plain.input_gradients(Xt)
grads2 = mlp_rule1.input_gradients(Xt)
cutoffs = np.linspace(0,1,100)
cutoff_pcts = np.array([M(grads, c).sum() / float(len(grads.ravel())) for c in cutoffs])
cutoff_pcts2 = np.array([M(grads2, c).sum() / float(len(grads2.ravel())) for c in cutoffs])
plt.plot(cutoffs, cutoff_pcts, label='$A=0$')
plt.plot(cutoffs, cutoff_pcts2, label='$A$ against corners')
plt.legend(loc='best')
plt.xlabel('Cutoff')
plt.ylabel('Mean fraction of qualifying gradient entries')
plt.yticks(np.linspace(0,1,21))
plt.yscale('log')
plt.axhline(0.06, ls='--', c='red')
plt.axvline(0.67, ls='--', c='blue')
plt.title('-- Toy color dataset --\n# qualifying entries falls exponentially\n0.67 cutoff takes the top ~6%')
plt.show()
```
On average, the number of elements we keep falls exponentially without any clear kink in the curve, so perhaps our arbitrariness is justified, though it's problematic that it exists in the first place.
```
import os
import re
import torch
import pickle
import pandas as pd
import numpy as np
from tqdm.auto import tqdm
tqdm.pandas()
```
# 1. Pre-processing
### Create a combined dataframe
> This creates a dataframe containing the image IDs & labels for both the original images provided by the Bristol Myers Squibb pharmaceutical company and the augmentations generated for each original image.
```
train_df = pd.read_csv('../../../../../../../../../Downloads/train/train_labels.csv')
```
### InChI pre-processing
> This first splits the opening part of the InChI string (the chemical formula) into sequences of text and numbers, and then splits the remaining part of the InChI string (the other layers) into sequences of text and numbers.
```
def split_inchi_formula(formula: str) -> str:
"""
This function splits the chemical formula (in the first layer of InChI)
into its separate element and number components.
:param formula: chemical formula, e.g. C13H20OS
:type formula: string
:return: splitted chemical formula
:rtype: string
"""
string = ''
# for each chemical element in the formula
for i in re.findall(r"[A-Z][^A-Z]*", formula):
# return each separate element, i.e. text
elem = re.match(r"\D+", i).group()
# return each separate number
num = i.replace(elem, "")
# add either the element or both element and number (space-separated) to the string
if num == "":
string += f"{elem} "
else:
string += f"{elem} {str(num)} "
return string.rstrip(' ')
def split_inchi_layers(layers: str) -> str:
"""
This function splits the layers (following the first layer of InChI)
into separate element and number components.
:param layers: layer string, e.g. c1-9(2)8-15-13-6-5-10(3)7-12(13)11(4)14/h5-7,9,11,14H,8H2,1-4H3
:type layers: string
:return: splitted layer info
:rtype: string
"""
string = ''
# for each layer in layers
for i in re.findall(r"[a-z][^a-z]*", layers):
# get the character preceding the layer info
elem = i[0]
# get the number string succeeding the character
num = i.replace(elem, "").replace("/", "")
num_string = ''
# for each number string
for j in re.findall(r"[0-9]+[^0-9]*", num):
# get the list of numbers
num_list = list(re.findall(r'\d+', j))
# get the first number
_num = num_list[0]
# add the number string to the overall result
if j == _num:
num_string += f"{_num} "
else:
extra = j.replace(_num, "")
num_string += f"{_num} {' '.join(list(extra))} "
string += f"/{elem} {num_string}"
return string.rstrip(' ')
```
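As a quick illustration, applying the two helpers to the example strings from their docstrings produces space-separated tokens (the expected outputs are shown as comments):
```
print(split_inchi_formula("C13H20OS"))
# C 13 H 20 O S
print(split_inchi_layers("c1-9(2)8-15-13-6-5-10(3)7-12(13)11(4)14/h5-7,9,11,14H,8H2,1-4H3"))
# /c 1 - 9 ( 2 ) 8 - 15 - 13 - 6 - 5 - 10 ( 3 ) 7 - 12 ( 13 ) 11 ( 4 ) 14 /h 5 - 7 , 9 , 11 , 14 H , 8 H 2 , 1 - 4 H 3
```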
### Tokenize texts and predict captions
> This tokenizes each text by converting it to a sequence of tokens; the reverse conversion (sequence to text) is also supported. Image caption prediction also takes place within the Tokenizer class.
```
class Tokenizer(object):
def __init__(self):
# string to integer mapping
self.stoi = {}
# integer to string mapping
self.itos = {}
def __len__(self) -> None:
"""
This method returns the length of token:index map.
:return: length of map
:rtype: int
"""
# return the length of the map
return len(self.stoi)
def fit_on_texts(self, texts: list) -> None:
"""
This method creates a vocabulary of all tokens contained in provided texts,
and updates the mapping of token to index, and index to token.
:param texts: list of texts
:type texts: list
"""
# create a storage for all tokens
vocab = set()
# add tokens from each text to vocabulary
for text in texts:
vocab.update(text.split(' '))
# sort the vocabulary in alphabetical order
vocab = sorted(vocab)
# add start, end and pad for sentence
vocab.append('<sos>')
vocab.append('<eos>')
vocab.append('<pad>')
# update the string to integer mapping, where integer is the index of the token
for i, s in enumerate(vocab):
self.stoi[s] = i
# reverse the previous vocabulary to create integer to string mapping
self.itos = {item[1]: item[0] for item in self.stoi.items()}
def text_to_sequence(self, text: str) -> list:
"""
This method converts the given text to a list of its individual tokens,
including start and end of string symbols.
:param text: input textual data
:type text: str
:return: list of tokens
:rtype: list
"""
# storage to append symbols to
sequence = []
# add the start of string symbol to storage
sequence.append(self.stoi['<sos>'])
# add each token in text to storage
for s in text.split(' '):
sequence.append(self.stoi[s])
# add the end of string symbol to storage
sequence.append(self.stoi['<eos>'])
return sequence
def texts_to_sequences(self, texts: list) -> list:
"""
This method converts each text in the provided list into sequences of characters.
Each sequence is appended to a list and the said list is returned.
:param texts: a list of input texts
:type texts: list
:return: a list of sequences
:rtype: list
"""
# storage to append sequences to
sequences = []
# for each text do
for text in texts:
# convert the text to a list of characters
sequence = self.text_to_sequence(text)
# append the lists of characters to an aggregated list storage
sequences.append(sequence)
return sequences
def sequence_to_text(self, sequence: list) -> str:
"""
This method converts the sequence of characters back into text.
:param sequence: list of characters
:type sequence: list
:return: text
:rtype: str
"""
# join the characters with no space in between
return ''.join(list(map(lambda i: self.itos[i], sequence)))
def sequences_to_texts(self, sequences: list) -> list:
"""
This method converts each provided sequence into text and returns all texts inside a list.
:param sequences: list of character sequences
:type sequences: list
:return: list of texts
:rtype: list
"""
# storage for texts
texts = []
# convert each sequence to text and append to storage
for sequence in sequences:
text = self.sequence_to_text(sequence)
texts.append(text)
return texts
def predict_caption(self, sequence: list) -> str:
"""
This method predicts the caption by adding each symbol in sequence to a resulting string.
This keeps happening up until the end of sentence or padding is met.
:param sequence: list of characters
:type sequence: list
:return: image caption
:rtype: string
"""
# storage for the final caption
caption = ''
# for each index in a sequence of symbols
for i in sequence:
# if symbol is the end of sentence or padding, break
if i == self.stoi['<eos>'] or i == self.stoi['<pad>']:
break
# otherwise, add the symbol to the final caption
caption += self.itos[i]
return caption
def predict_captions(self, sequences: list) -> list:
"""
This method predicts the captions for each sequence in a list of sequences.
:param sequences: list of sequences
:type sequences: list
:return: list of final image captions
:rtype: list
"""
# storage for captions
captions = []
# for each sequence, do
for sequence in sequences:
# predict the caption per sequence
caption = self.predict_caption(sequence)
# append to the storage of captions
captions.append(caption)
return captions
# split the InChI string on the '/' delimiter and keep the chemical formula part
train_df['InChI_chemical_formula'] = train_df['InChI'].apply(lambda x: x.split('/')[1])
```
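Before using the tokenizer on the full dataset, a small, hypothetical round-trip illustrates its behaviour; the expected outputs follow directly from the class definition above.
```
# Hypothetical mini-example of the Tokenizer (not part of the training pipeline)
toy_tokenizer = Tokenizer()
toy_tokenizer.fit_on_texts(['C 13 H 20 O S'])
seq = toy_tokenizer.text_to_sequence('C 13 H 20 O S')
print(len(toy_tokenizer))                      # 6 tokens + <sos>/<eos>/<pad> -> 9
print(toy_tokenizer.predict_caption(seq[1:]))  # drop <sos>; stops at <eos> -> 'C13H20OS'
```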
### Pre-process
> This performs all preprocessing steps, mainly: (1) converting the InChI string to a space-separated list of elements,
(2) tokenizing the InChI string by creating lists of elements, and (3) computing the actual length of each such list. The results are stored in `train_df`.
```
# split the InChI string into the chemical formula part and the other layers part
train_df['InChI_text'] = (
train_df['InChI_chemical_formula'].apply(split_inchi_formula)
+ ' '
+ train_df['InChI'].apply(lambda x: '/'.join(x.split('/')[2:])).apply(split_inchi_layers).values
+ ' '
+ train_df['InChI'].apply(lambda x: x[x.find('/h'):]).apply(split_inchi_layers).values
)
# handle cases where the InChI string has no hydrogen ('/h') layer
for idx in range(len(train_df['InChI_text'])):
if '/h' not in train_df.loc[idx, 'InChI']:
train_df.loc[idx, 'InChI_text'] = (
split_inchi_formula(train_df.loc[idx, 'InChI_chemical_formula'])
+
' '
+
split_inchi_layers('/'.join(train_df.loc[idx, 'InChI'].split('/')[2:])
)
)
# save the train_df in a separate csv
train_df.to_csv('../../../data/train_df.csv')
# create a tokenizer class
tokenizer = Tokenizer()
# create a vocabulary of all InChI tokens
tokenizer.fit_on_texts(train_df['InChI_text'].values)
# save the tokenizer
torch.save(tokenizer, '../../../data/tokenizer.pth')
# store all sequence lengths
lengths = []
# creates a progress bar around the iterable
tk = tqdm(train_df['InChI_text'].values, total=len(train_df))
# for each text, i.e. InChI string, in the iterable, do
for text in tk:
# convert text to sequence of characters
seq = tokenizer.text_to_sequence(text)
    # update the caption length (reduced by 2 for the <sos> and <eos> symbols) and append to the aggregated storage
length = len(seq) - 2
lengths.append(length)
# write down the lengths in the dataframe
train_df['InChI_length'] = lengths
# save as a pickle file
train_df.to_pickle('../../../data/train.pkl')
print('Saved the train dataframe as a pickle file.')
```
<a href="https://colab.research.google.com/github/skojaku/cidre/blob/second-edit/examples/example.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# About this notebook
In this notebook, we apply CIDRE to a network with communities and demonstrate how to use CIDRE and visualize the detected groups.
## Preparation
### Install CIDRE package
First, we install `cidre` package with `pip`:
```
!pip install cidre
```
### Loading libraries
Next, we load some libraries
```
import sys
import numpy as np
from scipy import sparse
import pandas as pd
import cidre
import networkx as nx
```
# Example 1
We first present an example of a small artificial network, which can be loaded by
```
# Data path
edge_file = "https://raw.githubusercontent.com/skojaku/cidre/main/data/synthe/edge-table.csv"
node_file = "https://raw.githubusercontent.com/skojaku/cidre/main/data/synthe/node-table.csv"
# Load
node_table = pd.read_csv(node_file)
A, node_labels = cidre.utils.read_edge_list(edge_file)
# Visualization
nx.draw(nx.from_scipy_sparse_matrix(A), linewidths = 1, edge_color="#8d8d8d", edgecolors="b")
```
## About this network
We constructed this synthetic network by generating a network using a stochastic block model (SBM) composed of two blocks and then adding excessive citation edges among uniformly randomly selected pairs of nodes. Each block corresponds to a community, i.e., a group of nodes that are densely connected with each other within it but sparsely connected with those in the opposite group. Such communities overshadow anomalous groups in networks.
## Community detection with graph-tool
Let's pretend that we do not know that the network is composed of two communities plus additional edges. To run CIDRE, we first need to find the communities. We use [graph-tool package](https://graph-tool.skewed.de/) to do this, which can be installed by
```bash
conda install -c conda-forge graph-tool
```
or in `Colaboratory` platform:
```
%%capture
!echo "deb http://downloads.skewed.de/apt bionic main" >> /etc/apt/sources.list
!apt-key adv --keyserver keys.openpgp.org --recv-key 612DEFB798507F25
!apt-get update
!apt-get install python3-graph-tool python3-cairo python3-matplotlib
```
Now, let's detect communities by fitting the degree-corrected stochastic block model (dcSBM) to the network and consider each detected block as a community.
```
import graph_tool.all as gt
def detect_community(A, K = None, **params):
"""Detect communities using the graph-tool package
:param A: adjacency matrix
:type A: scipy.csr_sparse_matrix
:param K: Maximum number of communities. If K = None, the number of communities is automatically determined by graph-tool.
:type K: int or None
:param params: parameters passed to graph_tool.gt.minimize_blockmodel_dl
"""
def to_graph_tool_format(adj, membership=None):
g = gt.Graph(directed=True)
r, c, v = sparse.find(adj)
nedges = v.size
edge_weights = g.new_edge_property("double")
g.edge_properties["weight"] = edge_weights
g.add_edge_list(
np.hstack([np.transpose((r, c)), np.reshape(v, (nedges, 1))]),
eprops=[edge_weights],
)
return g
G = to_graph_tool_format(A)
states = gt.minimize_blockmodel_dl(
G,
state_args=dict(eweight=G.ep.weight),
multilevel_mcmc_args = {"B_max": A.shape[0] if K is None else K },
**params
)
b = states.get_blocks()
return np.unique(np.array(b.a), return_inverse = True)[1]
group_membership = detect_community(A)
```
## Detecting anomalous groups in the network
Now, we feed the network and its community structure to CIDRE. To do this, we create a `cidre.Cidre` object and input `group_membership` along with some key parameters to `cidre.Cidre`.
```
alg = cidre.Cidre(group_membership = group_membership, alpha = 0.05, min_edge_weight = 1)
```
- `alpha` (default 0.01) is the statistical significance level.
- `min_edge_weight` is the threshold of the edge weight, i.e., the edges with weight less than this value will be removed.
Then, we input the network to `cidre.Cidre.detect`.
```
groups = alg.detect(A, threshold=0.15)
```
`groups` is a list of `Group` instances. A `Group` instance represents a group of nodes detected by CIDRE and contains information about the type of each member node (i.e., donor and recipient). We can get the donor nodes of a group, for example `groups[0]`, by
```
groups[0].donors
```
The keys and values of this dict object are the IDs of the nodes and their donor scores, respectively. The recipients and their recipient scores can be obtained by
```
groups[0].recipients
```
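To get an overview of everything CIDRE found, you can loop over the detected groups. The sketch below relies only on the `donors` and `recipients` dictionaries described above.
```
# Minimal sketch: one-line summary per detected group
for k, group in enumerate(groups):
    print("Group %d: %d donor(s), %d recipient(s)" % (k, len(group.donors), len(group.recipients)))
```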
## Visualization
`cidre` package provides an API to visualize small groups. To use this API, we first need to import some additional libraries.
```
import seaborn as sns
import matplotlib.pyplot as plt
```
Then, plot the group by
```
# The following three lines are purely for visual enhancement, i.e., changing the saturation of the colors and font size.
sns.set_style("white")
sns.set(font_scale=1.2)
sns.set_style("ticks")
# Set the figure size
width, height = 5,5
fig, ax = plt.subplots(figsize=(width, height))
# Plot a citation group
cidre.DrawGroup().draw(groups[0], ax = ax)
```
# Example 2
Let's apply CIDRE to a much larger empirical citation network, i.e., the citation network of journals in 2013.
```
# Data path
edge_file = "https://raw.githubusercontent.com/skojaku/cidre/main/data/journal-citation/edge-table-2013.csv"
node_file = "https://raw.githubusercontent.com/skojaku/cidre/main/data/journal-citation/community-label.csv"
# Load
node_table = pd.read_csv(node_file)
A, node_labels = cidre.utils.read_edge_list(edge_file)
```
## About this network
This network is a citation network of journals in 2013 constructed from Microsoft Academic Graph.
Each edge is weighted by the number of citations made to the papers in the prior two years.
The following are basic statistics of this network.
```
print("Number of nodes: %d" % A.shape[0])
print("Number of edges: %d" % A.sum())
print("Average degree: %.2f" % (A.sum()/A.shape[0]))
print("Max in-degree: %d" % np.max(A.sum(axis = 0)))
print("Max out-degree: %d" % np.max(A.sum(axis = 1)))
print("Maximum edge weight: %d" % A.max())
print("Minimum edge weight: %d" % np.min(A.data))
```
## Communities
[In our paper](https://www.nature.com/articles/s41598-021-93572-3), we identified the communities of journals using [graph-tool](https://graph-tool.skewed.de/). `node_table` contains the community membership of each journal, from which we prepare `group_membership` array as follows.
```
# Get the group membership
node2com = dict(zip(node_table["journal_id"], node_table["community_id"]))
group_membership = [node2com[node_labels[i]] for i in range(A.shape[0])]
```
## Detecting anomalous groups in the network
As is demonstrated in the first example, we detect the anomalous groups in the network by
```
alg = cidre.Cidre(group_membership = group_membership, alpha = 0.01, min_edge_weight = 10)
groups = alg.detect(A, threshold=0.15)
print("The number of journals in the largest group: %d" % np.max([group.size() for group in groups]))
print("Number of groups detected: %d" % len(groups))
```
[In our paper](https://www.nature.com/articles/s41598-021-93572-3), we omitted the groups that have fewer than 50 within-group citations, because we expect anomalous citation groups to contain sufficiently many within-group citations, i.e.,
```
groups = [group for group in groups if group.get_num_edges()>=50]
```
where `group.get_num_edges()` gives the sum of the weights of the non-self-loop edges within the group.
## Visualization
Let us visualize the groups detected by CIDRE. For expository purposes, we sample three groups to visualize uniformly at random.
```
groups_sampled = [groups[i] for i in np.random.choice(len(groups), 3, replace = False)]
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style("white")
sns.set(font_scale=1.2)
sns.set_style("ticks")
fig, axes = plt.subplots(ncols = 3, figsize=(6 * 3, 5))
for i in range(3):
cidre.DrawGroup().draw(groups_sampled[i], ax = axes.flat[i])
```
The numbers beside the nodes are the IDs of the journals in the network. To show the journals' names, we do the following.
First, we load node labels and make a dictionary that maps the ID of each node to the label:
```
df = pd.read_csv("https://raw.githubusercontent.com/skojaku/cidre/main/data/journal-citation/journal_names.csv")
journalid2label = dict(zip(df.journal_id.values, df.name.values)) # Dictionary from MAG journal ID to the journal name
id2label = {k:journalid2label[v] for k, v in node_labels.items()} # This is a dictionary from ID to label, i.e., {ID:journal_name}
```
Then, give `id2label` to `cidre.DrawGroup.draw`, i.e.,
```
sns.set_style("white")
sns.set(font_scale=1.2)
sns.set_style("ticks")
fig, axes = plt.subplots(ncols = 3, figsize=(9 * 3, 5))
for i in range(3):
plotter = cidre.DrawGroup()
plotter.font_size = 12 # Font size
plotter.label_node_margin = 0.7 # Margin between labels and node
plotter.draw(groups_sampled[i], node_labels = id2label, ax = axes.flat[i])
```
# Fine-Tuning a BERT Model and Create a Text Classifier
In the previous section, we've already performed the Feature Engineering to create BERT embeddings from the `review_body` text using the pre-trained BERT model, and split the dataset into train, validation and test files. To optimize for Tensorflow training, we saved the files in TFRecord format.
Now, let’s fine-tune the BERT model to our Customer Reviews Dataset and add a new classification layer to predict the `star_rating` for a given `review_body`.

As mentioned earlier, BERT’s attention mechanism is called a Transformer. This is, not coincidentally, the name of the popular BERT Python library, “Transformers,” maintained by a company called HuggingFace.
We will use a variant of BERT called [**DistilBert**](https://arxiv.org/pdf/1910.01108.pdf) which requires less memory and compute, but maintains very good accuracy on our dataset.
```
import time
import random
import pandas as pd
from glob import glob
import argparse
import json
import subprocess
import sys
import os
import tensorflow as tf
from transformers import DistilBertTokenizer
from transformers import TFDistilBertForSequenceClassification
from transformers import DistilBertConfig
%store -r max_seq_length
try:
max_seq_length
except NameError:
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("[ERROR] Please run the notebooks in the PREPARE section before you continue.")
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print(max_seq_length)
def select_data_and_label_from_record(record):
x = {
"input_ids": record["input_ids"],
"input_mask": record["input_mask"],
# 'segment_ids': record['segment_ids']
}
y = record["label_ids"]
return (x, y)
def file_based_input_dataset_builder(channel, input_filenames, pipe_mode, is_training, drop_remainder):
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if pipe_mode:
print("***** Using pipe_mode with channel {}".format(channel))
from sagemaker_tensorflow import PipeModeDataset
dataset = PipeModeDataset(channel=channel, record_format="TFRecord")
else:
print("***** Using input_filenames {}".format(input_filenames))
dataset = tf.data.TFRecordDataset(input_filenames)
dataset = dataset.repeat(100)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
name_to_features = {
"input_ids": tf.io.FixedLenFeature([max_seq_length], tf.int64),
"input_mask": tf.io.FixedLenFeature([max_seq_length], tf.int64),
# "segment_ids": tf.io.FixedLenFeature([max_seq_length], tf.int64),
"label_ids": tf.io.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
return tf.io.parse_single_example(record, name_to_features)
dataset = dataset.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=8,
drop_remainder=drop_remainder,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
)
    dataset = dataset.cache()
if is_training:
dataset = dataset.shuffle(seed=42, buffer_size=10, reshuffle_each_iteration=True)
return dataset
train_data = "./data-tfrecord/bert-train"
train_data_filenames = glob("{}/*.tfrecord".format(train_data))
print("train_data_filenames {}".format(train_data_filenames))
train_dataset = file_based_input_dataset_builder(
channel="train", input_filenames=train_data_filenames, pipe_mode=False, is_training=True, drop_remainder=False
).map(select_data_and_label_from_record)
validation_data = "./data-tfrecord/bert-validation"
validation_data_filenames = glob("{}/*.tfrecord".format(validation_data))
print("validation_data_filenames {}".format(validation_data_filenames))
validation_dataset = file_based_input_dataset_builder(
channel="validation",
input_filenames=validation_data_filenames,
pipe_mode=False,
is_training=False,
drop_remainder=False,
).map(select_data_and_label_from_record)
test_data = "./data-tfrecord/bert-test"
test_data_filenames = glob("{}/*.tfrecord".format(test_data))
print(test_data_filenames)
test_dataset = file_based_input_dataset_builder(
channel="test", input_filenames=test_data_filenames, pipe_mode=False, is_training=False, drop_remainder=False
).map(select_data_and_label_from_record)
```
# Specify Manual Hyper-Parameters
```
epochs = 1
steps_per_epoch = 10
validation_steps = 10
test_steps = 10
freeze_bert_layer = True
learning_rate = 3e-5
epsilon = 1e-08
```
# Load Pretrained BERT Model
https://huggingface.co/transformers/pretrained_models.html
```
CLASSES = [1, 2, 3, 4, 5]
config = DistilBertConfig.from_pretrained(
"distilbert-base-uncased",
num_labels=len(CLASSES),
id2label={0: 1, 1: 2, 2: 3, 3: 4, 4: 5},
label2id={1: 0, 2: 1, 3: 2, 4: 3, 5: 4},
)
print(config)
transformer_model = TFDistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased", config=config)
input_ids = tf.keras.layers.Input(shape=(max_seq_length,), name="input_ids", dtype="int32")
input_mask = tf.keras.layers.Input(shape=(max_seq_length,), name="input_mask", dtype="int32")
embedding_layer = transformer_model.distilbert(input_ids, attention_mask=input_mask)[0]
X = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(50, return_sequences=True, dropout=0.1, recurrent_dropout=0.1))(
embedding_layer
)
X = tf.keras.layers.GlobalMaxPool1D()(X)
X = tf.keras.layers.Dense(50, activation="relu")(X)
X = tf.keras.layers.Dropout(0.2)(X)
X = tf.keras.layers.Dense(len(CLASSES), activation="softmax")(X)
model = tf.keras.Model(inputs=[input_ids, input_mask], outputs=X)
for layer in model.layers[:3]:
layer.trainable = not freeze_bert_layer
```
# Setup the Custom Classifier Model Here
```
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate, epsilon=epsilon)
model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
model.summary()
callbacks = []
log_dir = "./tmp/tensorboard/"
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
callbacks.append(tensorboard_callback)
history = model.fit(
train_dataset,
shuffle=True,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
validation_data=validation_dataset,
validation_steps=validation_steps,
callbacks=callbacks,
)
print("Trained model {}".format(model))
```
# Evaluate on Holdout Test Dataset
```
test_history = model.evaluate(test_dataset, steps=test_steps, callbacks=callbacks)
print(test_history)
```
# Save the Model
```
tensorflow_model_dir = "./tmp/tensorflow/"
!mkdir -p $tensorflow_model_dir
model.save(tensorflow_model_dir, include_optimizer=False, overwrite=True)
!ls -al $tensorflow_model_dir
!saved_model_cli show --all --dir $tensorflow_model_dir
# !saved_model_cli run --dir $tensorflow_model_dir --tag_set serve --signature_def serving_default \
# --input_exprs 'input_ids=np.zeros((1,64));input_mask=np.zeros((1,64))'
```
# Predict with Model
```
import pandas as pd
import numpy as np
from transformers import DistilBertTokenizer
tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
sample_review_body = "This product is terrible."
encode_plus_tokens = tokenizer.encode_plus(
sample_review_body, padding='max_length', max_length=max_seq_length, truncation=True, return_tensors="tf"
)
# The id from the pre-trained BERT vocabulary that represents the token. (Padding of 0 will be used if the # of tokens is less than `max_seq_length`)
input_ids = encode_plus_tokens["input_ids"]
# Specifies which tokens BERT should pay attention to (0 or 1). Padded `input_ids` will have 0 in each of these vector elements.
input_mask = encode_plus_tokens["attention_mask"]
outputs = model.predict(x=(input_ids, input_mask))
prediction = [{"label": config.id2label[item.argmax()], "score": item.max().item()} for item in outputs]
print("")
print('Predicted star_rating "{}" for review_body "{}"'.format(prediction[0]["label"], sample_review_body))
```
# Release Resources
```
%%html
<p><b>Shutting down your kernel for this notebook to release resources.</b></p>
<button class="sm-command-button" data-commandlinker-command="kernelmenu:shutdown" style="display:none;">Shutdown Kernel</button>
<script>
try {
els = document.getElementsByClassName("sm-command-button");
els[0].click();
}
catch(err) {
// NoOp
}
</script>
%%javascript
try {
Jupyter.notebook.save_checkpoint();
Jupyter.notebook.session.delete();
}
catch(err) {
// NoOp
}
```
# 2-22: Intro to scikit-learn
<img src="https://www.cityofberkeley.info/uploadedImages/Public_Works/Level_3_-_Transportation/DSC_0637.JPG" style="width: 500px; height: 275px;" />
---
**Regression** is useful for predicting a value that varies on a continuous scale from a set of features. This lab will introduce the regression methods available in the scikit-learn extension to scipy, focusing on ordinary least squares linear regression, LASSO, and Ridge regression.
*Estimated Time: 45 minutes*
---
### Table of Contents
1 - [The Test-Train-Validation Split](#section 1)<br>
2 - [Linear Regression](#section 2)<br>
3 - [Ridge Regression](#section 3)<br>
4 - [LASSO Regression](#section 4)<br>
5 - [Choosing a Model](#section 5)<br>
**Dependencies:**
```
import numpy as np
from datascience import *
import datetime as dt
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Ridge, Lasso, LinearRegression
from sklearn.model_selection import KFold
```
## The Data: Bike Sharing
In your time at Cal, you've probably passed by one of the many bike sharing stations around campus. Bike sharing systems have become more and more popular as traffic and concerns about global warming rise. This lab's data describes one such bike sharing system in Washington D.C., from [UC Irvine's Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset).
```
bike = Table().read_table('data/Bike-Sharing-Dataset/day.csv')
# reformat the date column to integers representing the day of the year, 001-366
bike['dteday'] = pd.to_datetime(bike['dteday']).strftime('%j')
# get rid of the index column
bike = bike.drop(0)
bike.show(4)
```
Take a moment to get familiar with the data set. In data science, you'll often hear rows referred to as **records** and columns as **features**. Before you continue, make sure you can answer the following:
- How many records are in this data set?
- What does each record represent?
- What are the different features?
- How is each feature represented? What values does it take, and what are the data types of each value?
Use Table methods and check the UC Irvine link for more information.
```
# explore the data set here
```
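If you are unsure where to start, the sketch below is one possible (non-exhaustive) exploration using standard `datascience` Table attributes.
```
# One possible starting point for exploring the data
print(bike.num_rows)   # number of records
print(bike.labels)     # the feature (column) names
bike.show(2)           # peek at a couple of records
```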
---
## 1. The Test-Train-Validation Split <a id='section 1'></a>
When we train a model on a data set, we run the risk of [**over-fitting**](http://scikit-learn.org/stable/auto_examples/model_selection/plot_underfitting_overfitting.html). Over-fitting happens when a model becomes so complex that it makes very accurate predictions for the data it was trained on, but it can't generalize to make good predictions on new data.
We can reduce the risk of overfitting by using a **test-train split**.
1. Randomly divide our data set into two smaller sets: one for training and one for testing
2. Train the data on the training set, changing our model along the way to increase accuracy
3. Test the data's predictions using the test set.
Scikit-learn's [`train_test_split`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) function will help here. First, separate your data into two parts: a Table containing the features used to make our prediction, and an array of the true values. To start, let's predict the *total number of riders* (y) using *every feature that isn't a rider count* (X).
Note: for the function to work, X can't be a Table. Save X as a pandas DataFrame by calling `.to_df()` on the feature Table.
```
# the features used to predict riders
X = bike.drop('casual', 'registered', 'cnt')
X = X.to_df()
# the number of riders
y = bike['cnt']
```
Next, set the random seed using `np.random.seed(...)`. This will affect the way numpy pseudo-randomly generates the numbers it uses to decide how to split the data into training and test sets. Any seed number is fine; the important thing is to document the number you used in case we need to recreate this pseudorandom split in the future.
Then, call `train_test_split` on your X and y. Also set the parameters `train_size=` and `test_size=` to set aside 80% of the data for training and 20% for testing.
```
# set the random seed
np.random.seed(10)
# split the data
# train_test_split returns 4 values: X_train, X_test, y_train, y_test
X_train, X_test, y_train, y_test = train_test_split(X, y,
train_size=0.80, test_size=0.20)
```
### The Validation Set
Our test data should only be used once: after our model has been selected, trained, and tweaked. Unfortunately, it's possible that in the process of tweaking our model, we could still overfit it to the training data and only find out when we return a poor test data score. What then?
A **validation set** can help here. By trying your trained models on a validation set, you can (hopefully) weed out models that don't generalize well.
Call `train_test_split` again, this time on your X_train and y_train. We want to set aside 25% of the data to go to our validation set, and keep the remaining 75% for our training set.
Note: This means that out of the original data, 20% is for testing, 20% is for validation, and 60% is for training.
```
# split the data
# Returns 4 values: X_train, X_validate, y_train, y_validate
X_train, X_validate, y_train, y_validate = train_test_split(X_train, y_train,
train_size=0.75, test_size=0.25)
```
## 2. Linear Regression (Ordinary Least Squares) <a id='section 2'></a>
Now, we're ready to start training models and making predictions. We'll start with a **linear regression** model.
[Scikit-learn's linear regression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html#sklearn.linear_model.LinearRegression.score) is built around scipy's ordinary least squares, which you used in the last lab. The syntax for each scikit-learn model is very similar:
1. Create a model by calling its constructor function. For example, `LinearRegression()` makes a linear regression model.
2. Train the model on your training data by calling `.fit(train_X, train_y)` on the model
Create a linear regression model in the cell below.
```
# create a model
lin_reg = LinearRegression(normalize=True)
# fit the model
lin_model = lin_reg.fit(X_train, y_train)
```
With the model fit, you can look at the best-fit slope for each feature using `.coef_`, and you can get the intercept of the regression line with `.intercept_`.
```
print(lin_model.coef_)
print(lin_model.intercept_)
```
Now, let's get a sense of how good our model is. We can do this by looking at the difference between the predicted values and the actual values, also called the error.
We can see this graphically using a scatter plot.
- Call `.predict(X)` on your linear regression model with your training X to return a list of predicted rider counts. Save it to a variable `lin_pred`.
- Using a scatter plot (`plt.scatter(...)`), plot the predicted values against the actual values (`y_train`)
```
# predict the number of riders
lin_pred = lin_model.predict(X_train)
# plot the predicted values against the actual values on a scatter plot
plt.scatter(y_train, lin_pred)
plt.title('Linear Model (OLS)')
plt.xlabel('actual value')
plt.ylabel('predicted value')
plt.show()
```
Question: what should our scatter plot look like if our model was 100% accurate?
**ANSWER:** All points (i.e. errors) would fall on a line with a slope of one: the predicted value would always equal the actual value.
We can also get a sense of how well our model is doing by calculating the **root mean squared error**. The root mean squared error (RMSE) represents the average difference between the predicted and the actual values.
To get the RMSE:
- subtract each predicted value from its corresponding actual value (the errors)
- square each error (this prevents negative errors from cancelling positive errors)
- average the squared errors
- take the square root of the average (this gets the error back in the original units)
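Putting these steps together, the quantity we are computing is (using $\hat{y}_i$ for the $i$-th predicted value and $y_i$ for the actual value):

$$RMSE = \sqrt{\frac{1}{n}\sum_{i=1}^{n}(\hat{y}_i - y_i)^2}$$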
Write a function `rmse` that calculates the root mean squared error of a predicted set of values.
```
def rmse(pred, actual):
return np.sqrt(np.mean((pred - actual) ** 2))
```
Now calculate the root mean squared error for your linear model.
```
rmse(lin_pred, y_train)
```
## 3. Ridge Regression <a id='section 3'></a>
Now that you've gone through the process for OLS linear regression, it's easy to do the same for [**Ridge Regression**](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html). In this case, the constructor function that makes the model is `Ridge()`.
```
# make and fit a Ridge regression model
ridge_reg = Ridge()
ridge_model = ridge_reg.fit(X_train, y_train)
# use the model to make predictions
ridge_pred = ridge_model.predict(X_train)
# plot the predictions
plt.scatter(y_train, ridge_pred)
plt.title('Ridge Model')
plt.xlabel('actual values')
plt.ylabel('predicted values')
plt.show()
# calculate the rmse for the Ridge model
rmse(ridge_pred, y_train)
```
Note: the documentation for Ridge regression shows it has lots of **hyperparameters**: values we can choose when the model is made. Now that we've tried it using the defaults, look at the [documentation](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html) and try changing some parameters to see if you can get a lower RMSE (`alpha` might be a good one to try).
## 4. LASSO Regression <a id='section 4'></a>
Finally, we'll try using [LASSO regression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lasso.html). The constructor function to make the model is `Lasso()`.
You may get a warning message saying the objective did not converge. The model will still work, but to get convergence try increasing the number of iterations (`max_iter=`) when you construct the model.
```
# create and fit the model
lasso_reg = Lasso(max_iter=10000)
lasso_model = lasso_reg.fit(X_train, y_train)
# use the model to make predictions
lasso_pred = lasso_model.predict(X_train)
# plot the predictions
plt.scatter(y_train, lasso_pred)
plt.title('LASSO Model')
plt.xlabel('actual values')
plt.ylabel('predicted values')
plt.show()
# calculate the rmse for the LASSO model
rmse(lasso_pred, y_train)
```
Note: LASSO regression also has many tweakable hyperparameters. See how changing them affects the accuracy!
Question: How do these three models compare on performance? What sorts of things could we do to improve performance?
**ANSWER:** All three models have very similar accuracy, around 900 RMSE for each.
We could try changing which features we use or adjust the hyperparameters.
---
## 5. Choosing a model <a id='section 5'></a>
### Validation
Once you've tweaked your models' hyperparameters to get the best possible accuracy on your training sets, we can compare your models on your validation set. Make predictions on `X_validate` with each one of your models, then calculate the RMSE for each set of predictions.
```
# make predictions for each model
lin_vpred = lin_model.predict(X_validate)
ridge_vpred = ridge_model.predict(X_validate)
lasso_vpred = lasso_model.predict(X_validate)
# calculate RMSE for each set of validation predictions
print("linear model rmse: ", rmse(lin_vpred, y_validate))
print("Ridge rmse: ", rmse(ridge_vpred, y_validate))
print("LASSO rmse: ", rmse(lasso_vpred, y_validate))
```
How do the RMSEs for the validation data compare to those for the training data? Why?
Did the model that performed best on the training set also do best on the validation set?
**YOUR ANSWER:** The RMSE for the validation set tends to be larger than for the training set, simply because the models were fit to the training data.
### Predicting the Test Set
Finally, select one final model to make predictions for your test set. This is often the model that performed best on the validation data.
```
# make predictions for the test set using one model of your choice
final_pred = lin_model.predict(X_test)
# calculate the rmse for the final predictions
print('Test set rmse: ', rmse(final_pred, y_test))
```
Coming up this semester: how to select your models, model parameters, and features to get the best performance.
---
Notebook developed by: Keeley Takimoto
Data Science Modules: http://data.berkeley.edu/education/modules
**Chapter 19 – Training and Deploying TensorFlow Models at Scale**
_This notebook contains all the sample code in chapter 19._
<table align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/ageron/handson-ml2/blob/master/19_training_and_deploying_at_scale.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
</table>
# Setup
First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0.
```
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
!echo "deb http://storage.googleapis.com/tensorflow-serving-apt stable tensorflow-model-server tensorflow-model-server-universal" > /etc/apt/sources.list.d/tensorflow-serving.list
!curl https://storage.googleapis.com/tensorflow-serving-apt/tensorflow-serving.release.pub.gpg | apt-key add -
!apt update && apt-get install -y tensorflow-model-server
!pip install -q -U tensorflow-serving-api
IS_COLAB = True
except Exception:
IS_COLAB = False
# TensorFlow ≥2.0 is required
import tensorflow as tf
from tensorflow import keras
assert tf.__version__ >= "2.0"
if not tf.config.list_physical_devices('GPU'):
print("No GPU was detected. CNNs can be very slow without a GPU.")
if IS_COLAB:
print("Go to Runtime > Change runtime and select a GPU hardware accelerator.")
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
tf.random.set_seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "deploy"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format=fig_extension, dpi=resolution)
```
# Deploying TensorFlow models to TensorFlow Serving (TFS)
We will use the REST API or the gRPC API.
## Save/Load a `SavedModel`
```
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()
X_train_full = X_train_full[..., np.newaxis].astype(np.float32) / 255.
X_test = X_test[..., np.newaxis].astype(np.float32) / 255.
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_new = X_test[:3]
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28, 1]),
keras.layers.Dense(100, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))
np.round(model.predict(X_new), 2)
model_version = "0001"
model_name = "my_mnist_model"
model_path = os.path.join(model_name, model_version)
model_path
!rm -rf {model_name}
tf.saved_model.save(model, model_path)
for root, dirs, files in os.walk(model_name):
indent = ' ' * root.count(os.sep)
print('{}{}/'.format(indent, os.path.basename(root)))
for filename in files:
print('{}{}'.format(indent + ' ', filename))
!saved_model_cli show --dir {model_path}
!saved_model_cli show --dir {model_path} --tag_set serve
!saved_model_cli show --dir {model_path} --tag_set serve \
--signature_def serving_default
!saved_model_cli show --dir {model_path} --all
```
Let's write the new instances to a `npy` file so we can pass them easily to our model:
```
np.save("my_mnist_tests.npy", X_new)
input_name = model.input_names[0]
input_name
```
And now let's use `saved_model_cli` to make predictions for the instances we just saved:
```
!saved_model_cli run --dir {model_path} --tag_set serve \
--signature_def serving_default \
--inputs {input_name}=my_mnist_tests.npy
np.round([[1.1739199e-04, 1.1239604e-07, 6.0210604e-04, 2.0804715e-03, 2.5779348e-06,
6.4079795e-05, 2.7411186e-08, 9.9669880e-01, 3.9654213e-05, 3.9471846e-04],
[1.2294615e-03, 2.9207937e-05, 9.8599273e-01, 9.6755642e-03, 8.8930705e-08,
2.9156188e-04, 1.5831805e-03, 1.1311053e-09, 1.1980456e-03, 1.1113169e-07],
[6.4066830e-05, 9.6359509e-01, 9.0598064e-03, 2.9872139e-03, 5.9552520e-04,
3.7478798e-03, 2.5074568e-03, 1.1462728e-02, 5.5553433e-03, 4.2495009e-04]], 2)
```
## TensorFlow Serving
Install [Docker](https://docs.docker.com/install/) if you don't have it already. Then run:
```bash
docker pull tensorflow/serving
export ML_PATH=$HOME/ml # or wherever this project is
docker run -it --rm -p 8500:8500 -p 8501:8501 \
-v "$ML_PATH/my_mnist_model:/models/my_mnist_model" \
-e MODEL_NAME=my_mnist_model \
tensorflow/serving
```
Once you are finished using it, press Ctrl-C to shut down the server.
Alternatively, if `tensorflow_model_server` is installed (e.g., if you are running this notebook in Colab), then the following 3 cells will start the server:
```
os.environ["MODEL_DIR"] = os.path.split(os.path.abspath(model_path))[0]
%%bash --bg
nohup tensorflow_model_server \
--rest_api_port=8501 \
--model_name=my_mnist_model \
--model_base_path="${MODEL_DIR}" >server.log 2>&1
!tail server.log
import json
input_data_json = json.dumps({
"signature_name": "serving_default",
"instances": X_new.tolist(),
})
repr(input_data_json)[:1500] + "..."
```
Now let's use TensorFlow Serving's REST API to make predictions:
```
import requests
SERVER_URL = 'http://localhost:8501/v1/models/my_mnist_model:predict'
response = requests.post(SERVER_URL, data=input_data_json)
response.raise_for_status() # raise an exception in case of error
response = response.json()
response.keys()
y_proba = np.array(response["predictions"])
y_proba.round(2)
```
### Using the gRPC API
```
from tensorflow_serving.apis.predict_pb2 import PredictRequest
request = PredictRequest()
request.model_spec.name = model_name
request.model_spec.signature_name = "serving_default"
input_name = model.input_names[0]
request.inputs[input_name].CopyFrom(tf.make_tensor_proto(X_new))
import grpc
from tensorflow_serving.apis import prediction_service_pb2_grpc
channel = grpc.insecure_channel('localhost:8500')
predict_service = prediction_service_pb2_grpc.PredictionServiceStub(channel)
response = predict_service.Predict(request, timeout=10.0)
response
```
Convert the response to a tensor:
```
output_name = model.output_names[0]
outputs_proto = response.outputs[output_name]
y_proba = tf.make_ndarray(outputs_proto)
y_proba.round(2)
```
Or to a NumPy array if your client does not include the TensorFlow library:
```
output_name = model.output_names[0]
outputs_proto = response.outputs[output_name]
shape = [dim.size for dim in outputs_proto.tensor_shape.dim]
y_proba = np.array(outputs_proto.float_val).reshape(shape)
y_proba.round(2)
```
## Deploying a new model version
```
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28, 1]),
keras.layers.Dense(50, activation="relu"),
keras.layers.Dense(50, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))
model_version = "0002"
model_name = "my_mnist_model"
model_path = os.path.join(model_name, model_version)
model_path
tf.saved_model.save(model, model_path)
for root, dirs, files in os.walk(model_name):
indent = ' ' * root.count(os.sep)
print('{}{}/'.format(indent, os.path.basename(root)))
for filename in files:
print('{}{}'.format(indent + ' ', filename))
```
**Warning**: You may need to wait a minute before the new model is loaded by TensorFlow Serving.
```
import requests
SERVER_URL = 'http://localhost:8501/v1/models/my_mnist_model:predict'
response = requests.post(SERVER_URL, data=input_data_json)
response.raise_for_status()
response = response.json()
response.keys()
y_proba = np.array(response["predictions"])
y_proba.round(2)
```
# Deploy the model to Google Cloud AI Platform
Follow the instructions in the book to deploy the model to Google Cloud AI Platform, download the service account's private key and save it to the `my_service_account_private_key.json` in the project directory. Also, update the `project_id`:
```
project_id = "onyx-smoke-242003"
import googleapiclient.discovery
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "my_service_account_private_key.json"
model_id = "my_mnist_model"
model_path = "projects/{}/models/{}".format(project_id, model_id)
model_path += "/versions/v0001/" # if you want to run a specific version
ml_resource = googleapiclient.discovery.build("ml", "v1").projects()
def predict(X):
input_data_json = {"signature_name": "serving_default",
"instances": X.tolist()}
request = ml_resource.predict(name=model_path, body=input_data_json)
response = request.execute()
if "error" in response:
raise RuntimeError(response["error"])
return np.array([pred[output_name] for pred in response["predictions"]])
Y_probas = predict(X_new)
np.round(Y_probas, 2)
```
# Using GPUs
```
tf.test.is_gpu_available()
tf.test.gpu_device_name()
tf.test.is_built_with_cuda()
from tensorflow.python.client.device_lib import list_local_devices
devices = list_local_devices()
devices
```
# Distributed Training
```
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
def create_model():
return keras.models.Sequential([
keras.layers.Conv2D(filters=64, kernel_size=7, activation="relu",
padding="same", input_shape=[28, 28, 1]),
keras.layers.MaxPooling2D(pool_size=2),
keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu",
padding="same"),
keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu",
padding="same"),
keras.layers.MaxPooling2D(pool_size=2),
keras.layers.Flatten(),
keras.layers.Dense(units=64, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(units=10, activation='softmax'),
])
batch_size = 100
model = create_model()
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid), batch_size=batch_size)
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
distribution = tf.distribute.MirroredStrategy()
# Change the default all-reduce algorithm:
#distribution = tf.distribute.MirroredStrategy(
# cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
# Specify the list of GPUs to use:
#distribution = tf.distribute.MirroredStrategy(devices=["/gpu:0", "/gpu:1"])
# Use the central storage strategy instead:
#distribution = tf.distribute.experimental.CentralStorageStrategy()
#resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
#tf.tpu.experimental.initialize_tpu_system(resolver)
#distribution = tf.distribute.experimental.TPUStrategy(resolver)
with distribution.scope():
model = create_model()
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
batch_size = 100 # must be divisible by the number of workers
model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid), batch_size=batch_size)
model.predict(X_new)
```
Custom training loop:
```
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
K = keras.backend
distribution = tf.distribute.MirroredStrategy()
with distribution.scope():
model = create_model()
optimizer = keras.optimizers.SGD()
with distribution.scope():
dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train)).repeat().batch(batch_size)
input_iterator = distribution.make_dataset_iterator(dataset)
@tf.function
def train_step():
def step_fn(inputs):
X, y = inputs
with tf.GradientTape() as tape:
Y_proba = model(X)
loss = K.sum(keras.losses.sparse_categorical_crossentropy(y, Y_proba)) / batch_size
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return loss
per_replica_losses = distribution.experimental_run(step_fn, input_iterator)
mean_loss = distribution.reduce(tf.distribute.ReduceOp.SUM,
per_replica_losses, axis=None)
return mean_loss
n_epochs = 10
with distribution.scope():
input_iterator.initialize()
for epoch in range(n_epochs):
print("Epoch {}/{}".format(epoch + 1, n_epochs))
for iteration in range(len(X_train) // batch_size):
print("\rLoss: {:.3f}".format(train_step().numpy()), end="")
print()
batch_size = 100 # must be divisible by the number of workers
model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid), batch_size=batch_size)
```
## Training across multiple servers
A TensorFlow cluster is a group of TensorFlow processes running in parallel, usually on different machines, and talking to each other to complete some work, for example training or executing a neural network. Each TF process in the cluster is called a "task" (or a "TF server"). It has an IP address, a port, and a type (also called its role or its job). The type can be `"worker"`, `"chief"`, `"ps"` (parameter server) or `"evaluator"`:
* Each **worker** performs computations, usually on a machine with one or more GPUs.
* The **chief** performs computations as well, but it also handles extra work such as writing TensorBoard logs or saving checkpoints. There is a single chief in a cluster. If no chief is specified, then the first worker is the chief.
* A **parameter server** (ps) only keeps track of variable values, it is usually on a CPU-only machine.
* The **evaluator** obviously takes care of evaluation. There is usually a single evaluator in a cluster.
The set of tasks that share the same type is often called a "job". For example, the "worker" job is the set of all workers.
To start a TensorFlow cluster, you must first specify it. This means defining all the tasks (IP address, TCP port, and type). For example, the following cluster specification defines a cluster with 3 tasks (2 workers and 1 parameter server). It's a dictionary with one key per job, and the values are lists of task addresses:
```
{
"worker": ["my-worker0.example.com:9876", "my-worker1.example.com:9876"],
"ps": ["my-ps0.example.com:9876"]
}
```
Every task in the cluster may communicate with every other task in the server, so make sure to configure your firewall to authorize all communications between these machines on these ports (it's usually simpler if you use the same port on every machine).
When a task is started, it needs to be told which one it is: its type and index (the task index is also called the task id). A common way to specify everything at once (both the cluster spec and the current task's type and id) is to set the `TF_CONFIG` environment variable before starting the program. It must be a JSON-encoded dictionary containing a cluster specification (under the `"cluster"` key), and the type and index of the task to start (under the `"task"` key). For example, the following `TF_CONFIG` environment variable defines a simple cluster with 2 workers and 1 parameter server, and specifies that the task to start is the first worker:
```
import os
import json
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
"worker": ["my-work0.example.com:9876", "my-work1.example.com:9876"],
"ps": ["my-ps0.example.com:9876"]
},
"task": {"type": "worker", "index": 0}
})
print("TF_CONFIG='{}'".format(os.environ["TF_CONFIG"]))
```
Some platforms (e.g., Google Cloud ML Engine) automatically set this environment variable for you.
Then you would write a short Python script to start a task. The same script can be used on every machine, since it will load the `TF_CONFIG` variable, which will tell it which task to start:
```
import tensorflow as tf
resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
worker0 = tf.distribute.Server(resolver.cluster_spec(),
job_name=resolver.task_type,
task_index=resolver.task_id)
```
Another way to specify the cluster specification is directly in Python, rather than through an environment variable:
```
cluster_spec = tf.train.ClusterSpec({
"worker": ["127.0.0.1:9901", "127.0.0.1:9902"],
"ps": ["127.0.0.1:9903"]
})
```
You can then start a server simply by passing it the cluster spec and indicating its type and index. Let's start the two remaining tasks (remember that in general you would only start a single task per machine; we are starting 3 tasks on the localhost just for the purpose of this code example):
```
#worker1 = tf.distribute.Server(cluster_spec, job_name="worker", task_index=1)
ps0 = tf.distribute.Server(cluster_spec, job_name="ps", task_index=0)
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
"worker": ["127.0.0.1:9901", "127.0.0.1:9902"],
"ps": ["127.0.0.1:9903"]
},
"task": {"type": "worker", "index": 1}
})
print(repr(os.environ["TF_CONFIG"]))
distribution = tf.distribute.experimental.MultiWorkerMirroredStrategy()
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
"worker": ["127.0.0.1:9901", "127.0.0.1:9902"],
"ps": ["127.0.0.1:9903"]
},
"task": {"type": "worker", "index": 1}
})
#CUDA_VISIBLE_DEVICES=0
with distribution.scope():
model = create_model()
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
import tensorflow as tf
from tensorflow import keras
import numpy as np
# At the beginning of the program (restart the kernel before running this cell)
distribution = tf.distribute.experimental.MultiWorkerMirroredStrategy()
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()
X_train_full = X_train_full[..., np.newaxis] / 255.
X_test = X_test[..., np.newaxis] / 255.
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_new = X_test[:3]
n_workers = 2
batch_size = 32 * n_workers
dataset = tf.data.Dataset.from_tensor_slices((X_train[..., np.newaxis], y_train)).repeat().batch(batch_size)
def create_model():
return keras.models.Sequential([
keras.layers.Conv2D(filters=64, kernel_size=7, activation="relu",
padding="same", input_shape=[28, 28, 1]),
keras.layers.MaxPooling2D(pool_size=2),
keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu",
padding="same"),
keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu",
padding="same"),
keras.layers.MaxPooling2D(pool_size=2),
keras.layers.Flatten(),
keras.layers.Dense(units=64, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(units=10, activation='softmax'),
])
with distribution.scope():
model = create_model()
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
model.fit(dataset, steps_per_epoch=len(X_train)//batch_size, epochs=10)
# Hyperparameter tuning
# Only talk to ps server
config_proto = tf.ConfigProto(device_filters=['/job:ps', '/job:worker/task:%d' % tf_config['task']['index']])
config = tf.estimator.RunConfig(session_config=config_proto)
# default since 1.10
strategy.num_replicas_in_sync
```
# Longest Palindromic Subsequence
In this notebook, you'll be tasked with finding the length of the *Longest Palindromic Subsequence* (LPS) given a string of characters.
As an example:
* With an input string, `ABBDBCACB`
* The LPS is `BCACB`, which has `length = 5`
In this notebook, we'll focus on finding an optimal solution to the LPS task, using dynamic programming. There will be some similarities to the Longest Common Subsequence (LCS) task, which is outlined in detail in a previous notebook. It is recommended that you start with that notebook before trying out this task.
### Hint
**Storing pre-computed values**
The LPS algorithm depends on looking at one string and comparing letters to one another. Similar to how you compared two strings in the LCS (Longest Common Subsequence) task, you can compare the characters in just *one* string with one another, using a matrix to store the results of matching characters.
For a string of length n characters, you can create an `n x n` matrix to store the solution to subproblems. In this case, the subproblem is the length of the longest palindromic subsequence, up to a certain point in the string (up to the end of a certain substring).
It may be helpful to try filling up a matrix on paper before you start your code solution. If you get stuck with this task, you may look at some example matrices below (see the section titled **Example matrices**), before consulting the complete solution code.
```
# imports for printing a matrix, nicely
import pprint
pp = pprint.PrettyPrinter()
# complete LPS solution
def lps(input_string):
n = len(input_string)
# create a lookup table to store results of subproblems
L = [[0 for x in range(n)] for x in range(n)]
# strings of length 1 have LPS length = 1
for i in range(n):
L[i][i] = 1
# consider all substrings
for s_size in range(2, n+1):
for start_idx in range(n-s_size+1):
end_idx = start_idx + s_size - 1
if s_size == 2 and input_string[start_idx] == input_string[end_idx]:
# match with a substring of length 2
L[start_idx][end_idx] = 2
elif input_string[start_idx] == input_string[end_idx]:
# general match case
L[start_idx][end_idx] = L[start_idx+1][end_idx-1] + 2
else:
# no match case, taking the max of two values
L[start_idx][end_idx] = max(L[start_idx][end_idx-1], L[start_idx+1][end_idx]);
# debug line
# pp.pprint(L)
return L[0][n-1] # value in top right corner of matrix
def test_function(test_case):
string = test_case[0]
solution = test_case[1]
output = lps(string)
print(output)
if output == solution:
print("Pass")
else:
print("Fail")
string = "TACOCAT"
solution = 7
test_case = [string, solution]
test_function(test_case)
string = 'BANANA'
solution = 5
test_case = [string, solution]
test_function(test_case)
string = 'BANANO'
solution = 3
test_case = [string, solution]
test_function(test_case)
```
### Example matrices
Example LPS Subproblem matrix 1:
```
input_string = 'BANANO'
LPS subproblem matrix:
B A N A N O
B [[1, 1, 1, 3, 3, 3],
A [0, 1, 1, 3, 3, 3],
N [0, 0, 1, 1, 3, 3],
A [0, 0, 0, 1, 1, 1],
N [0, 0, 0, 0, 1, 1],
O [0, 0, 0, 0, 0, 1]]
LPS length: 3
```
Example LPS Subproblem matrix 2:
```
input_string = 'TACOCAT'
LPS subproblem matrix:
T A C O C A T
T [[1, 1, 1, 1, 3, 5, 7],
A [0, 1, 1, 1, 3, 5, 5],
C [0, 0, 1, 1, 3, 3, 3],
O [0, 0, 0, 1, 1, 1, 1],
C [0, 0, 0, 0, 1, 1, 1],
A [0, 0, 0, 0, 0, 1, 1],
T [0, 0, 0, 0, 0, 0, 1]]
LPS length: 7
```
Note: The lower diagonal values will remain 0 in all cases.
### The matrix rules
You can efficiently fill up this matrix one cell at a time. Each grid cell only depends on the cells directly below it, directly to its left, and diagonally to the bottom-left. The rules are as follows:
* Start with an `n x n ` matrix where n is the number of characters in a given string; the diagonal should all have the value 1 for the base case, the rest can be zeros.
* As you traverse your string:
* If there is a match, fill that grid cell with the value to the bottom-left of that cell *plus* two.
* If there is not a match, take the *maximum* value from either directly to the left or the bottom cell, and carry that value over to the non-match cell.
* After completely filling the matrix, **the top-right cell will hold the final LPS length**.
<span class="graffiti-highlight graffiti-id_d28fhk7-id_3yrlf09"><i></i><button>Show Solution</button></span>
### Complexity
What was the complexity of this?
In the solution, we loop over the elements of our `input_string` using two nested `for` loops; each is $O(N)$, so together they give $O(N^2)$ time, which dominates the running time of this solution. The `n x n` lookup table also means the solution uses $O(N^2)$ space.
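For comparison (not part of the original solution), the same recurrence can be written top-down with memoization, which makes the $O(N^2)$ family of `(start_idx, end_idx)` subproblems explicit:
```
from functools import lru_cache

def lps_recursive(input_string):
    """Top-down LPS length using the same recurrence as the table above."""
    n = len(input_string)

    @lru_cache(maxsize=None)
    def solve(start_idx, end_idx):
        if start_idx > end_idx:
            return 0
        if start_idx == end_idx:
            return 1
        if input_string[start_idx] == input_string[end_idx]:
            return solve(start_idx + 1, end_idx - 1) + 2
        return max(solve(start_idx, end_idx - 1), solve(start_idx + 1, end_idx))

    return solve(0, n - 1) if n else 0

print(lps_recursive("TACOCAT"))  # 7, matching the bottom-up version
```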
# Update rules
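A quick reference, summarized from the functions in the cell below: for gradient $g_t = \nabla f(x_t)$, learning rate $\eta$, and a small constant $\epsilon$, with $\mu$, $\rho$, $\beta_1$, $\beta_2$ corresponding to the code's `momentum`, `decay_rate`, `beta1`, `beta2`, the per-step updates are
$$
\begin{aligned}
\textbf{SGD:}\quad & x_{t+1} = x_t - \eta\, g_t \\
\textbf{Nesterov momentum:}\quad & v_{t+1} = \mu\, v_t - \eta\, \nabla f(x_t + \mu\, v_t), \qquad x_{t+1} = x_t + v_{t+1} \\
\textbf{Adagrad:}\quad & c_{t+1} = c_t + g_t^2, \qquad x_{t+1} = x_t - \frac{\eta\, g_t}{\sqrt{c_{t+1}} + \epsilon} \\
\textbf{RMSProp:}\quad & c_{t+1} = \rho\, c_t + (1-\rho)\, g_t^2, \qquad x_{t+1} = x_t - \frac{\eta\, g_t}{\sqrt{c_{t+1}} + \epsilon} \\
\textbf{Adam:}\quad & m_{t+1} = \beta_1 m_t + (1-\beta_1)\, g_t, \qquad v_{t+1} = \beta_2 v_t + (1-\beta_2)\, g_t^2, \\
& \hat{m} = \frac{m_{t+1}}{1-\beta_1^{\,t+1}}, \qquad \hat{v} = \frac{v_{t+1}}{1-\beta_2^{\,t+1}}, \qquad x_{t+1} = x_t - \frac{\eta\, \hat{m}}{\sqrt{\hat{v}} + \epsilon}
\end{aligned}
$$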
```
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
from IPython.display import HTML
from matplotlib import cm
from matplotlib.colors import LogNorm
def sgd(f, df, x0, y0, lr, steps):
x = np.zeros(steps + 1)
y = np.zeros(steps + 1)
x[0] = x0
y[0] = y0
for i in range(steps):
(dx, dy) = df(x[i], y[i])
x[i + 1] = x[i] - lr * dx
y[i + 1] = y[i] - lr * dy
z = f(x, y)
return [x, y, z]
def nesterov(f, df, x0, y0, lr, steps, momentum):
x = np.zeros(steps + 1)
y = np.zeros(steps + 1)
x[0] = x0
y[0] = y0
dx_v = 0
dy_v = 0
for i in range(steps):
(dx_ahead, dy_ahead) = df(x[i] + momentum * dx_v, y[i] + momentum * dy_v)
dx_v = momentum * dx_v - lr * dx_ahead
dy_v = momentum * dy_v - lr * dy_ahead
x[i + 1] = x[i] + dx_v
y[i + 1] = y[i] + dy_v
z = f(x, y)
return [x, y, z]
def adagrad(f, df, x0, y0, lr, steps):
x = np.zeros(steps + 1)
y = np.zeros(steps + 1)
x[0] = x0
y[0] = y0
dx_cache = 0
dy_cache = 0
for i in range(steps):
(dx, dy) = df(x[i], y[i])
dx_cache += dx ** 2
dy_cache += dy ** 2
x[i + 1] = x[i] - lr * dx / (1e-8 + np.sqrt(dx_cache))
y[i + 1] = y[i] - lr * dy / (1e-8 + np.sqrt(dy_cache))
z = f(x, y)
return [x, y, z]
def rmsprop(f, df, x0, y0, lr, steps, decay_rate):
x = np.zeros(steps + 1)
y = np.zeros(steps + 1)
x[0] = x0
y[0] = y0
dx_cache = 0
dy_cache = 0
for i in range(steps):
(dx, dy) = df(x[i], y[i])
dx_cache = decay_rate * dx_cache + (1 - decay_rate) * dx ** 2
dy_cache = decay_rate * dy_cache + (1 - decay_rate) * dy ** 2
x[i + 1] = x[i] - lr * dx / (1e-8 + np.sqrt(dx_cache))
y[i + 1] = y[i] - lr * dy / (1e-8 + np.sqrt(dy_cache))
z = f(x, y)
return [x, y, z]
def adam(f, df, x0, y0, lr, steps, beta1, beta2):
# adam with bias correction
x = np.zeros(steps + 1)
y = np.zeros(steps + 1)
x[0] = x0
y[0] = y0
dx_v = 0
dy_v = 0
dx_cache = 0
dy_cache = 0
for i in range(steps):
(dx, dy) = df(x[i], y[i])
dx_v = beta1 * dx_v + (1 - beta1) * dx
dx_v_hat = dx_v / (1 - beta1 ** (i + 1))
dx_cache = beta2 * dx_cache + (1 - beta2) * dx ** 2
dx_cache_hat = dx_cache / (1 - beta2 ** (i + 1))
dy_v = beta1 * dy_v + (1 - beta1) * dy
dy_v_hat = dy_v / (1 - beta1 ** (i + 1))
dy_cache = beta2 * dy_cache + (1 - beta2) * dy ** 2
dy_cache_hat = dy_cache / (1 - beta2 ** (i + 1))
x[i + 1] = x[i] - lr * dx_v_hat / (1e-8 + np.sqrt(dx_cache_hat))
y[i + 1] = y[i] - lr * dy_v_hat / (1e-8 + np.sqrt(dy_cache_hat))
z = f(x, y)
return [x, y, z]
def update_lines(num, dataLines, lines):
for line, data in zip(lines, dataLines):
# NOTE: there is no .set_data() for 3 dim data...
line.set_data(data[0:2, :num])
line.set_3d_properties(data[2, :num])
line.set_marker('o')
line.set_markevery([-1])
return lines
def create_and_save_animation(func_title, f, df, params={}, plot_params={}):
x0 = params.get('x0', 0)
y0 = params.get('y0', 0)
lr = params.get('lr', .1)
steps = params.get('steps', 8)
momentum = params.get('momentum', .9)
decay_rate = params.get('decay_rate', .9)
beta1 = params.get('beta1', .9)
beta2 = params.get('beta2', .999)
# sgd params
x0_sgd = params.get('x0_sgd', x0)
y0_sgd = params.get('y0_sgd', y0)
lr_sgd = params.get('lr_sgd', lr)
# nesterov params
x0_nesterov = params.get('x0_nesterov', x0)
y0_nesterov = params.get('y0_nesterov', y0)
lr_nesterov = params.get('lr_nesterov', lr)
# adagrad params
x0_adagrad = params.get('x0_adagrad', x0)
y0_adagrad = params.get('y0_adagrad', y0)
lr_adagrad = params.get('lr_adagrad', lr)
# rmsprop params
x0_rmsprop = params.get('x0_rmsprop', x0)
y0_rmsprop = params.get('y0_rmsprop', y0)
lr_rmsprop = params.get('lr_rmsprop', lr)
# adam params
x0_adam = params.get('x0_adam', x0)
y0_adam = params.get('y0_adam', y0)
lr_adam = params.get('lr_adam', lr)
azim = plot_params.get('azim', -29)
elev = plot_params.get('elev', 49)
rotation = plot_params.get('rotation', -7)
# attaching 3D axis to the figure
fig = plt.figure(figsize=(12, 8))
ax = p3.Axes3D(fig, azim=azim, elev=elev)
# plot the surface
x = np.arange(-6.5, 6.5, 0.1)
y = np.arange(-6.5, 6.5, 0.1)
x, y = np.meshgrid(x, y)
z = f(x, y)
ax.plot_surface(x, y, z, rstride=1, cstride=1,
norm = LogNorm(), cmap = cm.jet)
ax.set_title(func_title, rotation=rotation)
# lines to plot in 3D
sgd_data = sgd(f, df, x0_sgd, y0_sgd, lr_sgd, steps)
nesterov_data = nesterov(f, df, x0_nesterov, y0_nesterov, lr_nesterov, steps, momentum)
adagrad_data = adagrad(f, df, x0_adagrad, y0_adagrad, lr_adagrad, steps)
rmsprop_data = rmsprop(f, df, x0_rmsprop, y0_rmsprop, lr_rmsprop, steps, decay_rate)
adam_data = adam(f, df, x0_adam, y0_adam, lr_adam, steps, beta1, beta2)
data = np.array([sgd_data, nesterov_data, adagrad_data, rmsprop_data, adam_data])
# NOTE: Can't pass empty arrays into 3d version of plot()
lines = [ax.plot(dat[0, 0:1], dat[1, 0:1], dat[2, 0:1])[0] for dat in data]
ax.legend(lines, ['SGD', 'Nesterov Momentum', 'Adagrad', 'RMSProp', 'Adam'])
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
plt.rcParams['animation.html'] = 'html5'
line_ani = animation.FuncAnimation(fig, update_lines, steps+2, fargs=(data, lines),
interval=500, blit=False, repeat=False)
plt.close()
line_ani.save(f'optimization_{func_title}.gif', writer='imagemagick',fps=500/100)
return line_ani
func_title = 'sphere_function'
def f(x, y):
return x ** 2 + y ** 2
def df(x, y):
return (2 * x, 2 * y)
create_and_save_animation(func_title, f, df,
params={
'steps': 15,
'lr': .2,
'x0_sgd': -4,
'y0_sgd': -4,
'x0_nesterov': -4.2,
'y0_nesterov': -3.8,
'x0_adagrad': -4,
'y0_adagrad': 4,
'x0_rmsprop': -4.2,
'y0_rmsprop': 3.8,
'x0_adam': -4,
'y0_adam': 4.2,
},
plot_params={
'azim': 15,
'elev': 60,
'rotation': -7
})
func_title = 'himmelblau_function'
def f(x, y):
return (x ** 2 + y - 11) ** 2 + (x + y ** 2 - 7) ** 2
def df(x, y):
return (4 * x * (x ** 2 + y - 11) + 2 * (x + y ** 2 - 7),
2 * (x ** 2 + y - 11) + 4 * y * (x + y ** 2 - 7))
create_and_save_animation(func_title, f, df,
params={
'steps': 25,
'lr': .005,
'x0': 0,
'y0': -3,
'lr_adagrad': .5,
'lr_rmsprop': .5,
'lr_adam': .5
},
plot_params={
'azim': -29,
'elev': 70,
'rotation': 17
})
```
# Mumbai House Price Prediction - Supervised Machine Learning-Regression Problem
## Data Preprocessing
# The main goal of this project is to predict the prices of houses in Mumbai from their features.
# Import Libraries
```
# importing necessary libraries
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
import numpy as np
from scipy import stats
import re
```
# Load dataset
```
# Load the dataset
df=pd.read_csv('house_scrape.csv')
df.head(5)
df = df.drop(['type_of_sale'], axis = 1)
df.shape
duplicate = df[df.duplicated()]
duplicate
# Drop the duplicate entries in the dataset.
df = df.drop_duplicates()
# The number of rows has changed, so we reset the index.
df = df.reset_index()
df.head()
# Dropping unnecessary columns in dataset.
df=df.drop(labels='index',axis=1)
df.head()
```
# Exploratory Data Analysis
```
df.shape
df.info()
#we have 3 numeric variables and 5 categorical variables
#we have column price in lakhs
df.describe()
#observe 75% and max value it shows huge diff
sns.pairplot(df)
plt.show()
# area_insqft and price(L) have slightly linear correlation with some outliers
# value count of each feature
def value_count(df):
for var in df.columns:
print(df[var].value_counts())
print("--------------------------------")
value_count(df)
# correlation heatmap
sns.heatmap(df.corr(),cmap="coolwarm", annot=True)
plt.show()
```
# Prepare Data for Machine Learning Model
# Data Cleaning
```
df.isnull().sum() # how much data is missing per column
df.isnull().mean()*100 # % of missing values
# visualize missing values with a heatmap to see where they occur
plt.figure(figsize=(16,9))
sns.heatmap(df.isnull())
```
# Handling the null values of sale_type
```
df.loc[df['construction_status'] == 'Under Construction', 'Sale_type'] = 'new'
df
# We can fill the null values of Sale_type using the construction_status column: wherever the status is 'Under Construction', the sale type must be 'new'.
df1 = df['Sale_type']
df1
df = df.drop(['Sale_type'],axis = 1)
df.head()
#we can drop the Sale_type as we will concatenate it in df.
df1 = df1.fillna(method='ffill')
df1.isnull().sum()
#to handle rest of the null values in Sale_type we used ffill() method
df = pd.concat([df, df1], axis=1)
df.head()
```
# Handling the null values of Bathroom
```
#we need to extract the numeric value from string first
df["Bathroom"] = df['Bathroom'].str.extract(r'(\d+)', expand=False)
#lets convert the bathroom from object type to numeric
df["Bathroom"] = pd.to_numeric(df["Bathroom"])
df2 = df['Bathroom']
df2
df = df.drop(['Bathroom'],axis = 1)
df.head()
#we can drop the Bathroom as we will concatenate it in df.
df2 = df2.fillna(method='bfill')
df2.isnull().sum()
# to handle the remaining null values in Bathroom we use the bfill() method
df = pd.concat([df, df2], axis=1)
df.head()
df.isnull().sum() #check for the null values
# our data has no null values now, so we can proceed with the rest of the preprocessing
#lets convert the rate_persqft from object type to numeric
# got error cannot convert str "price" at position 604 so replacing price with rate/sqft value.
df["rate_persqft"] = df["rate_persqft"].replace("Price", 8761)
df["rate_persqft"] = pd.to_numeric(df["rate_persqft"])
#now we can check the description of the data again
df.describe()
sns.heatmap(df.corr(), annot=True)
plt.show()
```
# Finding outliers and removing them
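The cells below flag a row as an outlier when any of the selected columns lies more than three standard deviations from its mean; in terms of the standardized score
$$
z = \frac{x - \mu}{\sigma},
$$
a row is kept only if $|z| < 3$ holds for every selected column.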
```
# function to create histogram, Q-Q plot and boxplot
# for Q-Q plots
import scipy.stats as stats
def diagnostic_plots(df, variable):
# function takes a dataframe (df) and
# the variable of interest as arguments
# define figure size
plt.figure(figsize=(16, 4))
# histogram
plt.subplot(1, 3, 1)
sns.distplot(df[variable], bins=30)
plt.title('Histogram')
# Q-Q plot
plt.subplot(1, 3, 2)
stats.probplot(df[variable], dist="norm", plot=plt)
plt.ylabel('Variable quantiles')
# boxplot
plt.subplot(1, 3, 3)
sns.boxplot(y=df[variable])
plt.title('Boxplot')
plt.show()
num_var = ["BHK","price(L)","rate_persqft","area_insqft","Bathroom"]
for var in num_var:
print("******* {} *******".format(var))
diagnostic_plots(df, var)
# here we observe outliers using the histogram, Q-Q plot and boxplot
# some features contain outliers; we will remove them so that the other variables are not affected
df3 = df[['price(L)', 'rate_persqft', 'area_insqft']].copy()
df3
#here we make a new data frame to remove the outliers of the features needed and then concatenate with previous dataframe
df = df.drop(['price(L)','rate_persqft','area_insqft'],axis = 1)
df.head()
#droping the values so we can concat the new clean features
z_scores = stats.zscore(df3)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
df3 = df3[filtered_entries]
# using the Z-score to remove the outliers from the selected features
df3
#this is our new dataframe with removed outliers affecting our data
sns.boxplot(x=df3['price(L)'])
#we can compare the above box plots and see the difference outliers has been removed the ones remaining are relevant to our data
sns.boxplot(x=df3['rate_persqft'])
sns.boxplot(x=df3['area_insqft'])
df = pd.concat([df, df3], axis=1)
df.head()
#concatenate to our previous dataframe
df.isnull().sum()
#after we removed the outliers we get some na values
df = df.dropna()
df
#we can drop those values and reset the index so we get all aligned dataset
df=df.reset_index()
#resetting the index
df=df.drop(labels='index',axis=1)
df.head()
#drop the extra index created that we dont need
```
# Categorical variable encoding
# Encoding Construction_status
```
for cat_var in ["Under Construction","Ready to move"]:
df["construction_status"+cat_var] = np.where(df['construction_status']==cat_var, 1,0)
df.shape
```
# Encoding Sale_type
```
for cat_var in ["new","resale"]:
df["Sale_type"+cat_var] = np.where(df['Sale_type']==cat_var, 1,0)
df.shape
```
# Encoding Location
```
# here we are selecting only the location which have count above 50
location_value_count = df['location'].value_counts()
location_value_count
location_get_50 = location_value_count[location_value_count>=50].index
location_get_50
for cat_var in location_get_50:
df['location_'+cat_var]=np.where(df['location']==cat_var, 1,0)
df.shape
df.head()
```
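The `np.where` loops above build the one-hot indicator columns by hand. For reference, a sketch of an equivalent encoding using `pd.get_dummies` is shown below; the resulting column names differ slightly from the manual version (pandas inserts a `_` separator), and the >= 50-count filtering of locations would still need to be applied first.
```
# Equivalent one-hot encoding with pandas built-ins (sketch, not used below);
# assumes df still holds the raw categorical columns at this point.
encoded = pd.get_dummies(df, columns=['construction_status', 'Sale_type', 'location'])
```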
# Drop categorical variable
```
df = df.drop(["location","construction_status",'Sale_type'], axis =1)
df.shape
df.head()
df.to_csv('final_house_scrape.csv', index=False)
```
# Speech Identity Inference
Let's check if the pretrained model can really identify speakers.
```
import os
import numpy as np
import pandas as pd
from sklearn import metrics
from tqdm.notebook import tqdm
from IPython.display import Audio
from matplotlib import pyplot as plt
%matplotlib inline
import tensorflow as tf
import tensorflow_io as tfio
import tensorflow_addons as tfa
from train_speech_id_model import BaseSpeechEmbeddingModel
from create_audio_tfrecords import AudioTarReader, PersonIdAudio
sr = 48000
m = BaseSpeechEmbeddingModel()
m.summary()
# cp-0090.ckpt: auc = 0.9525
# cp-0110.ckpt: auc = 0.9533
chkpt = 'temp/cp-0110.ckpt'
m.load_weights(chkpt)
m.compile(
optimizer=tf.keras.optimizers.Adam(0.0006),
loss=tfa.losses.TripletSemiHardLoss()
)
# m.save('speech-id-model-110')
# changing the corpus to other languages allows evaluating how the model transfers between languages
dev_dataset = tf.data.TFRecordDataset(
'data/cv-corpus-7.0-2021-07-21-en.tar.gz_dev.tfrecords.gzip', compression_type='GZIP',
# 'data/cv-corpus-7.0-2021-07-21-en.tar.gz_test.tfrecords.gzip', compression_type='GZIP',
num_parallel_reads=4
).map(PersonIdAudio.deserialize_from_tfrecords)
samples = [x for x in dev_dataset.take(2500)]
# decode audio
samples = [(tfio.audio.decode_mp3(x[0])[:, 0], x[1]) for x in samples]
# is the audio decoded correctly?
Audio(samples[10][0], rate=sr)
# compute the embeddings
embeddings = []
for audio_data, person_id in tqdm(samples):
cur_emb = m.predict(
tf.expand_dims(audio_data, axis=0)
)[0]
embeddings.append(cur_emb)
```
## Check embedding quality
Ideally, embeddings from the same person should look the same.
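The check below uses the pairwise Euclidean distance between embeddings,
$$
d(e_i, e_j) = \lVert e_i - e_j \rVert_2 ,
$$
and, since the model was trained with a triplet loss, same-speaker pairs should yield systematically smaller distances than different-speaker pairs.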
```
n_speakers = len(set([x[1].numpy() for x in samples]))
print(f'Loaded {n_speakers} different speakers')
pairwise_diff = {'same': [], 'different': []}
for p in tqdm(range(len(samples))):
for q in range(p + 1, len(samples)):
id_1 = samples[p][1]
id_2 = samples[q][1]
dist = np.linalg.norm(embeddings[p] - embeddings[q])
if id_1 == id_2:
pairwise_diff['same'].append(dist)
else:
pairwise_diff['different'].append(dist)
plt.figure(figsize=(12, 8))
plt.boxplot([pairwise_diff[x] for x in pairwise_diff])
plt.xticks([k + 1 for k in range(len(pairwise_diff))], [x for x in pairwise_diff])
plt.ylabel('Embedding distance')
plt.title('Boxplot of speaker identifiability')
# what do we care about?
# given that 2 samples are different, we don't want to predict `same`
# secondarily, given that 2 samples are the same, we want to predict `same`
# threshold - alpha from 0 (median of same) to 1 (median of different)
alpha = 0.2
# if using the validation set, we can calibrate t
t = np.median(pairwise_diff['same']) + alpha * (np.median(pairwise_diff['different']) - np.median(pairwise_diff['same']))
specificity = np.sum(np.array(pairwise_diff['different']) > t) / len(pairwise_diff['different'])
sensitivity = np.sum(np.array(pairwise_diff['same']) < t) / len(pairwise_diff['same'])
print('Sensitivity, specificity = ', sensitivity, specificity)
same_lbl = [0] * len(pairwise_diff['same'])
diff_lbl = [1] * len(pairwise_diff['different'])
scores = pairwise_diff['same'] + pairwise_diff['different']
# scale scores to the range [0, 1] and change the threshold accordingly
scores = np.array(scores) * 0.5
t = t * 0.5
labels = same_lbl + diff_lbl
len(scores), len(labels)
fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=1)
plt.figure(figsize=(12, 8))
roc_auc = metrics.roc_auc_score(labels, scores)
plt.title(f'ROC curve: AUC = {np.round(roc_auc, 4)} {chkpt}')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.plot(fpr, tpr)
plt.plot([0, 1], [0, 1])
plt.figure(figsize=(12, 8))
plt.title('Point of operation')
plt.plot(thresholds, 1 - fpr, label='Specificity')
plt.plot(thresholds, tpr, label='Sensitivity')
plt.plot([t, t], [0, 1], label='Threshold')
plt.xlabel('Threshold level')
plt.xlim([0, 1])
plt.legend()
```
## Select best model on validation
Strategy: compute the triplet loss on the validation set without sorting it, so that each batch contains repeated speakers; this also keeps the evaluation consistent across checkpoints. The batch size should be as large as possible.
```
triplet_loss = tfa.losses.TripletSemiHardLoss()
# compute all predictions
def mp3_decode_fn(audio_bytes, audio_class):
audio_data = tfio.audio.decode_mp3(audio_bytes)[:, 0]
return audio_data, audio_class
all_preds = []
all_labels = []
for x in tqdm(dev_dataset.take(1300).map(mp3_decode_fn)):
s = x[0]
all_preds.append(m.predict(
tf.expand_dims(x[0], axis=0)
)[0])
all_labels.append(x[1].numpy())
len(all_preds)
batch_size = 128
n_batches = len(all_preds) // batch_size
vec_size = len(all_preds[0])
np_preds = np.reshape(all_preds[0:batch_size * n_batches], (n_batches, batch_size, vec_size))
np_labls = np.reshape(all_labels[0:batch_size * n_batches], (n_batches, batch_size))
total_loss = 0
for lbl, pred in zip(np_labls, np_preds):
total_loss += triplet_loss(lbl, pred).numpy()
total_loss = total_loss / len(lbl)
print(f'Total loss: {total_loss}')
all_checkpoints = [x.split('.')[0] + '.ckpt' for x in os.listdir('temp') if 'ckpt.index' in x]
all_results = []
for checkpoint in tqdm(all_checkpoints):
m.load_weights(os.path.join('temp', checkpoint))
all_preds = []
all_labels = []
n_items = 4600
for x in tqdm(dev_dataset.take(n_items).map(mp3_decode_fn),
total=n_items, leave=False):
# for x in tqdm(dev_dataset.map(mp3_decode_fn),
# leave=False):
s = x[0]
all_preds.append(m.predict(
tf.expand_dims(x[0], axis=0)
)[0])
all_labels.append(x[1].numpy())
batch_size = 128
n_batches = len(all_preds) // batch_size
vec_size = len(all_preds[0])
np_preds = np.reshape(all_preds[0:batch_size * n_batches], (n_batches, batch_size, vec_size))
np_labls = np.reshape(all_labels[0:batch_size * n_batches], (n_batches, batch_size))
total_loss = 0
for lbl, pred in zip(np_labls, np_preds):
total_loss += triplet_loss(lbl, pred).numpy()
total_loss = total_loss / len(lbl)
cur_result = {
'checkpoint': checkpoint,
'val_loss': total_loss
}
print(cur_result)
all_results.append(cur_result)
df_val = pd.DataFrame(all_results)
df_val['idx'] = df_val.checkpoint.apply(lambda z: int(z.split('.')[0].split('-')[1]))
df_val = df_val.set_index('idx')
df_val.to_csv('val_triplet_loss.csv')
# df_val
df_val.plot()
```
# MASH analysis pipeline with data-driven prior matrices
This notebook is a pipeline written in SoS to run `flashr + mashr` for multivariate analysis described in Urbut et al (2019). This pipeline was last applied to analyze GTEx V8 eQTL data, although it can be used as is to perform similar multivariate analysis for other association studies.
*Version: 2021.02.28 by Gao Wang and Yuxin Zou*
```
%revisions -s
```
## Data overview
`fastqtl` summary statistics data were obtained from dbGaP (data on CRI at UChicago Genetic Medicine). It has 49 tissues. [more description to come]
## Preparing MASH input
Using an established workflow (which takes 33hrs to run on a cluster system as configured by `midway2.yml`; see inside `fastqtl_to_mash.ipynb` for a note on computing environment),
```
INPUT_DIR=/project/compbio/GTEx_dbGaP/GTEx_Analysis_2017-06-05_v8/eqtl/GTEx_Analysis_v8_eQTL_all_associations
JOB_OPT="-c midway2.yml -q midway2"
sos run workflows/fastqtl_to_mash.ipynb --data-list $INPUT_DIR/FastQTLSumStats.list --common-suffix ".allpairs.txt" $JOB_OPT
```
As a result of command above I obtained the "mashable" data-set in the same format [as described here](https://stephenslab.github.io/gtexresults/gtexdata.html).
### Some data integrity check
1. Check if I get the same number of groups (genes) at the end of HDF5 data conversion:
```
$ zcat Whole_Blood.allpairs.txt.gz | cut -f1 | sort -u | wc -l
20316
$ h5ls Whole_Blood.allpairs.txt.h5 | wc -l
20315
```
The results agreed on Whole Blood sample (the original data has a header thus one line more than the H5 version). We should be good (since the pipeline reported success for all other files).
### Data & job summary
The command above took 33 hours on UChicago RCC `midway2`.
```
[MW] cat FastQTLSumStats.log
39832 out of 39832 groups merged!
```
So we have a total of 39832 genes (union of 49 tissues).
```
[MW] cat FastQTLSumStats.portable.log
15636 out of 39832 groups extracted!
```
We have 15636 groups without missing data in any tissue. This will be used to train the MASH model.
The "mashable" data file is `FastQTLSumStats.mash.rds`, 124Mb serialized R file.
## Multivariate adaptive shrinkage (MASH) analysis of eQTL data
Below is a "blackbox" implementation of the `mashr` eQTL workflow -- blackbox in the sense that you can run this pipeline as an executable, without thinking too much about it, if you see your problem fits our GTEx analysis scheme. However when reading it as a notebook it is a good source of information to help developing your own `mashr` analysis procedures.
Since the submission to biorxiv of Urbut 2017 we have improved implementation of MASH algorithm and made a new R package, [`mashr`](https://github.com/stephenslab/mashr). Major improvements compared to Urbut 2019 are:
1. Faster computation of likelihood and posterior quantities via matrix algebra tricks and a C++ implementation.
2. Faster computation of MASH mixture via convex optimization.
3. Replace `SFA` with `FLASH`, a new sparse factor analysis method to generate prior covariance candidates.
4. Improve estimate of residual variance $\hat{V}$.
At this point, the input data have already been converted from the original eQTL summary statistics to a format convenient for analysis in MASH, as a result of running the data conversion pipeline in `fastqtl_to_mash.ipynb`.
Example command:
```bash
JOB_OPT="-j 8"
#JOB_OPT="-c midway2.yml -q midway2"
sos run workflows/mashr_flashr_workflow.ipynb mash $JOB_OPT # --data ... --cwd ... --vhat ...
```
**FIXME: add comments on submitting jobs to HPC. Here we use the UChicago RCC cluster but other users can similarly configure their computing system to run the pipeline on HPC.**
### Global parameter settings
```
[global]
parameter: cwd = path('./mashr_flashr_workflow_output')
# Input summary statistics data
parameter: data = path("fastqtl_to_mash_output/FastQTLSumStats.mash.rds")
# Prefix of output files. If not specified, it will derive it from data.
# If it is specified, for example, `--output-prefix AnalysisResults`
# It will save output files as `{cwd}/AnalysisResults*`.
parameter: output_prefix = ''
# Exchangeable effect (EE) or exchangeable z-scores (EZ)
parameter: effect_model = 'EZ'
# Identifier of $\hat{V}$ estimate file
# Options are "identity", "simple", "mle", "vhat_corshrink_xcondition", "vhat_simple_specific"
parameter: vhat = 'mle'
parameter: mixture_components = ['flash', 'flash_nonneg', 'pca']
data = data.absolute()
cwd = cwd.absolute()
if len(output_prefix) == 0:
output_prefix = f"{data:bn}"
prior_data = file_target(f"{cwd:a}/{output_prefix}.{effect_model}.prior.rds")
vhat_data = file_target(f"{cwd:a}/{output_prefix}.{effect_model}.V_{vhat}.rds")
mash_model = file_target(f"{cwd:a}/{output_prefix}.{effect_model}.V_{vhat}.mash_model.rds")
def sort_uniq(seq):
seen = set()
return [x for x in seq if not (x in seen or seen.add(x))]
```
### Command interface
```
sos run mashr_flashr_workflow.ipynb -h
```
## Factor analyses
```
# Perform FLASH analysis with default factors (time estimate: 20min)
[flash]
input: data
output: f"{cwd}/{output_prefix}.flash.rds"
task: trunk_workers = 1, walltime = '2h', trunk_size = 1, mem = '8G', cores = 2, tags = f'{_output:bn}'
R: expand = "${ }", stderr = f'{_output:n}.stderr', stdout = f'{_output:n}.stdout'
dat = readRDS(${_input:r})
dat = mashr::mash_set_data(dat$strong.b, Shat=dat$strong.s, alpha=${1 if effect_model == 'EZ' else 0}, zero_Bhat_Shat_reset = 1E3)
res = mashr::cov_flash(dat, factors="default", remove_singleton=${"TRUE" if "canonical" in mixture_components else "FALSE"}, output_model="${_output:n}.model.rds")
saveRDS(res, ${_output:r})
# Perform FLASH analysis with non-negative factor constraint (time estimate: 20min)
[flash_nonneg]
input: data
output: f"{cwd}/{output_prefix}.flash_nonneg.rds"
task: trunk_workers = 1, walltime = '2h', trunk_size = 1, mem = '8G', cores = 2, tags = f'{_output:bn}'
R: expand = "${ }", stderr = f'{_output:n}.stderr', stdout = f'{_output:n}.stdout'
dat = readRDS(${_input:r})
dat = mashr::mash_set_data(dat$strong.b, Shat=dat$strong.s, alpha=${1 if effect_model == 'EZ' else 0}, zero_Bhat_Shat_reset = 1E3)
res = mashr::cov_flash(dat, factors="nonneg", remove_singleton=${"TRUE" if "canonical" in mixture_components else "FALSE"}, output_model="${_output:n}.model.rds")
saveRDS(res, ${_output:r})
[pca]
# Number of components in PCA analysis for prior
# set to 3 as in mash paper
parameter: npc = 3
input: data
output: f"{cwd}/{output_prefix}.pca.rds"
task: trunk_workers = 1, walltime = '1h', trunk_size = 1, mem = '4G', cores = 2, tags = f'{_output:bn}'
R: expand = "${ }", stderr = f'{_output:n}.stderr', stdout = f'{_output:n}.stdout'
dat = readRDS(${_input:r})
dat = mashr::mash_set_data(dat$strong.b, Shat=dat$strong.s, alpha=${1 if effect_model == 'EZ' else 0}, zero_Bhat_Shat_reset = 1E3)
res = mashr::cov_pca(dat, ${npc})
saveRDS(res, ${_output:r})
```
### Estimate residual variance
FIXME: add some narratives here explaining what we do in each method.
```
# V estimate: "identity" method
[vhat_identity]
input: data
output: f'{vhat_data:nn}.V_identity.rds'
R: expand = "${ }", workdir = cwd, stderr = f"{_output:n}.stderr", stdout = f"{_output:n}.stdout"
dat = readRDS(${_input:r})
saveRDS(diag(ncol(dat$random.b)), ${_output:r})
# V estimate: "simple" method (using null z-scores)
[vhat_simple]
depends: R_library("mashr")
input: data
output: f'{vhat_data:nn}.V_simple.rds'
R: expand = "${ }", workdir = cwd, stderr = f"{_output:n}.stderr", stdout = f"{_output:n}.stdout"
library(mashr)
dat = readRDS(${_input:r})
vhat = estimate_null_correlation_simple(mash_set_data(dat$random.b, Shat=dat$random.s, alpha=${1 if effect_model == 'EZ' else 0}, zero_Bhat_Shat_reset = 1E3))
saveRDS(vhat, ${_output:r})
# V estimate: "mle" method
[vhat_mle]
# number of samples to use
parameter: n_subset = 6000
# maximum number of iterations
parameter: max_iter = 6
depends: R_library("mashr")
input: data, prior_data
output: f'{vhat_data:nn}.V_mle.rds'
task: trunk_workers = 1, walltime = '36h', trunk_size = 1, mem = '4G', cores = 1, tags = f'{_output:bn}'
R: expand = "${ }", workdir = cwd, stderr = f"{_output:n}.stderr", stdout = f"{_output:n}.stdout"
library(mashr)
dat = readRDS(${_input[0]:r})
# choose random subset
set.seed(1)
random.subset = sample(1:nrow(dat$random.b), min(${n_subset}, nrow(dat$random.b)))
random.subset = mash_set_data(dat$random.b[random.subset,], dat$random.s[random.subset,], alpha=${1 if effect_model == 'EZ' else 0}, zero_Bhat_Shat_reset = 1E3)
# estimate V mle
vhat = estimate_null_correlation(random.subset, readRDS(${_input[1]:r}), max_iter = ${max_iter})
saveRDS(vhat, ${_output:r})
# Estimate each V separately via corshrink
[vhat_corshrink_xcondition_1]
# Utility script
parameter: util_script = path('/project/mstephens/gtex/scripts/SumstatQuery.R')
# List of genes to analyze
parameter: gene_list = path()
fail_if(not gene_list.is_file(), msg = 'Please specify valid path for --gene-list')
fail_if(not util_script.is_file() and len(str(util_script)), msg = 'Please specify valid path for --util-script')
genes = sort_uniq([x.strip().strip('"') for x in open(f'{gene_list:a}').readlines() if not x.strip().startswith('#')])
depends: R_library("CorShrink")
input: data, for_each = 'genes'
output: f'{vhat_data:nn}/{vhat_data:bnn}_V_corshrink_{_genes}.rds'
task: trunk_workers = 1, walltime = '3m', trunk_size = 500, mem = '3G', cores = 1, tags = f'{_output:bn}'
R: expand = "${ }", workdir = cwd, stderr = f"{_output:n}.stderr", stdout = f"{_output:n}.stdout"
source(${util_script:r})
CorShrink_sum = function(gene, database, z_thresh = 2){
print(gene)
dat <- GetSS(gene, database)
z = dat$"z-score"
max_absz = apply(abs(z), 1, max)
nullish = which(max_absz < z_thresh)
# if (length(nullish) < ncol(z)) {
# stop("not enough null data to estimate null correlation")
# }
if (length(nullish) <= 1){
mat = diag(ncol(z))
} else {
nullish_z = z[nullish, ]
mat = as.matrix(CorShrink::CorShrinkData(nullish_z, ash.control = list(mixcompdist = "halfuniform"))$cor)
}
return(mat)
}
V = CorShrink_sum("${_genes}", ${data:r})
saveRDS(V, ${_output:r})
# Estimate each V separately via "simple" method
[vhat_simple_specific_1]
# Utility script
parameter: util_script = path('/project/mstephens/gtex/scripts/SumstatQuery.R')
# List of genes to analyze
parameter: gene_list = path()
fail_if(not gene_list.is_file(), msg = 'Please specify valid path for --gene-list')
fail_if(not util_script.is_file() and len(str(util_script)), msg = 'Please specify valid path for --util-script')
genes = sort_uniq([x.strip().strip('"') for x in open(f'{gene_list:a}').readlines() if not x.strip().startswith('#')])
depends: R_library("Matrix")
input: data, for_each = 'genes'
output: f'{vhat_data:nn}/{vhat_data:bnn}_V_simple_{_genes}.rds'
task: trunk_workers = 1, walltime = '1m', trunk_size = 500, mem = '3G', cores = 1, tags = f'{_output:bn}'
R: expand = "${ }", workdir = cwd, stderr = f"{_output:n}.stderr", stdout = f"{_output:n}.stdout"
source(${util_script:r})
simple_V = function(gene, database, z_thresh = 2){
print(gene)
dat <- GetSS(gene, database)
z = dat$"z-score"
max_absz = apply(abs(z), 1, max)
nullish = which(max_absz < z_thresh)
# if (length(nullish) < ncol(z)) {
# stop("not enough null data to estimate null correlation")
# }
if (length(nullish) <= 1){
mat = diag(ncol(z))
} else {
nullish_z = z[nullish, ]
mat = as.matrix(Matrix::nearPD(as.matrix(cov(nullish_z)), conv.tol=1e-06, doSym = TRUE, corr=TRUE)$mat)
}
return(mat)
}
V = simple_V("${_genes}", ${data:r})
saveRDS(V, ${_output:r})
# Consolidate Vhat into one file
[vhat_corshrink_xcondition_2, vhat_simple_specific_2]
depends: R_library("parallel")
# List of genes to analyze
parameter: gene_list = path()
fail_if(not gene_list.is_file(), msg = 'Please specify valid path for --gene-list')
genes = paths([x.strip().strip('"') for x in open(f'{gene_list:a}').readlines() if not x.strip().startswith('#')])
input: group_by = 'all'
output: f"{vhat_data:nn}.V_{step_name.rsplit('_',1)[0]}.rds"
task: trunk_workers = 1, walltime = '1h', trunk_size = 1, mem = '4G', cores = 1, tags = f'{_output:bn}'
R: expand = "${ }", workdir = cwd, stderr = f"{_output:n}.stderr", stdout = f"{_output:n}.stdout"
library(parallel)
files = sapply(c(${genes:r,}), function(g) paste0(c(${_input[0]:adr}), '/', g, '.rds'), USE.NAMES=FALSE)
V = mclapply(files, function(i){ readRDS(i) }, mc.cores = 1)
R = dim(V[[1]])[1]
L = length(V)
V.array = array(as.numeric(unlist(V)), dim=c(R, R, L))
saveRDS(V.array, ${_output:ar})
```
### Compute MASH priors
Main reference are our `mashr` vignettes [this for mashr eQTL outline](https://stephenslab.github.io/mashr/articles/eQTL_outline.html) and [this for using FLASH prior](https://github.com/stephenslab/mashr/blob/master/vignettes/flash_mash.Rmd).
The outcome of this workflow should be found under the `./mashr_flashr_workflow_output` folder (can be configured). File names have the pattern `*.mash_model_*.rds`. They can be used to compute posteriors for an input list of gene-SNP pairs (see next section).
```
# Compute data-driven / canonical prior matrices (time estimate: 2h ~ 12h for a mixture of ~30 49x49 matrices)
[prior]
depends: R_library("mashr")
# if vhat method is `mle` it should use V_simple to analyze the data to provide a rough estimate, then later be refined via `mle`.
input: [data, vhat_data if vhat != "mle" else f'{vhat_data:nn}.V_simple.rds'] + [f"{cwd}/{output_prefix}.{m}.rds" for m in mixture_components]
output: prior_data
task: trunk_workers = 1, walltime = '36h', trunk_size = 1, mem = '4G', cores = 4, tags = f'{_output:bn}'
R: expand = "${ }", workdir = cwd, stderr = f"{_output:n}.stderr", stdout = f"{_output:n}.stdout"
library(mashr)
rds_files = c(${_input:r,})
dat = readRDS(rds_files[1])
vhat = readRDS(rds_files[2])
mash_data = mash_set_data(dat$strong.b, Shat=dat$strong.s, V=vhat, alpha=${1 if effect_model == 'EZ' else 0}, zero_Bhat_Shat_reset = 1E3)
# setup prior
U = list(XtX = t(mash_data$Bhat) %*% mash_data$Bhat / nrow(mash_data$Bhat))
for (f in rds_files[3:length(rds_files)]) U = c(U, readRDS(f))
U.ed = cov_ed(mash_data, U, logfile=${_output:nr})
# Canonical matrices
U.can = cov_canonical(mash_data)
saveRDS(c(U.ed, U.can), ${_output:r})
```
## `mashr` mixture model fitting
```
# Fit MASH mixture model (time estimate: <15min for 70K by 49 matrix)
[mash_1]
depends: R_library("mashr")
input: data, vhat_data, prior_data
output: mash_model
task: trunk_workers = 1, walltime = '36h', trunk_size = 1, mem = '4G', cores = 1, tags = f'{_output:bn}'
R: expand = "${ }", workdir = cwd, stderr = f"{_output:n}.stderr", stdout = f"{_output:n}.stdout"
library(mashr)
dat = readRDS(${_input[0]:r})
vhat = readRDS(${_input[1]:r})
U = readRDS(${_input[2]:r})
mash_data = mash_set_data(dat$random.b, Shat=dat$random.s, alpha=${1 if effect_model == 'EZ' else 0}, V=vhat, zero_Bhat_Shat_reset = 1E3)
saveRDS(mash(mash_data, Ulist = U, outputlevel = 1), ${_output:r})
```
### Optional posterior computations
Additionally provide posterior for the "strong" set in MASH input data.
```
# Compute posterior for the "strong" set of data as in Urbut et al 2017.
# This is optional because most of the time we want to apply the
# MASH model learned on much larger data-set.
[mash_2]
# default to True; use --no-compute-posterior to disable this
parameter: compute_posterior = True
# input Vhat file for the batch of posterior data
skip_if(not compute_posterior)
depends: R_library("mashr")
input: data, vhat_data, mash_model
output: f"{cwd:a}/{output_prefix}.{effect_model}.posterior.rds"
task: trunk_workers = 1, walltime = '36h', trunk_size = 1, mem = '4G', cores = 1, tags = f'{_output:bn}'
R: expand = "${ }", workdir = cwd, stderr = f"{_output:n}.stderr", stdout = f"{_output:n}.stdout"
library(mashr)
dat = readRDS(${_input[0]:r})
vhat = readRDS(${_input[1]:r})
mash_data = mash_set_data(dat$strong.b, Shat=dat$strong.s, alpha=${1 if effect_model == 'EZ' else 0}, V=vhat, zero_Bhat_Shat_reset = 1E3)
mash_model = readRDS(${_input[2]:ar})
saveRDS(mash_compute_posterior_matrices(mash_model, mash_data), ${_output:r})
```
## Compute MASH posteriors
In the GTEx V6 paper we assumed one eQTL per gene and applied the model learned above to those SNPs. Under that assumption, the input data for posterior calculation will be the `dat$strong.*` matrices.
It is a fairly straightforward procedure as shown in [this vignette](https://stephenslab.github.io/mashr/articles/eQTL_outline.html).
But it is often more interesting to apply MASH to given list of eQTLs, eg, from those from fine-mapping results. In GTEx V8 analysis we obtain such gene-SNP pairs from DAP-G fine-mapping analysis. See [this notebook](https://stephenslab.github.io/gtex-eqtls/analysis/Independent_eQTL_Results.html) for how the input data is prepared. The workflow below takes a number of input chunks (each chunk is a list of matrices `dat$Bhat` and `dat$Shat`)
and computes the posterior for each chunk. It is therefore suited to running the posterior computation for all gene-SNP pairs in parallel, provided input data chunks are supplied.
```
JOB_OPT="-c midway2.yml -q midway2"
DATA_DIR=/project/compbio/GTEx_eQTL/independent_eQTL
sos run workflows/mashr_flashr_workflow.ipynb posterior \
$JOB_OPT \
--posterior-input $DATA_DIR/DAPG_pip_gt_0.01-AllTissues/DAPG_pip_gt_0.01-AllTissues.*.rds \
$DATA_DIR/ConditionalAnalysis_AllTissues/ConditionalAnalysis_AllTissues.*.rds
```
```
# Apply posterior calculations
[posterior]
parameter: mash_model = path(f"{cwd:a}/{output_prefix}.{effect_model}.V_{vhat}.mash_model.rds")
parameter: posterior_input = paths()
parameter: posterior_vhat_files = paths()
# eg, if data is saved in R list as data$strong, then
# when you specify `--data-table-name strong` it will read the data as
# readRDS('{_input:r}')$strong
parameter: data_table_name = ''
parameter: bhat_table_name = 'Bhat'
parameter: shat_table_name = 'Shat'
mash_model = f"{mash_model:a}"
skip_if(len(posterior_input) == 0, msg = "No posterior input data to compute on. Please specify it using --posterior-input.")
fail_if(len(posterior_vhat_files) > 1 and len(posterior_vhat_files) != len(posterior_input), msg = "length of --posterior-input and --posterior-vhat-files do not agree.")
for p in posterior_input:
fail_if(not p.is_file(), msg = f'Cannot find posterior input file ``{p}``')
depends: R_library("mashr"), mash_model
input: posterior_input, group_by = 1
output: f"{_input:n}.posterior.rds"
task: trunk_workers = 1, walltime = '20h', trunk_size = 1, mem = '20G', cores = 1, tags = f'{_output:bn}'
R: expand = "${ }", workdir = cwd, stderr = f"{_output:n}.stderr", stdout = f"{_output:n}.stdout"
library(mashr)
data = readRDS(${_input:r})${('$' + data_table_name) if data_table_name else ''}
vhat = readRDS("${vhat_data if len(posterior_vhat_files) == 0 else posterior_vhat_files[_index]}")
mash_data = mash_set_data(data$${bhat_table_name}, Shat=data$${shat_table_name}, alpha=${1 if effect_model == 'EZ' else 0}, V=vhat, zero_Bhat_Shat_reset = 1E3)
saveRDS(mash_compute_posterior_matrices(readRDS(${mash_model:r}), mash_data), ${_output:r})
```
### Posterior results
1. The outcome of the `[posterior]` step should produce a number of serialized R objects `*.batch_*.posterior.rds` (can be loaded into R via `readRDS()`) -- I chopped the data into batches to take advantage of computing on multiple cluster nodes. It should be self-explanatory but please let me know otherwise.
2. Other posterior related files are:
1. `*.batch_*.yaml`: gene-SNP pairs of interest, identified elsewhere (eg. fine-mapping analysis).
2. The corresponding univariate analysis summary statistics for gene-SNPs from `*.batch_*.yaml` are extracted and saved to `*.batch_*.rds`, creating input to the `[posterior]` step.
3. Note the `*.batch_*.stdout` file documents some SNPs found in fine-mapping results but not found in the original `fastqtl` output.
### Set Data Path
```
from pathlib import Path
base_dir = Path("data")
train_dir = base_dir/Path("train")
validation_dir = base_dir/Path("validation")
test_dir = base_dir/Path("test")
```
### Image Transform Function
```
from torchvision import transforms
transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=(.5, .5, .5), std=(.5, .5, .5))
])
```
### Load Training Data (x: features, y: labels)
```
import torch
from PIL import Image
x, y = [], []
for file_name in train_dir.glob("*.jpg"):
bounding_box_file = file_name.with_suffix('.txt')
with open(bounding_box_file) as file:
lines = file.readlines()
if(len(lines) > 1):
continue
else:
line = lines[0].strip('\n')
(classes, cen_x, cen_y, box_w, box_h) = list(map(float, line.split(' ')))
torch_data = torch.FloatTensor([cen_x, cen_y, box_w, box_h])
y.append(torch_data)
img = Image.open(str(file_name)).convert('RGB')
img = transform(img)
x.append(img)
```
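The parsing above assumes each image has a companion `.txt` file with one object per line in the common YOLO-style layout `class cx cy w h`, where the box center and size are normalized to [0, 1]; this layout is inferred from the code rather than stated in the original. A hypothetical single-object label file would look like:
```
0 0.512 0.430 0.300 0.250
```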
### Put Training Data into Torch Loader
```
import torch.utils.data as Data
tensor_x = torch.stack(x)
tensor_y = torch.stack(y)
torch_dataset = Data.TensorDataset(tensor_x, tensor_y)
loader = Data.DataLoader(dataset=torch_dataset, batch_size=32, shuffle=True, num_workers=2)
```
### Load Pretrained ResNet18 Model
```
import torchvision
from torch import nn
model = torchvision.models.resnet18(pretrained=True)
fc_in_size = model.fc.in_features
model.fc = nn.Linear(fc_in_size, 4)
```
### Parameters
```
EPOCH = 10
LR = 1e-3
```
### Loss Function & Optimizer
```
loss_func = nn.SmoothL1Loss()
opt = torch.optim.Adam(model.parameters(), lr=LR)
```
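`SmoothL1Loss` behaves quadratically for small errors and linearly for large ones, which makes the box-coordinate regression less sensitive to outliers. With PyTorch's default `beta = 1`, the element-wise loss is
$$
\ell(x, y) =
\begin{cases}
0.5\,(x - y)^2 & \text{if } |x - y| < 1 \\
|x - y| - 0.5 & \text{otherwise.}
\end{cases}
$$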
### Training
```
for epoch in range(EPOCH):
for step, (batch_x, batch_y) in enumerate(loader):
batch_x = batch_x
batch_y = batch_y
output = model(batch_x)
loss = loss_func(output, batch_y)
opt.zero_grad()
loss.backward()
opt.step()
if(step % 5 == 0):
print("Epoch {} | Step {} | Loss {}".format(epoch, step, loss))
```
### Show Some of the Predictions
```
%matplotlib inline
import cv2
from matplotlib import pyplot as plt
import numpy as np
model = model.cpu()
for batch_x, batch_y in loader:
predict = model(batch_x)
for x, pred, y in zip(batch_x, predict, batch_y):
(pos_x, pos_y, box_w, box_h) = pred
pos_x *= 224
pos_y *= 224
box_w *= 224
box_h *= 224
image = transforms.ToPILImage()(x)
img = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
img = cv2.rectangle(img, (int(pos_x - box_w/2), int(pos_y - box_h/2)), (int(pos_x + box_w/2), int(pos_y + box_h/2)), (255, 0, 0), 3)  # cv2 needs integer pixel coordinates
plt.imshow(img)
plt.show()
break
```
# Partitioning feature space
**Make sure to get latest dtreeviz**
```
! pip install -q -U dtreeviz
! pip install -q graphviz==0.17 # 0.18 deletes the `run` func I need
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression, Ridge, Lasso, LogisticRegression
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import load_boston, load_iris, load_wine, load_digits, \
load_breast_cancer, load_diabetes
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, accuracy_score
import matplotlib.pyplot as plt
%config InlineBackend.figure_format = 'retina'
from sklearn import tree
from dtreeviz.trees import *
from dtreeviz.models.shadow_decision_tree import ShadowDecTree
def show_mse_leaves(X,y,max_depth):
t = DecisionTreeRegressor(max_depth=max_depth)
t.fit(X,y)
shadow = ShadowDecTree.get_shadow_tree(t, X, y, feature_names=['sqfeet'], target_name='rent')
root, leaves, internal = shadow._get_tree_nodes()
# node2samples = shadow._get_tree_nodes()_samples()
# isleaf = shadow.get_node_type(t)
n_node_samples = t.tree_.n_node_samples
mse = mean_squared_error(y, [np.mean(y)]*len(y))
print(f"Root {0:3d} has {n_node_samples[0]:3d} samples with MSE ={mse:6.2f}")
print("-----------------------------------------")
avg_mse_per_record = 0.0
node2samples = shadow.get_node_samples()
for node in leaves:
leafy = y[node2samples[node.id]]
n = len(leafy)
mse = mean_squared_error(leafy, [np.mean(leafy)]*n)
avg_mse_per_record += mse * n
print(f"Node {node.id:3d} has {n_node_samples[node.id]:3d} samples with MSE ={mse:6.2f}")
avg_mse_per_record /= len(y)
print(f"Average MSE per record is {avg_mse_per_record:.1f}")
```
## Regression
```
df_cars = pd.read_csv("data/cars.csv")
X, y = df_cars[['ENG']], df_cars['MPG']
df_cars.head(3)
dt = DecisionTreeRegressor(max_depth=1)
dt.fit(X, y)
rtreeviz_univar(dt, X, y,
feature_names='Horsepower',
markersize=5,
mean_linewidth=1,
target_name='MPG',
fontsize=9,
show={})
```
**Q.** What is the MSE between y and predicted $\hat{y} = \overline{y}$?
Hints: You can use function `mean_squared_error(` $y$,$\hat{y}$ `)`; create a vector of length $|y|$ with $\overline{y}$ as elements.
<details>
<summary>Solution</summary>
<pre>
mean_squared_error(y, [np.mean(y)]*len(y)) # about 60.76
</pre>
</details>
**Q.** Where would you split this if you could only split once? Set the `split` variable to a reasonable value.
```
split = ...
```
<details>
<summary>Solution</summary>
The split location that gets most pure subregion might be about split = 200 HP because the region to the right has a relatively flat MPG average.
</details>
**Alter the rtreeviz_univar() call to show the split with arg show={'splits'}**
<details>
<summary>Solution</summary>
<pre>
rtreeviz_univar(dt, X, y,
feature_names='Horsepower',
markersize=5,
mean_linewidth=1,
target_name='MPG',
fontsize=9,
show={'splits'})
</pre>
</details>
**Q.** What are the MSE values for the left, right partitions?
Hints: Get the y values whose `X['ENG']` are less than `split` into `lefty` and those greater than or equal to `split` into `righty`. The split introduces two new children that are leaves until we (possibly) split them; the leaves predict the mean of their samples.
```
lefty = ...; mleft = ...
righty = ...; mright = ...
mse_left = ...
mse_right = ...
mse_left, mse_right
```
<details>
<summary>Solution</summary>
Should be (35.68916307096633, 12.770261374699789)<p>
<pre>
lefty = y[X['ENG']<split]
righty = y[X['ENG']>=split]
mleft = np.mean(lefty)
mright = np.mean(righty)
mse_left = mean_squared_error(lefty, [mleft]\*len(lefty))
mse_right = mean_squared_error(righty, [mright]\*len(righty))
</pre>
</details>
**Q.** Compare the MSE values for overall y and the average of the left, right partition MSEs (which is about 24.2)?
<details>
<summary>Solution</summary>
After the split the MSE of the children is much lower than before the split, therefore, it is a worthwhile split.
</details>
**Q.** Set the split value to 100 and recompare MSE values for y, left, and right.
<details>
<summary>Solution</summary>
With split=100, mse_left, mse_right become 33.6 and 41.0. These are still less than the y MSE of 60.7 so worthwhile but not nearly as splitting at 200.
</details>
### Effect of deeper trees
Consider the sequence of tree depths 1..6 for horsepower vs MPG.
```
X = df_cars[['ENG']].values
y = df_cars['MPG'].values
fig, axes = plt.subplots(1,6, figsize=(14,3), sharey=True)
for i,ax in enumerate(axes.flatten()):
dt = DecisionTreeRegressor(max_depth=i+1)
dt.fit(X, y)
t = rtreeviz_univar(dt,
X, y,
feature_names='Horsepower',
markersize=5,
mean_linewidth=1,
target_name='MPG' if i==0 else None,
fontsize=9,
show={'splits'},
ax=ax)
ax.set_title(f"Depth {i+1}", fontsize=9)
plt.tight_layout()
plt.show()
```
**Q.** Focusing on the orange horizontal lines, what do you notice as more splits appear?
<details>
<summary>Solution</summary>
With depth 1, model is biased due to coarseness of the approximations (just 2 leaf means). Depth 2 gets much better approximation, so bias is lower. As we add more depth to tree, number of splits increases and these appear to be chasing details of the data, decreasing bias on training set but also hurting generality.
</details>
**Q.** Consider the MSE for the 4 leaves of a depth 2 tree and 15 leaves of a depth 4 tree. What happens to the average MSE per leaf? What happens to the leaf sizes and how is it related to average MSE?
```
show_mse_leaves(df_cars[['ENG']], df_cars['MPG'], max_depth=2)
show_mse_leaves(df_cars[['ENG']], df_cars['MPG'], max_depth=4)
```
<details>
<summary>Solution</summary>
The average MSE is much lower as we increase depth because that allows the tree to isolate pure/more-similar regions. This also shrinks leaf size since we are splitting more as the tree deepens.
</details>
Consider the plot of the CYL feature (num cylinders) vs MPG:
```
X = df_cars[['CYL']].values
y = df_cars['MPG'].values
fig, axes = plt.subplots(1,3, figsize=(7,2.5), sharey=True)
depths = [1,2,10]
for i,ax in enumerate(axes.flatten()):
dt = DecisionTreeRegressor(max_depth=depths[i])
dt.fit(X, y)
t = rtreeviz_univar(dt,
X, y,
feature_names='CYL',
markersize=5,
mean_linewidth=1,
target_name='MPG' if i==0 else None,
fontsize=9,
show={'splits','title'},
ax=ax)
ax.set_title(f"Depth {depths[i]}", fontsize=9)
plt.tight_layout()
plt.show()
```
**Q.** Explain why the graph looks like a bunch of vertical bars.
<details>
<summary>Solution</summary>
The x values are integers and will clump together. Since there are many MPG values at each int, you get vertical clumps of data.
</details>
**Q.** Why don't we get many more splits for depth 10 vs depth 2?
<details>
<summary>Solution</summary>
Once each unique x value has a "bin", there are no more splits to do.
</details>
**Q.** Why are the orange predictions bars at the levels they are in the plot?
<details>
<summary>Solution</summary>
Decision tree leaves predict the average y for all samples in a leaf.
</details>
## Classification
```
wine = load_wine()
df_wine = pd.DataFrame(data=wine.data, columns=wine.feature_names)
df_wine.head(3)
feature_names = list(wine.feature_names)
class_names = list(wine.target_names)
```
### 1 variable
```
X = df_wine[['flavanoids']].values
y = wine.target
dt = DecisionTreeClassifier(max_depth=1)
dt.fit(X, y)
fig, ax = plt.subplots(1,1, figsize=(4,1.8))
ct = ctreeviz_univar(dt, X, y,
feature_names = 'flavanoids',
class_names=class_names,
target_name='Wine',
nbins=40, gtype='strip',
fontsize=9,
show={},
colors={'scatter_marker_alpha':1, 'scatter_marker_alpha':1},
ax=ax)
plt.show()
```
**Q.** Where would you split this (vertically) if you could only split once?
<details>
<summary>Solution</summary>
The split location that gets most pure subregion might be about 1.5 because it nicely carves off the left green samples.
</details>
**Alter the code to show the split with arg show={'splits'}**
<details>
<summary>Solution</summary>
<pre>
X = df_wine[['flavanoids']].values
y = wine.target
dt = DecisionTreeClassifier(max_depth=1)
dt.fit(X, y)
fig, ax = plt.subplots(1,1, figsize=(4,1.8))
ct = ctreeviz_univar(dt, X, y,
feature_names = 'flavanoids',
class_names=class_names,
target_name='Wine',
nbins=40, gtype='strip',
fontsize=9,
show={'splits'},
colors={'scatter_marker_alpha':1, 'scatter_marker_alpha':1},
ax=ax)
plt.show()
</pre>
</details>
**Q.** For max_depth=2, how many splits will we get?
<details>
<summary>Solution</summary>
3. We get one split for root and then with depth=2, we have 2 children that each get a split.
</details>
**Q.** Where would you split this graph in that many places?
<details>
<summary>Solution</summary>
Once we carve off the leftmost green, we would want to isolate the blue in between 1.3 and 2.3. The other place to split is not obvious as there is no great choice. (sklearn will add a split point at 1.0)
</details>
**Alter the code to show max_depth=2**
<details>
<summary>Solution</summary>
<pre>
X = df_wine[['flavanoids']].values
y = wine.target
dt = DecisionTreeClassifier(max_depth=2)
dt.fit(X, y)
fig, ax = plt.subplots(1,1, figsize=(4,1.8))
ct = ctreeviz_univar(dt, X, y,
feature_names = 'flavanoids',
class_names=class_names,
target_name='Wine',
nbins=40, gtype='strip',
fontsize=9,
show={'splits'},
colors={'scatter_marker_alpha':1, 'scatter_marker_alpha':1},
ax=ax)
plt.show()
</pre>
</details>
### Gini impurity
Let's compute the gini impurity for left and right sides for a depth=1 tree that splits flavanoids at 1.3. Here's a function that computes the value:
$$
Gini({\bf p}) = \sum_{i=1}^{k} p_i \left[ \sum_{j \ne i}^k p_j \right] = \sum_{i=1}^{k} p_i (1 - p_i) = 1 - \sum_{i=1}^{k} p_i^2
$$
where $p_i = \frac{|y[y==i]|}{|y|}$. Since $\sum_{j \ne i}^k p_j$ is the probability of "not $p_i$", we can summarize that as just $1-p_i$. The gini value is then computing $p_i$ times "not $p_i$" for $k$ classes. Value $p_i$ is the probability of seeing class $i$ in a list of target values, $y$.
```
def gini(y):
"""
Compute gini impurity from y vector of class values (from k unique values).
Result is in range 0..(k-1/k) inclusive; binary range is 0..1/2.
See https://en.wikipedia.org/wiki/Decision_tree_learning#Gini_impurity"
"""
_, counts = np.unique(y, return_counts=True)
p = counts / len(y)
return 1 - np.sum( p**2 )
```
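As a quick sanity check of this function (not part of the original exercise): an evenly mixed binary vector should give the maximum binary impurity of 0.5, while a pure vector gives 0.
```
gini(np.array([0, 0, 1, 1]))   # 0.5 -- evenly mixed binary labels
gini(np.array([1, 1, 1, 1]))   # 0.0 -- a pure set of labels
```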
**Q.** Using that function, what is the gini impurity for the overall y target
<details>
<summary>Solution</summary>
gini(y) # about 0.66
</details>
**Get all y values for rows where `df_wine['flavanoids']`<1.3 into variable `lefty` and `>=` into `righty`**
```
lefty = ...
righty = ...
```
<details>
<summary>Solution</summary>
<pre>
lefty = y[df_wine['flavanoids']<1.3]
righty = y[df_wine['flavanoids']>=1.3]
</pre>
</details>
**Q.** What are the gini values for left and right partitions?
<details>
<summary>Solution</summary>
gini(lefty), gini(righty) # about 0.27, 0.53
</details>
**Q.** What can we conclude about the purity of left and right? Also, compare to gini for all y values.
<details>
<summary>Solution</summary>
Left partition is much more pure than right but right is still more pure than original gini(y). We can conclude that the split is worthwhile as the partition would let us give more accurate predictions.
</details>
### 2 variables
```
X = df_wine[['alcohol','flavanoids']].values
y = wine.target
dt = DecisionTreeClassifier(max_depth=1)
dt.fit(X, y)
fig, ax = plt.subplots(1, 1, figsize=(4,3))
ct = ctreeviz_bivar(dt, X, y,
feature_names = ['alcohol','flavanoid'], class_names=class_names,
target_name='wine',
show={},
colors={'scatter_marker_alpha':1, 'scatter_marker_alpha':1},
ax=ax
)
```
**Q.** Which variable and split point would you choose if you could only split once?
<details>
<summary>Solution</summary>
Because the blue dots are spread vertically, a horizontal split won't be very good. Hence, we should choose the alcohol variable. The best split will carve off the blue dots, leaving the yellow and green mixed up. A split at about alcohol=12.7 seems pretty good.
</details>
**Modify the code to view the splits and compare your answer**
**Q.** Which variable and split points would you choose next for depth=2?
<details>
<summary>Solution</summary>
Once we carve off most of the blue vertically, we should separate the yellow by choosing flavanoid=1.7 to split horizontally. NOTICE, however, that the 2nd split will not be across entire graph since we are splitting the region on the right. Splitting on the left can be at flavanoid=1 so we isolate the green from blue on left.
</details>
**Modify the code to view the splits for depth=2 and compare your answer**
### Gini
Let's examine gini impurity for a different pair of variables.
```
X = df_wine[['proline','flavanoids']].values
y = wine.target
dt = DecisionTreeClassifier(max_depth=1)
dt.fit(X, y)
fig, ax = plt.subplots(1, 1, figsize=(4,3))
ctreeviz_bivar(dt, X, y,
feature_names = ['proline','flavanoid'],
class_names=class_names,
                   target_name='Wine',
                   show={'splits'},
                   colors={'scatter_marker_alpha':1},
ax=ax)
plt.show()
```
**Get all y values for rows where the split var is less than the split value into variable `lefty` and those `>=` into `righty`**
```
lefty = ...
righty = ...
```
<details>
<summary>Solution</summary>
<pre>
lefty = y[df_wine['proline']<750]
righty = y[df_wine['proline']>=750]
</pre>
</details>
**Print out the gini for y, lefty, righty**
<details>
<summary>Solution</summary>
<pre>
gini(y), gini(lefty), gini(righty)
</pre>
</details>
## Train a single tree and print out the training accuracy (num correct / total)
```
t = DecisionTreeClassifier()
t.fit(df_wine, y)
accuracy_score(y, t.predict(df_wine))
```
Take a look at the feature importance:
```
from rfpimp import *
I = importances(t, df_wine, y)
plot_importances(I)
```
## Dataset
The CIFAR-10 dataset (Canadian Institute For Advanced Research) is a collection of images that are commonly used to train machine learning and computer vision algorithms. It is one of the most widely used datasets for machine learning research. The CIFAR-10 dataset contains 60,000 32x32 color images in 10 different classes. The 10 different classes represent airplanes, cars, birds, cats, deer, dogs, frogs, horses, ships, and trucks. There are 6,000 images of each class.
Computer algorithms for recognizing objects in photos often learn by example. CIFAR-10 is a set of images that can be used to teach a computer how to recognize objects. Since the images in CIFAR-10 are low-resolution (32x32), this dataset can allow researchers to quickly try different algorithms to see what works. Various kinds of convolutional neural networks tend to be the best at recognizing the images in CIFAR-10.
<table>
<tr>
<td class="cifar-class-name">airplane</td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/airplane1.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/airplane2.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/airplane3.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/airplane4.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/airplane5.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/airplane6.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/airplane7.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/airplane8.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/airplane9.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/airplane10.png" class="cifar-sample" /></td>
</tr>
<tr>
<td class="cifar-class-name">automobile</td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/automobile1.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/automobile2.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/automobile3.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/automobile4.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/automobile5.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/automobile6.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/automobile7.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/automobile8.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/automobile9.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/automobile10.png" class="cifar-sample" /></td>
</tr>
<tr>
<td class="cifar-class-name">bird</td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/bird1.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/bird2.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/bird3.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/bird4.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/bird5.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/bird6.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/bird7.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/bird8.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/bird9.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/bird10.png" class="cifar-sample" /></td>
</tr>
<tr>
<td class="cifar-class-name">cat</td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/cat1.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/cat2.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/cat3.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/cat4.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/cat5.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/cat6.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/cat7.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/cat8.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/cat9.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/cat10.png" class="cifar-sample" /></td>
</tr>
<tr>
<td class="cifar-class-name">deer</td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/deer1.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/deer2.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/deer3.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/deer4.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/deer5.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/deer6.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/deer7.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/deer8.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/deer9.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/deer10.png" class="cifar-sample" /></td>
</tr>
<tr>
<td class="cifar-class-name">dog</td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/dog1.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/dog2.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/dog3.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/dog4.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/dog5.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/dog6.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/dog7.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/dog8.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/dog9.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/dog10.png" class="cifar-sample" /></td>
</tr>
<tr>
<td class="cifar-class-name">frog</td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/frog1.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/frog2.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/frog3.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/frog4.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/frog5.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/frog6.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/frog7.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/frog8.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/frog9.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/frog10.png" class="cifar-sample" /></td>
</tr>
<tr>
<td class="cifar-class-name">horse</td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/horse1.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/horse2.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/horse3.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/horse4.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/horse5.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/horse6.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/horse7.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/horse8.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/horse9.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/horse10.png" class="cifar-sample" /></td>
</tr>
<tr>
<td class="cifar-class-name">ship</td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/ship1.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/ship2.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/ship3.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/ship4.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/ship5.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/ship6.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/ship7.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/ship8.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/ship9.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/ship10.png" class="cifar-sample" /></td>
</tr>
<tr>
<td class="cifar-class-name">truck</td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/truck1.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/truck2.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/truck3.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/truck4.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/truck5.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/truck6.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/truck7.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/truck8.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/truck9.png" class="cifar-sample" /></td>
<td><img src="https://www.cs.toronto.edu/~kriz/cifar-10-sample/truck10.png" class="cifar-sample" /></td>
</tr>
</table>
[Dataset Download](https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz)
### 1. Load CIFAR-10 Database
```
import keras
from keras.datasets import cifar10
# load the pre-shuffled train and test data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
```
### 2. Visualize the First 36 Training Images
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
fig = plt.figure(figsize=(20,5))
for i in range(36):
ax = fig.add_subplot(3, 12, i + 1, xticks=[], yticks=[])
ax.imshow(np.squeeze(x_train[i]))
```
### 3. Rescale the Images by Dividing Every Pixel in Every Image by 255
```
# rescale [0,255] --> [0,1]
x_train = x_train.astype('float32')/255
x_test = x_test.astype('float32')/255
```
### 4. One-Hot Encode the Labels and Break the Training Set into Training and Validation Sets
```
from keras.utils import np_utils
# one-hot encode the labels
num_classes = len(np.unique(y_train))
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# break training set into training and validation sets
(x_train, x_valid) = x_train[5000:], x_train[:5000]
(y_train, y_valid) = y_train[5000:], y_train[:5000]
# print shape of training set
print('x_train shape:', x_train.shape)
# print number of training, validation, and test images
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print(x_valid.shape[0], 'validation samples')
```
### 5. Define the Model Architecture
```
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=2, padding='same', activation='relu',
input_shape=(32, 32, 3)))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=64, kernel_size=2, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(10, activation='softmax'))
model.summary()
```
### 6. Compile the Model
```
# compile the model
model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
metrics=['accuracy'])
```
### 7. Train the Model
```
from keras.callbacks import ModelCheckpoint
# train the model
checkpointer = ModelCheckpoint(filepath='model.weights.best.hdf5', verbose=1,
save_best_only=True)
hist = model.fit(x_train, y_train, batch_size=32, epochs=100,
validation_data=(x_valid, y_valid), callbacks=[checkpointer],
verbose=2, shuffle=True)
```
### 8. Load the Model with the Best Validation Accuracy
```
# load the weights that yielded the best validation accuracy
model.load_weights('model.weights.best.hdf5')
```
### 9. Calculate Classification Accuracy on Test Set
```
# evaluate and print test accuracy
score = model.evaluate(x_test, y_test, verbose=0)
print('\n', 'Test accuracy:', score[1])
```
### 10. Visualize Some Predictions
This may give you some insight into why the network is misclassifying certain objects.
```
# get predictions on the test set
y_hat = model.predict(x_test)
# define text labels (source: https://www.cs.toronto.edu/~kriz/cifar.html)
cifar10_labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# plot a random sample of test images, their predicted labels, and ground truth
fig = plt.figure(figsize=(20, 8))
for i, idx in enumerate(np.random.choice(x_test.shape[0], size=32, replace=False)):
ax = fig.add_subplot(4, 8, i + 1, xticks=[], yticks=[])
ax.imshow(np.squeeze(x_test[idx]))
pred_idx = np.argmax(y_hat[idx])
true_idx = np.argmax(y_test[idx])
ax.set_title("{} ({})".format(cifar10_labels[pred_idx], cifar10_labels[true_idx]),
color=("green" if pred_idx == true_idx else "red"))
```
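For a more systematic view of the errors, a confusion matrix shows which classes get mixed up. This is a small sketch that assumes `scikit-learn` is installed and reuses `y_hat`, `y_test`, and `cifar10_labels` from above:
```
from sklearn.metrics import confusion_matrix
import pandas as pd

# collapse probability vectors / one-hot labels down to class indices
pred_classes = np.argmax(y_hat, axis=1)
true_classes = np.argmax(y_test, axis=1)

cm = confusion_matrix(true_classes, pred_classes)
print(pd.DataFrame(cm, index=cifar10_labels, columns=cifar10_labels))
```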
# Self Supervised Learning Fastai Extension
> Implementation of popular SOTA self-supervised learning algorithms as Fastai Callbacks.
You may find documentation [here](https://keremturgutlu.github.io/self_supervised) and github repo [here](https://github.com/keremturgutlu/self_supervised/tree/master/)
## Install
`pip install self-supervised`
## Algorithms
Here are the list of implemented algorithms:
- [SimCLR](https://arxiv.org/pdf/2002.05709.pdf)
- [BYOL](https://arxiv.org/pdf/2006.07733.pdf)
- [SwAV](https://arxiv.org/pdf/2006.09882.pdf)
## Simple Usage
```python
from self_supervised.simclr import *
dls = get_dls(resize, bs)
model = create_simclr_model(arch=xresnet34, pretrained=False)
learn = Learner(dls, model, SimCLRLoss(temp=0.1), opt_func=opt_func, cbs=[SimCLR(size=size)])
learn.fit_flat_cos(100, 1e-2)
```
```python
from self_supervised.byol import *
dls = get_dls(resize, bs)
model = create_byol_model(arch=xresnet34, pretrained=False)
learn = Learner(dls, model, byol_loss, opt_func=opt_func, cbs=[BYOL(size=size, T=0.99)])
learn.fit_flat_cos(100, 1e-2)
```
```python
from self_supervised.swav import *
dls = get_dls(resize, bs)
model = create_swav_model(arch=xresnet34, pretrained=False)
learn = Learner(dls, model, SWAVLoss(), opt_func=opt_func, cbs=[SWAV(crop_sizes=[size,96],
num_crops=[2,6],
min_scales=[0.25,0.2],
max_scales=[1.0,0.35])])
learn.fit_flat_cos(100, 1e-2)
```
## ImageWang Benchmarks
All of the algorithms implemented in this library have been evaluated in [ImageWang Leaderboard](https://github.com/fastai/imagenette#image%E7%BD%91-leaderboard).
Overall, the algorithms rank as `SwAV > BYOL > SimCLR` in most of the benchmarks. For details you may inspect the history of [ImageWang Leaderboard](https://github.com/fastai/imagenette#image%E7%BD%91-leaderboard) through github.
It should be noted that during these experiments no hyperparameter selection/tuning was made beyond using `learn.lr_find()` or making sanity checks over data augmentations by visualizing batches. So there is still room for improvement, and the overall rankings of the algorithms may change based on your setup. Yet, the overall rankings are on par with the papers.
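For reference, the two sanity checks mentioned above map onto standard fastai calls (a minimal sketch, assuming a `dls`/`learn` pair built as in the usage examples above):
```python
dls.show_batch()   # visually inspect the augmented batches
learn.lr_find()    # suggest a learning rate before calling fit_flat_cos
```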
## Contributing
Contributions and or requests for new self-supervised algorithms are welcome. This repo will try to keep itself up-to-date with recent SOTA self-supervised algorithms.
Before raising a PR please create a new branch with name `<self-supervised-algorithm>`. You may refer to previous notebooks before implementing your Callback.
Please refer to sections `Developers Guide, Abbreviations Guide, and Style Guide` from https://docs.fast.ai/dev-setup and note that same rules apply for this library.
## Create Data
```
import numpy as np
import matplotlib.pyplot as plt
from patsy import dmatrix
from statsmodels.api import GLM, families
def simulate_poisson_process(rate, sampling_frequency):
return np.random.poisson(rate / sampling_frequency)
def plot_model_vs_true(time, spike_train, firing_rate, conditional_intensity, sampling_frequency):
fig, axes = plt.subplots(2, 1, figsize=(12, 6), sharex=True, constrained_layout=True)
s, t = np.nonzero(spike_train)
axes[0].scatter(np.unique(time)[s], t, s=1, color='black')
axes[0].set_ylabel('Trials')
axes[0].set_title('Simulated Spikes')
axes[0].set_xlim((0, 1))
axes[1].plot(np.unique(time), firing_rate[:, 0],
linestyle='--', color='black',
linewidth=4, label='True Rate')
axes[1].plot(time.ravel(), conditional_intensity * sampling_frequency,
linewidth=4, label='model conditional intensity')
axes[1].set_xlabel('Time')
axes[1].set_ylabel('Firing Rate (Hz)')
axes[1].set_title('True Rate vs. Model')
axes[1].set_ylim((0, 15))
plt.legend()
n_time, n_trials = 1500, 1000
sampling_frequency = 1500
# Firing rate starts at 5 Hz and switches to 10 Hz
firing_rate = np.ones((n_time, n_trials)) * 10
firing_rate[:n_time // 2, :] = 5
spike_train = simulate_poisson_process(
firing_rate, sampling_frequency)
time = (np.arange(0, n_time)[:, np.newaxis] / sampling_frequency *
np.ones((1, n_trials)))
trial_id = (np.arange(n_trials)[np.newaxis, :]
* np.ones((n_time, 1)))
```
## Good Fit
```
# Fit a spline model to the firing rate
design_matrix = dmatrix('bs(time, df=5)', dict(time=time.ravel()))
fit = GLM(spike_train.ravel(), design_matrix,
family=families.Poisson()).fit()
conditional_intensity = fit.mu
plot_model_vs_true(time, spike_train, firing_rate, conditional_intensity, sampling_frequency)
plt.savefig('simulated_spikes_model.png')
from time_rescale import TimeRescaling
conditional_intensity = fit.mu
rescaled = TimeRescaling(conditional_intensity,
spike_train.ravel(),
trial_id.ravel())
fig, axes = plt.subplots(1, 2, figsize=(12, 6))
rescaled.plot_ks(ax=axes[0])
rescaled.plot_rescaled_ISI_autocorrelation(ax=axes[1])
plt.savefig('time_rescaling_ks_autocorrelation.png')
```
### Adjust for short trials
```
rescaled_adjusted = TimeRescaling(conditional_intensity,
spike_train.ravel(),
trial_id.ravel(),
adjust_for_short_trials=True)
fig, axes = plt.subplots(1, 2, figsize=(12, 6))
rescaled_adjusted.plot_ks(ax=axes[0])
rescaled_adjusted.plot_rescaled_ISI_autocorrelation(ax=axes[1])
plt.savefig('time_rescaling_ks_autocorrelation_adjusted.png')
```
## Bad Fit
```
constant_fit = GLM(spike_train.ravel(),
np.ones_like(spike_train.ravel()),
family=families.Poisson()).fit()
conditional_intensity = constant_fit.mu
plot_model_vs_true(time, spike_train, firing_rate, conditional_intensity, sampling_frequency)
plt.savefig('constant_model_fit.png')
bad_rescaled = TimeRescaling(constant_fit.mu,
spike_train.ravel(),
trial_id.ravel(),
adjust_for_short_trials=True)
fig, axes = plt.subplots(1, 2, figsize=(12, 6))
bad_rescaled.plot_ks(ax=axes[0], scatter_kwargs=dict(s=10))
axes[0].set_title('KS Plot')
bad_rescaled.plot_rescaled_ISI_autocorrelation(ax=axes[1], scatter_kwargs=dict(s=10))
axes[1].set_title('Autocorrelation');
plt.savefig('time_rescaling_ks_autocorrelation_bad_fit.png')
```
## Global Air Pollution Measurements
* [Air Quality Index - Wiki](https://en.wikipedia.org/wiki/Air_quality_index)
* [BigQuery - Wiki](https://en.wikipedia.org/wiki/BigQuery)
In this notebook data is extracted from the *BigQuery Public Data* program, accessible exclusively in *Kaggle*. The BigQuery helper object converts data in cloud storage into a *Pandas DataFrame*. The query syntax is the same as *SQL*. Because the dataset is very large, converting all of it to a DataFrame is cumbersome, so each query is written to return results that are readily usable for visualization.
***
>**Basic attributes of the Air Quality Index**
* Measurement units
* $ug/m^3$: micro gram/cubic meter
* $ppm$: Parts Per Million
* Pollutant
* $O3$: Ozone gas
    * $SO2$: Sulphur Dioxide
    * $NO2$: Nitrogen Dioxide
* $PM 2.5$: Particles with an aerodynamic diameter less than $2.5 μm$
* $PM 10$: Particles with an aerodynamic diameter less than $10 μm$
* $CO$: Carbon monoxide
**Steps**
1. Load Packages
2. Bigquery Object
3. AQI range and Statistics
4. Distribution of country listed in AQI
5. Location
6. Air Quality Index value distribution Map view
7. Pollutant Statistics
8. Distribution of pollutant and unit
9. Distribution of Source name
10. Sample AQI Averaged over in hours
11. AQI variation with time
12. Country Heatmap
13. Animation
### Load packages
```
# Load packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.basemap import Basemap
import folium
import folium.plugins as plugins
import warnings
warnings.filterwarnings('ignore')
pd.options.display.max_rows =10
%matplotlib inline
```
### Bigquery
BigQuery is a RESTful web service that enables interactive analysis of massively large datasets working in conjunction with Google Storage. It is an Infrastructure as a Service that may be used complementarily with MapReduce.
```
# Customized query helper package available exclusively in Kaggle
import bq_helper
# Helper object
openAQ = bq_helper.BigQueryHelper(active_project='bigquery-public-data',
dataset_name='openaq')
# List of table
openAQ.list_tables()
#Schema
openAQ.table_schema('global_air_quality')
```
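Since BigQuery bills by the amount of data scanned, it is worth estimating a query's footprint before running it. Below is a small sketch; `estimate_query_size` and the `max_gb_scanned` guard of `query_to_pandas_safe` are, to my knowledge, part of the `bq_helper` API used here.
```
# Estimate the data scanned by a query before running it
query = """SELECT value, country
           FROM `bigquery-public-data.openaq.global_air_quality`
           WHERE unit = 'µg/m³'
        """
print('Estimated scan size (GB):', openAQ.estimate_query_size(query))
# query_to_pandas_safe refuses to run queries scanning more than max_gb_scanned
df_sample = openAQ.query_to_pandas_safe(query, max_gb_scanned=1)
```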
### Table display
```
openAQ.head('global_air_quality')
# Summary statistics
query = """SELECT value,averaged_over_in_hours
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE unit = 'µg/m³'
"""
p1 = openAQ.query_to_pandas(query)
p1.describe()
```
# Air Quality Index Range
* [AQI Range](http://aqicn.org/faq/2013-09-09/revised-pm25-aqi-breakpoints/)
<center><img src = 'https://campuspress.yale.edu/datadriven/files/2012/03/AQI-1024x634-1ybtu6l.png '><center>
The range of AQI is 0 - 500, so let's limit the data to that range; in previous kernels these outlier data points were not removed.
```
query = """SELECT value,country
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE unit = 'µg/m³' AND value < 0
"""
p1 = openAQ.query_to_pandas(query)
p1.describe().T
```
There are more than 100 readings with a value less than 0. The lowest value is -999000, which is an outlier data point. An **air quality meter** is a digital instrument; if the meter reports an error value like this, the sensor is disconnected or faulty.
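As an extra check (not in the original analysis), we can count how many negative readings each country reports, reusing the same query pattern:
```
# Count negative (faulty) readings per country
query = """SELECT country, COUNT(value) as `negative_count`
           FROM `bigquery-public-data.openaq.global_air_quality`
           WHERE unit = 'µg/m³' AND value < 0
           GROUP BY country
           ORDER BY negative_count DESC
        """
neg = openAQ.query_to_pandas(query)
neg.head()
```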
```
query2 = """SELECT value,country,pollutant
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE unit = 'µg/m³' AND value > 0
"""
p2 = openAQ.query_to_pandas(query2)
print('0.99 Quantile',p2['value'].quantile(0.99))
p2.describe().T
p2[p2['value']>10000]
```
Country
* MK is *Macedonia* [wiki](https://en.wikipedia.org/wiki/Republic_of_Macedonia)
* CL is *Chile* [Wiki](https://en.wikipedia.org/wiki/Chile)
>In both countries some natural disaster may have happened, so the AQI is very high.
We will discard values greater than 10000, which are outlier data points.
### Distribution of country listed in AQI
```
query = """SELECT country,COUNT(country) as `count`
FROM `bigquery-public-data.openaq.global_air_quality`
GROUP BY country
HAVING COUNT(country) >10
ORDER BY `count`
"""
cnt = openAQ.query_to_pandas_safe(query)
cnt.tail()
plt.style.use('bmh')
plt.figure(figsize=(14,4))
sns.barplot(cnt['country'], cnt['count'], palette='magma')
plt.xticks(rotation=45)
plt.title('Distribution of country listed in data');
```
## Location
We now look at the different locations where air quality is measured. The location data consists of latitude, longitude and city.
```
#Average polution of air by countries
query = """SELECT AVG(value) as `Average`,country
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE unit = 'µg/m³' AND value BETWEEN 0 AND 10000
GROUP BY country
ORDER BY Average DESC
"""
cnt = openAQ.query_to_pandas(query)
plt.figure(figsize=(14,4))
sns.barplot(cnt['country'],cnt['Average'], palette= sns.color_palette('gist_heat',len(cnt)))
plt.xticks(rotation=90)
plt.title('Average pollution of air by countries in unit $ug/m^3$')
plt.ylabel('Average AQI in $ug/m^3$');
```
* Countries PL (Poland) and IN (India) are the top air polluters
***
### AQI measurement center
```
query = """SELECT city,latitude,longitude,
AVG(value) as `Average`
FROM `bigquery-public-data.openaq.global_air_quality`
GROUP BY latitude,city,longitude
"""
location = openAQ.query_to_pandas_safe(query)
#Location AQI measurement center
m = folium.Map(location = [20,10],tiles='Mapbox Bright',zoom_start=2)
# add marker one by on map
for i in range(0,500):
folium.Marker(location = [location.iloc[i]['latitude'],location.iloc[i]['longitude']],\
popup=location.iloc[i]['city']).add_to(m)
m # DRAW MAP
```
We find that there are many air quality index measurement stations across the US and Europe. There are only a few measurement stations on the African continent, and we can hardly find any in the Middle East or Russia.
### Air Quality Index value distribution Map view
```
query = """SELECT city,latitude,longitude,
AVG(value) as `Average`
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE unit = 'µg/m³' AND value BETWEEN 0 AND 10000
GROUP BY latitude,city,longitude
"""
location = openAQ.query_to_pandas_safe(query)
location.dropna(axis=0, inplace=True)
plt.style.use('ggplot')
f,ax = plt.subplots(figsize=(14,10))
m1 = Basemap(projection='cyl', llcrnrlon=-180, urcrnrlon=180, llcrnrlat=-90, urcrnrlat=90,
resolution='c',lat_ts=True)
m1.drawmapboundary(fill_color='#A6CAE0', linewidth=0)
m1.fillcontinents(color='grey', alpha=0.3)
m1.drawcoastlines(linewidth=0.1, color="white")
m1.shadedrelief()
m1.bluemarble(alpha=0.4)
avg = location['Average']
m1loc = m1(location['latitude'].tolist(),location['longitude'])
m1.scatter(m1loc[1],m1loc[0],lw=3,alpha=0.5,zorder=3,cmap='coolwarm', c=avg)
plt.title('Average Air quality index in unit $ug/m^3$ value')
m1.colorbar(label=' Average AQI value in unit $ug/m^3$');
```
### US
```
#USA location
query = """SELECT
MAX(latitude) as `max_lat`,
MIN(latitude) as `min_lat`,
MAX(longitude) as `max_lon`,
MIN(longitude) as `min_lon`
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE country = 'US' """
us_loc = openAQ.query_to_pandas_safe(query)
us_loc
query = """ SELECT city,latitude,longitude,averaged_over_in_hours,
AVG(value) as `Average`
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE country = 'US' AND unit = 'µg/m³' AND value BETWEEN 0 AND 10000
GROUP BY latitude,city,longitude,averaged_over_in_hours,country """
us_aqi = openAQ.query_to_pandas_safe(query)
# USA
min_lat = us_loc['min_lat']
max_lat = us_loc['max_lat']
min_lon = us_loc['min_lon']
max_lon = us_loc['max_lon']
plt.figure(figsize=(14,8))
m2 = Basemap(projection='cyl', llcrnrlon=min_lon, urcrnrlon=max_lon, llcrnrlat=min_lat, urcrnrlat=max_lat,
resolution='c',lat_ts=True)
m2.drawcounties()
m2.drawmapboundary(fill_color='#A6CAE0', linewidth=0)
m2.fillcontinents(color='grey', alpha=0.3)
m2.drawcoastlines(linewidth=0.1, color="white")
m2.drawstates()
m2.bluemarble(alpha=0.4)
avg = (us_aqi['Average'])
m2loc = m2(us_aqi['latitude'].tolist(),us_aqi['longitude'])
m2.scatter(m2loc[1],m2loc[0],c = avg,lw=3,alpha=0.5,zorder=3,cmap='rainbow')
m1.colorbar(label = 'Average AQI value in unit $ug/m^3$')
plt.title('Average Air quality index in unit $ug/m^3$ of US');
```
The AQI in the US ranges from 0 to 400; most city data points are within 100.
### India
```
#INDIA location
query = """SELECT
MAX(latitude) as `max_lat`,
MIN(latitude) as `min_lat`,
MAX(longitude) as `max_lon`,
MIN(longitude) as `min_lon`
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE country = 'IN' """
in_loc = openAQ.query_to_pandas_safe(query)
in_loc
query = """ SELECT city,latitude,longitude,
AVG(value) as `Average`
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE country = 'IN' AND unit = 'µg/m³' AND value BETWEEN 0 AND 10000
GROUP BY latitude,city,longitude,country """
in_aqi = openAQ.query_to_pandas_safe(query)
# INDIA
min_lat = in_loc['min_lat']-5
max_lat = in_loc['max_lat']+5
min_lon = in_loc['min_lon']-5
max_lon = in_loc['max_lon']+5
plt.figure(figsize=(14,8))
m3 = Basemap(projection='cyl', llcrnrlon=min_lon, urcrnrlon=max_lon, llcrnrlat=min_lat, urcrnrlat=max_lat,
resolution='c',lat_ts=True)
m3.drawcounties()
m3.drawmapboundary(fill_color='#A6CAE0', linewidth=0)
m3.fillcontinents(color='grey', alpha=0.3)
m3.drawcoastlines(linewidth=0.1, color="white")
m3.drawstates()
avg = in_aqi['Average']
m3loc = m3(in_aqi['latitude'].tolist(),in_aqi['longitude'])
m3.scatter(m3loc[1],m3loc[0],c = avg,alpha=0.5,zorder=5,cmap='rainbow')
m1.colorbar(label = 'Average AQI value in unit $ug/m^3$')
plt.title('Average Air quality index in unit $ug/m^3$ of India');
```
### Distribution of pollutant and unit
```
# Unit query
query = """SELECT unit,COUNT(unit) as `count`
FROM `bigquery-public-data.openaq.global_air_quality`
GROUP BY unit
"""
unit = openAQ.query_to_pandas(query)
# Pollutant query
query = """SELECT pollutant,COUNT(pollutant) as `count`
FROM `bigquery-public-data.openaq.global_air_quality`
GROUP BY pollutant
"""
poll_count = openAQ.query_to_pandas_safe(query)
plt.style.use('fivethirtyeight')
plt.style.use('bmh')
f, ax = plt.subplots(1,2,figsize = (14,5))
ax1,ax2= ax.flatten()
ax1.pie(x=unit['count'],labels=unit['unit'],shadow=True,autopct='%1.1f%%',explode=[0,0.1],\
colors=sns.color_palette('hot',2),startangle=90,)
ax1.set_title('Distribution of measurement unit')
explode = np.arange(0,0.1)
ax2.pie(x=poll_count['count'],labels=poll_count['pollutant'], shadow=True, autopct='%1.1f%%',\
colors=sns.color_palette('Set2',5),startangle=60,)
ax2.set_title('Distribution of pollutants in air');
```
* The most popular unit of measurement of air quality is $ug/m^3$
* $O_3$ accounts for about 23% of the pollutant measurements.
***
### Pollutant Statistics
```
query = """ SELECT pollutant,
AVG(value) as `Average`,
COUNT(value) as `Count`,
MIN(value) as `Min`,
MAX(value) as `Max`,
SUM(value) as `Sum`
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE unit = 'µg/m³' AND value BETWEEN 0 AND 10000
GROUP BY pollutant
"""
cnt = openAQ.query_to_pandas_safe(query)
cnt
```
We find
* CO (carbon monoxide) has a very wide range of values.
* The sum for CO is the highest in the list.
* Except for the average AQI of CO, all averages are below 54 $ug/m^3$.
### Pollutants by Country
```
query = """SELECT AVG(value) as`Average`,country, pollutant
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE unit = 'µg/m³'AND value BETWEEN 0 AND 10000
GROUP BY country,pollutant"""
p1 = openAQ.query_to_pandas_safe(query)
# By country
p1_pivot = p1.pivot(index = 'country',values='Average', columns= 'pollutant')
plt.figure(figsize=(14,15))
ax = sns.heatmap(p1_pivot, lw=0.01, cmap=sns.color_palette('Reds',500))
plt.yticks(rotation=30)
plt.title('Heatmap average AQI by Pollutant');
f,ax = plt.subplots(figsize=(14,6))
sns.barplot(p1[p1['pollutant']=='co']['country'],p1[p1['pollutant']=='co']['Average'],)
plt.title('CO AQI in different countries')
plt.xticks(rotation=90);
f,ax = plt.subplots(figsize=(14,6))
sns.barplot(p1[p1['pollutant']=='pm25']['country'],p1[p1['pollutant']=='pm25']['Average'])
plt.title('pm25 AQI in different countries')
plt.xticks(rotation=90);
```
### Distribution of Source name
The institutions where AQI is measured
```
#source_name
query = """ SELECT source_name, COUNT(source_name) as `count`
FROM `bigquery-public-data.openaq.global_air_quality`
GROUP BY source_name
ORDER BY count DESC
"""
source_name = openAQ.query_to_pandas_safe(query)
plt.figure(figsize=(14,10))
sns.barplot(source_name['count'][:20], source_name['source_name'][:20],palette = sns.color_palette('YlOrBr'))
plt.title('Distribution of Top 20 source_name')
#plt.axvline(source_name['count'].median())
plt.xticks(rotation=90);
```
We find
* AirNow is the top source in the list
* European countries rank high in the list; their institution names start with 'EEA' followed by the country.
***
### Sample AQI Averaged over in hours
The distribution of AQI samples by the averaging window (in hours) over which they are taken
```
query = """SELECT averaged_over_in_hours, COUNT(*) as `count`
FROM `bigquery-public-data.openaq.global_air_quality`
GROUP BY averaged_over_in_hours
ORDER BY count DESC """
cnt = openAQ.query_to_pandas(query)
#cnt['averaged_over_in_hours'] = cnt['averaged_over_in_hours'].astype('category')
plt.figure(figsize=(14,5))
sns.barplot( cnt['averaged_over_in_hours'],cnt['count'], palette= sns.color_palette('brg'))
plt.title('Distribution of quality measurement per hour');
```
We find that air quality is typically measured (averaged) every hour.
***
### AQI in ppm
```
query = """SELECT AVG(value) as`Average`,country,
EXTRACT(YEAR FROM timestamp) as `Year`,
pollutant
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE unit = 'ppm'
GROUP BY country,Year,pollutant"""
pol_aqi = openAQ.query_to_pandas_safe(query)
# By month in year
plt.figure(figsize=(14,8))
sns.barplot(pol_aqi['country'], pol_aqi['Average'])
plt.title('Distribution of average AQI by country $ppm$');
```
### AQI variation with time
```
query = """SELECT EXTRACT(YEAR FROM timestamp) as `Year`,
AVG(value) as `Average`
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE unit = 'µg/m³' AND value BETWEEN 0 AND 10000
GROUP BY EXTRACT(YEAR FROM timestamp)
"""
quality = openAQ.query_to_pandas(query)
query = """SELECT EXTRACT(MONTH FROM timestamp) as `Month`,
AVG(value) as `Average`
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE unit = 'µg/m³' AND value BETWEEN 0 AND 10000
GROUP BY EXTRACT(MONTH FROM timestamp)
"""
quality1 = openAQ.query_to_pandas(query)
# plot
f,ax = plt.subplots(1,2, figsize= (14,6),sharey=True)
ax1,ax2 = ax.flatten()
sns.barplot(quality['Year'],quality['Average'],ax=ax1)
ax1.set_title('Distribution of average AQI by year')
sns.barplot(quality1['Month'], quality1['Average'], ax=ax2)
ax2.set_title('Distribution of average AQI by month')
ax2.set_ylabel('');
# by year & month
query = """SELECT EXTRACT(YEAR from timestamp) as `Year`,
EXTRACT(MONTH FROM timestamp) as `Month`,
AVG(value) as `Average`
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE unit = 'µg/m³' AND value BETWEEN 0 AND 10000
GROUP BY year,Month"""
aqi_year = openAQ.query_to_pandas_safe(query)
# By month in year
plt.figure(figsize=(14,8))
sns.pointplot(aqi_year['Month'],aqi_year['Average'],hue = aqi_year['Year'])
plt.title('Distribution of average AQI by month');
```
We find
* the data available for particular years is incomplete
* only the years 2016 and 2017 are available completely
### Country Heatmap
```
# Heatmap by country
query = """SELECT AVG(value) as `Average`,
EXTRACT(YEAR FROM timestamp) as `Year`,
country
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE unit = 'µg/m³' AND value BETWEEN 0 AND 10000
GROUP BY country,Year
"""
coun_aqi = openAQ.query_to_pandas_safe(query)
coun_pivot = coun_aqi.pivot(index='country', columns='Year', values='Average').fillna(0)
# By month in year
plt.figure(figsize=(14,15))
sns.heatmap(coun_pivot, lw=0.01, cmap=sns.color_palette('Reds',len(coun_pivot)))
plt.yticks(rotation=30)
plt.title('Heatmap average AQI by YEAR');
```
### Animation
```
query = """SELECT EXTRACT(YEAR FROM timestamp) as `Year`,AVG(value) as `Average`,
latitude,longitude
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE unit = 'µg/m³' AND value BETWEEN 0 AND 10000
GROUP BY Year, latitude,longitude
"""
p1 = openAQ.query_to_pandas_safe(query)
from matplotlib import animation,rc
import io
import base64
from IPython.display import HTML, display
import warnings
warnings.filterwarnings('ignore')
fig = plt.figure(figsize=(14,10))
plt.style.use('ggplot')
def animate(Year):
ax = plt.axes()
ax.clear()
ax.set_title('Average AQI in Year: '+str(Year))
m4 = Basemap(llcrnrlat=-90, urcrnrlat=90, llcrnrlon=-180,urcrnrlon=180,projection='cyl')
m4.drawmapboundary(fill_color='#A6CAE0', linewidth=0)
m4.fillcontinents(color='grey', alpha=0.3)
m4.drawcoastlines(linewidth=0.1, color="white")
m4.shadedrelief()
lat_y = list(p1[p1['Year'] == Year]['latitude'])
lon_y = list(p1[p1['Year'] == Year]['longitude'])
lat,lon = m4(lat_y,lon_y)
avg = p1[p1['Year'] == Year]['Average']
m4.scatter(lon,lat,c = avg,lw=2, alpha=0.3,cmap='hot_r')
ani = animation.FuncAnimation(fig,animate,list(p1['Year'].unique()), interval = 1500)
ani.save('animation.gif', writer='imagemagick', fps=1)
plt.close(1)
filename = 'animation.gif'
video = io.open(filename, 'r+b').read()
encoded = base64.b64encode(video)
HTML(data='''<img src="data:image/gif;base64,{0}" type="gif" />'''.format(encoded.decode('ascii')))
# Continued
```
### Thank you for visiting, please upvote if you like it.
# MACHINE LEARNING LAB - 4 ( Backpropagation Algorithm )
**4. Build an Artificial Neural Network by implementing the Backpropagation algorithm and test the same using appropriate data sets.**
```
import numpy as np
X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float) # X = (hours sleeping, hours studying)
y = np.array(([92], [86], [89]), dtype=float) # y = score on test
# scale units
X = X/np.amax(X, axis=0) # maximum of X array
y = y/100 # max test score is 100
class Neural_Network(object):
def __init__(self):
# Parameters
self.inputSize = 2
self.outputSize = 1
self.hiddenSize = 3
# Weights
self.W1 = np.random.randn(self.inputSize, self.hiddenSize) # (3x2) weight matrix from input to hidden layer
self.W2 = np.random.randn(self.hiddenSize, self.outputSize) # (3x1) weight matrix from hidden to output layer
def forward(self, X):
#forward propagation through our network
self.z = np.dot(X, self.W1) # dot product of X (input) and first set of 3x2 weights
self.z2 = self.sigmoid(self.z) # activation function
self.z3 = np.dot(self.z2, self.W2) # dot product of hidden layer (z2) and second set of 3x1 weights
o = self.sigmoid(self.z3) # final activation function
return o
def sigmoid(self, s):
return 1/(1+np.exp(-s)) # activation function
def sigmoidPrime(self, s):
return s * (1 - s) # derivative of sigmoid
def backward(self, X, y, o):
# backward propgate through the network
self.o_error = y - o # error in output
        self.o_delta = self.o_error*self.sigmoidPrime(o) # applying derivative of sigmoid to output error
self.z2_error = self.o_delta.dot(self.W2.T) # z2 error: how much our hidden layer weights contributed to output error
self.z2_delta = self.z2_error*self.sigmoidPrime(self.z2) # applying derivative of sigmoid to z2 error
self.W1 += X.T.dot(self.z2_delta) # adjusting first set (input --> hidden) weights
self.W2 += self.z2.T.dot(self.o_delta) # adjusting second set (hidden --> output) weights
def train (self, X, y):
o = self.forward(X)
self.backward(X, y, o)
NN = Neural_Network()
for i in range(1000): # trains the NN 1,000 times
print ("\nInput: \n" + str(X))
print ("\nActual Output: \n" + str(y))
print ("\nPredicted Output: \n" + str(NN.forward(X)))
print ("\nLoss: \n" + str(np.mean(np.square(y - NN.forward(X))))) # mean sum squared loss)
NN.train(X, y)
```
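Once the loss has settled, the trained network can be queried for a new input. A small usage sketch follows (the input values are made up, and they must be scaled exactly like the training data):
```
# hypothetical new student: 3 hours of sleep, 8 hours of study
x_new = np.array([3, 8], dtype=float) / np.array([3, 9])  # scale by the column maxima of the original X
print("Predicted test score:", NN.forward(x_new) * 100)   # undo the /100 scaling applied to y
```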
# LSV Data Analysis and Parameter Estimation
##### First, all relevent Python packages are imported
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from scipy.optimize import curve_fit
from scipy.signal import savgol_filter, find_peaks, find_peaks_cwt
import pandas as pd
import math
import glob
import altair as alt
from voltammetry import preprocessing, plotting, fitting
```
##### The user will be able to import experimental data for an LSV scan
##### (Currently, we assume that the LSV sweep starts at equilibrium)
```
##Import Experimental Reversible Data:
rev_exp_data = pd.read_csv("data/10mVs_Reversible.csv")
current_exp=rev_exp_data['current(A)'].values
voltage_exp=rev_exp_data['voltage(mV)'].values
time_exp=rev_exp_data['time(s)'].values
## all appropriate packages and the singular experimental data file is imported now
```
##### Next, the program will grab some simple quantitative information from the graph that may be hard to obtain by hand or across extensive datasets
```
t,i,v = preprocessing.readFile('data/10mM_F2CA_1M_KOH_pH_14_100mV.DTA',type='gamry',scan='first')
length = len(t)
v1, v2 = v[0:int(length/2)], v[int(length/2):]
i1, i2 = i[0:int(length/2)], i[int(length/2):]
t1, t2 = t[0:int(length/2)], t[int(length/2):]
peak_list = []
_, v_peaks, i_peaks = fitting.peak_find(v1,i1,v2,i2)
b1, b2 = fitting.baseline(v1,i1,v2,i2)
for n in range(len(v_peaks)):
peak_list.append([i_peaks[n],v_peaks[n]])
plotting.plot_voltammogram(t,i,v, peaks = peak_list).display()
plt.plot(v1,b1)
plt.plot(v1,i1)
plt.plot(v2,b2)
plt.plot(v2,i2)
```
##### This program can also return relevant parameters using a physics-based model.
```
# Import the dimensionless voltammagram (V I) for reversible reactions
rev_dim_values = pd.read_csv("data/dimensionless_values_rev.csv")
rev_dim_current=rev_dim_values['dimensionless_current'].values
rev_dim_voltage=rev_dim_values['dimensionless_Voltage'].values
##We will now prompt the user to submit known parameters (THESE CAN BE CHANGED OR MADE MORE CONVENIENT)
sweep_rate= float(input("What is the Voltage sweep rate in mV/s?(10)"))
electrode_surface_area= float(input("What is the electrode surface area in cm^2?(.2)"))
concentration_initial= float(input("What is the initial concentration in mol/cm^3?(.00001)"))
Temp= float(input("What is the temperature in K?(298)"))
eq_pot= float(input("What is the equilibrium potential in V?(.10)"))
##we are inserting a diffusion coefficient to check math here, we will estimate this later:
Diff_coeff=0.00001
## Here we define constant variables, these can be made to user inputs if needed.
n=1
Faradays_const=96285
R_const=8.314
sigma=(n*Faradays_const*sweep_rate)/(R_const*Temp)
Pre=electrode_surface_area*concentration_initial*n*Faradays_const*math.sqrt(Diff_coeff*sigma)
output_voltage=(eq_pot+rev_dim_voltage/n)
output_current=Pre*rev_dim_current
plt.plot(output_voltage,output_current)
```
##### Then, we can back out a relevant parameter from the data:
```
# Fitting Diff_Coeff
def test_func(rev_dim_current, D):
return electrode_surface_area*concentration_initial*n*Faradays_const*math.sqrt(D*sigma)*rev_dim_current
params, params_covariance = curve_fit(test_func, rev_dim_current, output_current,p0=None,bounds = (0,[1]))
print("Diffusion Coefficient (cm^2/s): {}".format(params[0]))
```
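As a quick consistency check (a small sketch that only reuses arrays already defined above), we can rebuild the current from the fitted diffusion coefficient and compare it with the curve generated from the assumed value:
```
# rebuild the current with the fitted D and compare against the reference curve
fitted_current = test_func(rev_dim_current, params[0])
max_residual = np.max(np.abs(fitted_current - output_current))
print("Max residual between fitted and reference current (A): {:.3e}".format(max_residual))
plt.plot(output_voltage, output_current, label='assumed D')
plt.plot(output_voltage, fitted_current, '--', label='fitted D')
plt.xlabel('Voltage (V)')
plt.ylabel('Current (A)')
plt.legend()
```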
##### We can repeat this exercise on an LSV with an irreversible reaction to determine exchange current density.
```
##Import Experimental Irreversible Data:
irrev_exp_data = pd.read_csv("data/10mVs_Irreversible.csv")
current_exp=irrev_exp_data['current(A)'].values
voltage_exp=irrev_exp_data['voltage(mV)'].values
time_exp=irrev_exp_data['time(s)'].values
## all appropriate packages and the singular experimental data file is imported now
# Import the dimensionless voltammagram (V I) for irreversible reactions
irrev_dim_values = pd.read_csv("data/dimensionless_values_irrev.csv")
irrev_dim_current=irrev_dim_values['dimensionless_current'].values
irrev_dim_voltage=irrev_dim_values['dimensionless_Voltage'].values
##We will now prompt the user to submit known parameters (THESE CAN BE CHANGED OR MADE MORE CONVENIENT)
sweep_rate= float(input("What is the Voltage sweep rate in mV/s?(10)"))
electrode_surface_area= float(input("What is the electrode surface area in cm^2?(.2)"))
concentration_initial= float(input("What is the initial concentration in mol/cm^3?(.00001)"))
Temp= float(input("What is the temperature in K?(298)"))
eq_pot= float(input("What is the equilibrium potential in mV?(100)"))
##we are inserting a diffusion coefficient to check math here, we will estimate this later:
Diff_coeff=0.00001
## Here we define constant variables, these can be made to user inputs if needed.
n=1
Faradays_const=96285
R_const=8.314
exchange_current_density=0.0002
kinetic_coefficient=exchange_current_density/n/Faradays_const/electrode_surface_area/concentration_initial
transfer_coefficient=.6
eV_const=59.1
beta=transfer_coefficient*n*Faradays_const*sweep_rate/R_const/Temp/1000
Pre=(concentration_initial*n*Faradays_const*
math.sqrt(Diff_coeff*sweep_rate*transfer_coefficient
*Faradays_const/(R_const*Temp*1000)))
output_voltage=eq_pot+irrev_dim_voltage/transfer_coefficient-eV_const/transfer_coefficient*math.log(math.sqrt(math.pi*Diff_coeff*beta)/kinetic_coefficient)
output_current=Pre*irrev_dim_current
plt.plot(output_voltage,output_current)
# Fitting Diff_Coeff
from scipy import optimize
def test_func(irrev_dim_voltage, exchange_current_density):
return eq_pot+irrev_dim_voltage/transfer_coefficient-eV_const/transfer_coefficient*math.log(math.sqrt(math.pi*Diff_coeff*beta)/(exchange_current_density/n/Faradays_const/electrode_surface_area/concentration_initial))
params, params_covariance = optimize.curve_fit(test_func, irrev_dim_voltage, output_voltage,p0=None,bounds = (0,[1]))
print("Exchange current density (A/cm^2): {}".format(params[0]))
```
# Test: Minimum error discrimination
In this notebook we are testing the evolution of the error probability with the number of evaluations.
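For two pure states prepared with equal priors, the reference value returned by `helstrom_bound` below should correspond (under the usual equal-prior assumption, as far as I recall) to the Helstrom limit
$$
P_\text{err}^\text{min} = \frac{1}{2}\left(1 - \sqrt{1 - |\langle\psi|\phi\rangle|^2}\right),
$$
so the error-probability curve is expected to plateau near this value rather than at zero.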
```
import sys
sys.path.append('../../')
import itertools
import numpy as np
import matplotlib.pyplot as plt
from numpy import pi
from qiskit.algorithms.optimizers import SPSA
from qnn.quantum_neural_networks import StateDiscriminativeQuantumNeuralNetworks as nnd
from qnn.quantum_state import QuantumState
plt.style.use('ggplot')
def callback(params, results, prob_error, prob_inc, prob):
data.append(prob_error)
# Create random states
ψ = QuantumState.random(1)
ϕ = QuantumState.random(1)
# Parameters
th_u, fi_u, lam_u = [0], [0], [0]
th1, th2 = [0], [pi]
th_v1, th_v2 = [0], [0]
fi_v1, fi_v2 = [0], [0]
lam_v1, lam_v2 = [0], [0]
params = list(itertools.chain(th_u, fi_u, lam_u, th1, th2, th_v1, th_v2, fi_v1, fi_v2, lam_v1, lam_v2))
# Initialize Discriminator
discriminator = nnd([ψ, ϕ])
data = []
results = discriminator.discriminate(SPSA(100), params, callback=callback)
optimal = nnd.helstrom_bound(ψ, ϕ)
print(f'Optimal results: {optimal}\nActual results: {results}')
fig = plt.figure(figsize=(14, 6))
plt.plot(data, '-')
plt.xlabel('Number of evaluations')
plt.ylabel('Probability')
plt.legend(['Experimental'])
plt.title('Evolution of error probability for 2 states')
fig.savefig('twostates.png')
plt.show()
th_u, fi_u, lam_u = results[0][:3]
th1 = results[0][3]
th2 = results[0][4]
th_v1 = results[0][5]
th_v2 = results[0][6]
fi_v1 = results[0][7]
fi_v2 = results[0][8]
lam_v1 = results[0][9]
lam_v2 = results[0][10]
M = nnd.povm( 2,
[th_u], [fi_u], [lam_u],
[th1], [th2],
[th_v1], [th_v2],
[fi_v1], [fi_v2],
[lam_v1], [lam_v2], output='povm' )
plt.style.use('default')
sphere = nnd.plot_bloch_sphere( M , [ψ, ϕ] )
sphere.render()
plt.savefig('sphere_2_states')
plt.style.use('ggplot')
# Create random states
ψ = QuantumState.random(1)
ϕ = QuantumState.random(1)
χ = QuantumState.random(1)
# Parameters
th_u, fi_u, lam_u = [0], [0], [0]
th1, th2 = 2 * [0], 2 * [pi]
th_v1, th_v2 = 2 * [0], 2 * [0]
fi_v1, fi_v2 = 2 * [0], 2 * [0]
lam_v1, lam_v2 = 2 * [0], 2 * [0]
params = list(itertools.chain(th_u, fi_u, lam_u, th1, th2, th_v1, th_v2, fi_v1, fi_v2, lam_v1, lam_v2))
# Initialize Discriminator
discriminator = nnd([ψ, ϕ, χ])
data = []
results = discriminator.discriminate(SPSA(100), params, callback=callback)
print(f'Results: {results}')
fig = plt.figure(figsize=(14, 6))
plt.plot(data, '-')
plt.xlabel('Number of evaluations')
plt.ylabel('Probability')
plt.legend(['Experimental'])
plt.title('Evolution of error probability for 3 states')
fig.savefig('3states.png')
plt.show()
th_u, fi_u, lam_u = results[0][:3]
th1 = results[0][3:5]
th2 = results[0][5:7]
th_v1 = results[0][7:9]
th_v2 = results[0][9:11]
fi_v1 = results[0][11:13]
fi_v2 = results[0][13:15]
lam_v1 = results[0][15:17]
lam_v2 = results[0][17:19]
M = nnd.povm( 3,
[th_u], [fi_u], [lam_u],
th1, th2,
th_v1, th_v2,
fi_v1, fi_v2,
lam_v1, lam_v2, output='povm' )
plt.style.use('default')
sphere = nnd.plot_bloch_sphere( M , [ψ, ϕ, χ] )
sphere.render()
plt.savefig('sphere_3_states.png')
plt.style.use('ggplot')
# Create random states
ψ = QuantumState([ np.array([1,0]) ])
ϕ = QuantumState([ np.array([np.cos(np.pi/4), np.sin(np.pi/4)]),
np.array([np.cos(0.1+np.pi/4),np.sin(0.1+np.pi/4)] ) ])
χ = QuantumState([ np.array([np.cos(np.pi/4), 1j*np.sin(np.pi/4)]),
np.array([np.cos(0.1+np.pi/4), 1j*np.sin(0.1+np.pi/4)] ),
np.array([np.cos(-0.1+np.pi/4), 1j*np.sin(-0.1+np.pi/4)] )])
# Parameters
th_u, fi_u, lam_u = list(np.pi*np.random.randn(1)), list(np.pi*np.random.randn(1)), list(np.pi*np.random.randn(1))
th1, th2 = list(np.pi*np.random.randn(2)), list(np.pi*np.random.randn(2))
th_v1, th_v2 = list(np.pi*np.random.randn(2)), list(np.pi*np.random.randn(2))
fi_v1, fi_v2 = list(np.pi*np.random.randn(2)), list(np.pi*np.random.randn(2))
lam_v1, lam_v2 = list(np.pi*np.random.randn(2)), list(np.pi*np.random.randn(2))
params = list(itertools.chain(th_u, fi_u, lam_u, th1, th2, th_v1, th_v2, fi_v1, fi_v2, lam_v1, lam_v2))
# Initialize Discriminator
discriminator = nnd([ψ, ϕ, χ])
data = []
results = discriminator.discriminate(SPSA(100), params, callback=callback)
print(f'Results: {results}')
fig = plt.figure(figsize=(14, 6))
plt.plot(data, '-')
plt.xlabel('Number of evaluations')
plt.ylabel('Probability')
plt.legend(['Experimental'])
plt.title('Evolution of error probability for 3 states with noise')
fig.savefig('noisy.png')
plt.show()
th_u, fi_u, lam_u = results[0][:3]
th1 = results[0][3:5]
th2 = results[0][5:7]
th_v1 = results[0][7:9]
th_v2 = results[0][9:11]
fi_v1 = results[0][11:13]
fi_v2 = results[0][13:15]
lam_v1 = results[0][15:17]
lam_v2 = results[0][17:19]
M = nnd.povm( 3,
[th_u], [fi_u], [lam_u],
th1, th2,
th_v1, th_v2,
fi_v1, fi_v2,
lam_v1, lam_v2, output='povm' )
plt.style.use('default')
sphere = nnd.plot_bloch_sphere( M , [ψ, ϕ, χ] )
sphere.render()
plt.savefig('sphere_3_states_noisy.png')
plt.style.use('ggplot')
```
# Single layer Neural Network
In this notebook, we will code a single neuron and use it as a linear classifier with two inputs. The tuning of the neuron parameters is done by backpropagation using gradient descent.
```
from sklearn.datasets import make_blobs
import numpy as np
# matplotlib to display the data
import matplotlib
matplotlib.rc('font', size=16)
matplotlib.rc('xtick', labelsize=16)
matplotlib.rc('ytick', labelsize=16)
from matplotlib import pyplot as plt, cm
from matplotlib.colors import ListedColormap
%matplotlib inline
```
## Dataset
Let's create some labeled data in the form of (X, y) with an associated class which can be 0 or 1. For this we can use the function `make_blobs` in the `sklearn.datasets` module. Here we use 2 centers with coordinates (-0.5, -1.0) and (1.0, 1.0).
```
X, y = make_blobs(n_features=2, random_state=42, centers=[(-0.5, -1.0), (1.0, 1.0)])
y = y.reshape((y.shape[0], 1))
print(X.shape)
print(y.shape)
```
Plot our training data using `plt.scatter` to have a first visualization. Here we color the points with their labels stored in `y`.
```
plt.scatter(X[:, 0], X[:, 1], c=y.squeeze(), edgecolors='gray')
plt.title('training data with labels')
plt.axis('equal')
plt.show()
```
## Activation functions
Here we play with popular activation functions like tanh, ReLU or sigmoid.
```
def heaviside(x):
return np.heaviside(x, np.zeros_like(x))
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def ReLU(x):
return np.maximum(0, x)
def leaky_ReLU(x, alpha=0.1):
return np.maximum(alpha * x, x)
def tanh(x):
return np.tanh(x)
from math import pi
plt.figure()
x = np.arange(-pi, pi, 0.01)
plt.axhline(y=0., color='gray', linestyle='dashed')
plt.axhline(y=-1, color='gray', linestyle='dashed')
plt.axhline(y=1., color='gray', linestyle='dashed')
plt.axvline(x=0., color='gray', linestyle='dashed')
plt.xlim(-pi, pi)
plt.ylim(-1.2, 1.2)
plt.title('activation functions', fontsize=16)
plt.plot(x, heaviside(x), label='heaviside', linewidth=3)
legend = plt.legend(loc='lower right')
plt.savefig('activation_functions_1.pdf')
plt.plot(x, sigmoid(x), label='sigmoid', linewidth=3)
plt.legend(loc='lower right')
plt.savefig('activation_functions_2.pdf')
plt.plot(x, tanh(x), label='tanh', linewidth=3)
plt.legend(loc='lower right')
plt.savefig('activation_functions_3.pdf')
plt.plot(x, ReLU(x), label='ReLU', linewidth=3)
plt.legend(loc='lower right')
plt.savefig('activation_functions_4.pdf')
plt.plot(x, leaky_ReLU(x), label='leaky ReLU', linewidth=3)
plt.legend(loc='lower right')
plt.savefig('activation_functions_5.pdf')
plt.show()
# gradients of the activation functions
def sigmoid_grad(x):
s = sigmoid(x)
return s * (1 - s)
def relu_grad(x):
return 1. * (x > 0)
def tanh_grad(x):
return 1 - np.tanh(x) ** 2
plt.figure()
x = np.arange(-pi, pi, 0.01)
plt.plot(x, sigmoid_grad(x), label='sigmoid gradient', linewidth=3)
plt.plot(x, relu_grad(x), label='ReLU gradient', linewidth=3)
plt.plot(x, tanh_grad(x), label='tanh gradient', linewidth=3)
plt.xlim(-pi, pi)
plt.title('activation function derivatives', fontsize=16)
legend = plt.legend()
legend.get_frame().set_linewidth(2)
plt.savefig('activation_functions_derivatives.pdf')
plt.show()
```
## ANN implementation
A simple neuron with two inputs $(x_1, x_2)$ applies an affine transform with weights $(w_1, w_2)$ and bias $w_0$.
The neuron computes a quantity called the activation, $a=\sum_i w_i x_i + w_0 = w_0 + w_1 x_1 + w_2 x_2$.
This quantity is sent to the activation function, chosen here to be a sigmoid: $f(a)=\dfrac{1}{1+e^{-a}}$
$f(a)$ is the output of the neuron, bounded between 0 and 1.
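As a quick numeric sanity check, the forward pass of this single neuron boils down to a few lines (the weights and input values below are made up purely for illustration):
```
import numpy as np

w0, w1, w2 = 0.1, -0.4, 0.7    # made-up bias and weights
x1, x2 = 1.5, -2.0             # one example input point
a = w0 + w1 * x1 + w2 * x2     # activation: affine transform of the inputs
output = 1 / (1 + np.exp(-a))  # sigmoid squashes the activation into (0, 1)
print(a, output)
```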
### Quick implementation
First let's implement our network in a concise fashion.
```
import numpy as np
from numpy.random import randn
X, y = make_blobs(n_samples= 100, n_features=2, random_state=42, centers=[[-0.5, -1], [1, 1]])
# adjust the sizes of our arrays
X = np.c_[np.ones(X.shape[0]), X]
print(X.shape)
y = y.reshape((y.shape[0], 1))
np.random.seed(2)
W = randn(3, 1)
print('* model params: {}'.format(W.tolist()))
eta = 1e-2 # learning rate
n_epochs = 50
for t in range(n_epochs):
# forward pass
y_pred = sigmoid(X.dot(W))
loss = np.sum((y_pred - y) ** 2)
print(t, loss)
# backprop
grad_y_pred = 2 * (y_pred - y)
grad_W = np.dot(X.T, grad_y_pred * y_pred * (1 - y_pred))
# update rule
W -= eta * grad_W
print('* new model params: {}'.format(W.tolist()))
```
### Modular implementation
Now let's create a class to represent our neural network to have more flexibility and modularity. This will prove to be useful later when we add more layers.
```
class SingleLayerNeuralNetwork:
"""A simple artificial neuron with a single layer and two inputs.
This type of network is called a Single Layer Neural Network and belongs to
the Feed-Forward Neural Networks. Here, the activation function is a sigmoid,
the loss is computed using the squared error between the target and
the prediction. Learning the parameters is achieved using back-propagation
and gradient descent
"""
def __init__(self, eta=0.01, rand_seed=42):
"""Initialisation routine."""
np.random.seed(rand_seed)
        self.W = np.random.randn(3, 1)  # weights
self.eta = eta # learning rate
self.loss_history = []
def sigmoid(self, x):
"""Our activation function."""
return 1 / (1 + np.exp(-x))
def sigmoid_grad(self, x):
"""Gradient of the sigmoid function."""
return self.sigmoid(x) * (1 - self.sigmoid(x))
def predict(self, X, bias_trick=True):
X = np.atleast_2d(X)
if bias_trick:
# bias trick: add a column of 1 to X
X = np.c_[np.ones((X.shape[0])), X]
return self.sigmoid(np.dot(X, self.W))
def loss(self, X, y, bias_trick=False):
"""Compute the squared error loss for a given set of inputs."""
y_pred = self.predict(X, bias_trick=bias_trick)
y_pred = y_pred.reshape((y_pred.shape[0], 1))
loss = np.sum((y_pred - y) ** 2)
return loss
def back_propagation(self, X, y):
"""Conduct backpropagation to update the weights."""
X = np.atleast_2d(X)
y_pred = self.sigmoid(np.dot(X, self.W)).reshape((X.shape[0], 1))
grad_y_pred = 2 * (y_pred - y)
grad_W = np.dot(X.T, grad_y_pred * y_pred * (1 - y_pred))
# update weights
        self.W -= self.eta * grad_W  # use the learning rate stored on the instance
def fit(self, X, y, n_epochs=10, method='batch', save_fig=False):
"""Perform gradient descent on a given number of epochs to update the weights."""
# bias trick: add a column of 1 to X
X = np.c_[np.ones((X.shape[0])), X]
self.loss_history.append(self.loss(X, y)) # initial loss
for i_epoch in range(n_epochs):
if method == 'batch':
# perform backprop on the whole training set (batch)
self.back_propagation(X, y)
# weights were updated, compute the loss
loss = self.loss(X, y)
self.loss_history.append(loss)
print(i_epoch, self.loss_history[-1])
else:
# here we update the weight for every data point (SGD)
for (xi, yi) in zip(X, y):
self.back_propagation(xi, yi)
# weights were updated, compute the loss
loss = self.loss(X, y)
self.loss_history.append(loss)
if save_fig:
self.plot_model(i_epoch, save=True, display=False)
def decision_boundary(self, x):
"""Return the decision boundary in 2D."""
return -self.W[0] / self.W[2] - self.W[1] / self.W[2] * x
def plot_model(self, i_epoch=-1, save=False, display=True):
"""Build a figure to vizualise how the model perform."""
xx0, xx1 = np.arange(-3, 3.1, 0.1), np.arange(-3, 4.1, 0.1)
XX0, XX1 = np.meshgrid(xx0, xx1)
# apply the model to the grid
y_an = np.empty(len(XX0.ravel()))
i = 0
for (x0, x1) in zip(XX0.ravel(), XX1.ravel()):
y_an[i] = self.predict(np.array([x0, x1]))
i += 1
y_an = y_an.reshape((len(xx1), len(xx0)))
figure = plt.figure(figsize=(12, 4))
ax1 = plt.subplot(1, 3, 1)
#ax1.set_title(r'$w_0=%.3f$, $w_1=%.3f$, $w_2=%.3f$' % (self.W[0], self.W[1], self.W[2]))
ax1.set_title("current prediction")
ax1.contourf(XX0, XX1, y_an, alpha=.5)
ax1.scatter(X[:, 0], X[:, 1], c=y.squeeze(), edgecolors='gray')
ax1.set_xlim(-3, 3)
ax1.set_ylim(-3, 4)
print(ax1.get_xlim())
x = np.array(ax1.get_xlim())
ax1.plot(x, self.decision_boundary(x), 'k-', linewidth=2)
ax2 = plt.subplot(1, 3, 2)
x = np.arange(3) # the label locations
rects1 = ax2.bar(x, [self.W[0, 0], self.W[1, 0], self.W[2, 0]])
ax2.set_title('model parameters')
ax2.set_xticks(x)
ax2.set_xticklabels([r'$w_0$', r'$w_1$', r'$w_2$'])
ax2.set_ylim(-1, 2)
ax2.set_yticks([0, 2])
ax2.axhline(xmin=0, xmax=2)
ax3 = plt.subplot(1, 3, 3)
ax3.plot(self.loss_history, c='lightgray', lw=2)
if i_epoch < 0:
i_epoch = len(self.loss_history) - 1
ax3.plot(i_epoch, self.loss_history[i_epoch], 'o')
ax3.set_title('loss evolution')
ax3.set_yticks([])
plt.subplots_adjust(left=0.05, right=0.98)
if save:
plt.savefig('an_%02d.png' % i_epoch)
if display:
plt.show()
plt.close()
```
### Train our model on the data set
Create two blobs with $n=10000$ data points.
Instantiate the model with $\eta=0.1$ and a random seed of 2.
Train the model using batch gradient descent for 100 epochs.
```
X, y = make_blobs(n_samples=10000, n_features=2, random_state=42, centers=[[-0.5, -1], [1, 1]])
y = y.reshape((y.shape[0], 1))
an1 = SingleLayerNeuralNetwork(eta=0.1, rand_seed=2)
print('* init model params: {}'.format(an1.W.tolist()))
print(an1.loss(X, y, bias_trick=True))
an1.fit(X, y, n_epochs=100, method='batch', save_fig=False)
print('* new model params: {}'.format(an1.W.tolist()))
```
Now that the model is trained, let's plot the results.
```
an1.plot_model()
```
Now try to train another network using SGD. Use only 1 epoch since with SGD, we are updating the weights with every training point (so $n$ times per epoch).
```
an2 = SingleLayerNeuralNetwork(eta=0.1, rand_seed=2)
print('* init model params: {}'.format(an2.W.tolist()))
an2.fit(X, y, n_epochs=1, method='SGD', save_fig=False)
print('* new model params: {}'.format(an2.W.tolist()))
```
Plot the difference in loss evolution between batch and stochastic gradient descent:
```
plt.plot(an1.loss_history[:], label='batch GD')
plt.plot(an2.loss_history[::100], label='stochastic GD')
#plt.ylim(0, 2000)
plt.legend()
plt.show()
an2.plot_model()
```
## Logistic regression
Our single layer network using the logistic function for activation is very similar to the logistic regression we saw in a previous tutorial. We can easily compare our result with the logistic regression using `sklearn` toolbox.
```
from sklearn.linear_model import LogisticRegression
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42, centers=[[-0.5, -1], [1, 1]])
log_reg = LogisticRegression(solver='lbfgs')
log_reg.fit(X, y)
print(log_reg.coef_)
print(log_reg.intercept_)
x0, x1 = np.meshgrid(
np.linspace(-3, 3.1, 62).reshape(-1, 1),
np.linspace(-3, 4.1, 72).reshape(-1, 1),
)
X_new = np.c_[x0.ravel(), x1.ravel()]
y_proba = log_reg.predict_proba(X_new)
zz = y_proba[:, 1].reshape(x0.shape)
plt.figure(figsize=(4, 4))
contour = plt.contourf(x0, x1, zz, alpha=0.5)
plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='gray')
# decision boundary
x_bounds = np.array([-3, 3])
boundary = -(log_reg.coef_[0][0] * x_bounds + log_reg.intercept_[0]) / log_reg.coef_[0][1]
plt.plot(x_bounds, boundary, "k-", linewidth=3)
plt.xlim(-3, 3)
plt.ylim(-3, 4)
plt.show()
```
# What are Tensors?
```
# -*- coding: utf-8 -*-
import numpy as np
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random input and output data
x = np.random.randn(N, D_in)
y = np.random.randn(N, D_out)
# Randomly initialize weights
w1 = np.random.randn(D_in, H)
w2 = np.random.randn(H, D_out)
learning_rate = 1e-6
for t in range(500):
# Forward pass: compute predicted y
h = x.dot(w1)
h_relu = np.maximum(h, 0)
y_pred = h_relu.dot(w2)
# Compute and print loss
loss = np.square(y_pred - y).sum()
print(t, loss)
# Backprop to compute gradients of w1 and w2 with respect to loss
grad_y_pred = 2.0 * (y_pred - y)
grad_w2 = h_relu.T.dot(grad_y_pred)
grad_h_relu = grad_y_pred.dot(w2.T)
grad_h = grad_h_relu.copy()
grad_h[h < 0] = 0
grad_w1 = x.T.dot(grad_h)
# Update weights
w1 -= learning_rate * grad_w1
w2 -= learning_rate * grad_w2
```
# PyTorch Tensors
Clearly modern deep neural networks are in need of more than what our beloved numpy can offer.
Here we introduce the most fundamental PyTorch concept: the *Tensor*. A PyTorch Tensor is conceptually identical to a numpy array: a Tensor is an n-dimensional array, and PyTorch provides many functions for operating on these Tensors. Like numpy arrays, PyTorch Tensors do not know anything about deep learning or computational graphs or gradients; they are a generic tool for scientific computing.
However unlike numpy, PyTorch Tensors can utilize GPUs to accelerate their numeric computations. To run a PyTorch Tensor on GPU, you simply need to cast it to a new datatype.
Here we use PyTorch Tensors to fit a two-layer network to random data. Like the numpy example above we need to manually implement the forward and backward passes through the network:
```
import torch
dtype = torch.FloatTensor
# dtype = torch.cuda.FloatTensor # Uncomment this to run on GPU
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random input and output data
x = torch.randn(N, D_in).type(dtype)
y = torch.randn(N, D_out).type(dtype)
# Randomly initialize weights
w1 = torch.randn(D_in, H).type(dtype)
w2 = torch.randn(H, D_out).type(dtype)
learning_rate = 1e-6
for t in range(500):
# Forward pass: compute predicted y
h = x.mm(w1)
h_relu = h.clamp(min=0)
y_pred = h_relu.mm(w2)
# Compute and print loss
loss = (y_pred - y).pow(2).sum()
print(t, loss)
# Backprop to compute gradients of w1 and w2 with respect to loss
grad_y_pred = 2.0 * (y_pred - y)
grad_w2 = h_relu.t().mm(grad_y_pred)
grad_h_relu = grad_y_pred.mm(w2.t())
grad_h = grad_h_relu.clone()
grad_h[h < 0] = 0
grad_w1 = x.t().mm(grad_h)
# Update weights using gradient descent
w1 -= learning_rate * grad_w1
w2 -= learning_rate * grad_w2
```
# Autograd
PyTorch Variables and autograd. The autograd package provides powerful functionality: the forward pass of your network defines a computational graph, where nodes in the graph are Tensors and edges are functions that produce output Tensors from input Tensors. Backpropagating through this graph then allows us to easily compute gradients.
Here we wrap the PyTorch Tensor in a Variable object, where a Variable represents a node in the computational graph. If x is a Variable then x.data is a Tensor and x.grad is another Variable holding the gradient of x with respect to some scalar value.
PyTorch Variables have the same API as PyTorch Tensors: any operation that you can do with a Tensor also works with a Variable; the only difference is that a Variable takes part in the computational graph, allowing us to automatically compute gradients.
```
# Use of Variables and Autograd in a 2-layer network with no need to manually implement backprop!
import torch
from torch.autograd import Variable
dtype = torch.FloatTensor
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold input and outputs and wrap them in Variables.
x = Variable(torch.randn(N, D_in).type(dtype), requires_grad=False) # requires_grad=False means no need to compute gradients
y = Variable(torch.randn(N, D_out).type(dtype), requires_grad=False)
# Create random Tensors to hold weights and wrap them in Variables.
# requires_grad=True here to compute gradients w.r.t Variables during a backprop pass.
w1 = Variable(torch.randn(D_in, H).type(dtype), requires_grad=True) # requires_grad=False means no need to compute gradients
w2 = Variable(torch.randn(H, D_out).type(dtype), requires_grad=True)
learning_rate = 1e-6
for t in range(500):
# Forward pass: compute predicted y using operations on Variables; these
# are exactly the same operations we used to compute the forward pass using
# Tensors, but we do not need to keep references to intermediate values since
# we are not implementing the backward pass by hand.
y_pred = x.mm(w1).clamp(min=0).mm(w2)
# Compute and print loss using operations on Variables.
# Now loss is a Variable of shape (1,) and loss.data is a Tensor of shape
# (1,); loss.data[0] is a scalar value holding the loss.
loss = (y_pred - y).pow(2).sum()
print(t, loss.data[0])
# Use autograd to compute the backward pass. This call will compute the
# gradient of loss with respect to all Variables with requires_grad=True.
# After this call w1.grad and w2.grad will be Variables holding the gradient
# of the loss with respect to w1 and w2 respectively.
loss.backward()
# Update weights using gradient descent; w1.data and w2.data are Tensors,
# w1.grad and w2.grad are Variables and w1.grad.data and w2.grad.data are
# Tensors.
w1.data -= learning_rate * w1.grad.data
w2.data -= learning_rate * w2.grad.data
# Manually zero the gradients after updating weights
w1.grad.data.zero_()
w2.grad.data.zero_()
```
# PyTorch: Defining new autograd functions
Under the hood, each primitive autograd operator is really two functions that operate on Tensors. The forward function computes output Tensors from input Tensors. The backward function receives the gradient of the output Tensors with respect to some scalar value, and computes the gradient of the input Tensors with respect to that same scalar value.
In PyTorch we can easily define our own autograd operator by defining a subclass of torch.autograd.Function and implementing the forward and backward functions. We can then use our new autograd operator by constructing an instance and calling it like a function, passing Variables containing input data.
In this example we define our own custom autograd function for performing the ReLU nonlinearity, and use it to implement our two-layer network:
```
# -*- coding: utf-8 -*-
import torch
from torch.autograd import Variable
class MyReLU(torch.autograd.Function):
"""
We can implement our own custom autograd Functions by subclassing
torch.autograd.Function and implementing the forward and backward passes
which operate on Tensors.
"""
def forward(self, input):
"""
In the forward pass we receive a Tensor containing the input and return a
Tensor containing the output. You can cache arbitrary Tensors for use in the
backward pass using the save_for_backward method.
"""
self.save_for_backward(input)
return input.clamp(min=0)
def backward(self, grad_output):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
"""
input, = self.saved_tensors
grad_input = grad_output.clone()
grad_input[input < 0] = 0
return grad_input
dtype = torch.FloatTensor
# dtype = torch.cuda.FloatTensor # Uncomment this to run on GPU
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold input and outputs, and wrap them in Variables.
x = Variable(torch.randn(N, D_in).type(dtype), requires_grad=False)
y = Variable(torch.randn(N, D_out).type(dtype), requires_grad=False)
# Create random Tensors for weights, and wrap them in Variables.
w1 = Variable(torch.randn(D_in, H).type(dtype), requires_grad=True)
w2 = Variable(torch.randn(H, D_out).type(dtype), requires_grad=True)
learning_rate = 1e-6
for t in range(500):
# Construct an instance of our MyReLU class to use in our network
relu = MyReLU()
# Forward pass: compute predicted y using operations on Variables; we compute
# ReLU using our custom autograd operation.
y_pred = relu(x.mm(w1)).mm(w2)
# Compute and print loss
loss = (y_pred - y).pow(2).sum()
print(t, loss.data[0])
# Use autograd to compute the backward pass.
loss.backward()
# Update weights using gradient descent
w1.data -= learning_rate * w1.grad.data
w2.data -= learning_rate * w2.grad.data
# Manually zero the gradients after updating weights
w1.grad.data.zero_()
w2.grad.data.zero_()
```
## What is a nn module
When building neural networks we frequently think of arranging the computation into layers, some of which have learnable parameters which will be optimized during learning.
In TensorFlow, packages like Keras, TensorFlow-Slim, and TFLearn provide higher-level abstractions over raw computational graphs that are useful for building neural networks.
In PyTorch, the nn package serves this same purpose. The nn package defines a set of Modules, which are roughly equivalent to neural network layers. A Module receives input Variables and computes output Variables, but may also hold internal state such as Variables containing learnable parameters. The nn package also defines a set of useful loss functions that are commonly used when training neural networks.
In this example we use the nn package to implement our two-layer network:
```
# -*- coding: utf-8 -*-
import torch
from torch.autograd import Variable
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs, and wrap them in Variables.
x = Variable(torch.randn(N, D_in))
y = Variable(torch.randn(N, D_out), requires_grad=False)
# Use the nn package to define our model as a sequence of layers. nn.Sequential
# is a Module which contains other Modules, and applies them in sequence to
# produce its output. Each Linear Module computes output from input using a
# linear function, and holds internal Variables for its weight and bias.
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
# The nn package also contains definitions of popular loss functions; in this
# case we will use Mean Squared Error (MSE) as our loss function.
loss_fn = torch.nn.MSELoss(size_average=False)
learning_rate = 1e-4
for t in range(500):
# Forward pass: compute predicted y by passing x to the model. Module objects
# override the __call__ operator so you can call them like functions. When
# doing so you pass a Variable of input data to the Module and it produces
# a Variable of output data.
y_pred = model(x)
# Compute and print loss. We pass Variables containing the predicted and true
# values of y, and the loss function returns a Variable containing the
# loss.
loss = loss_fn(y_pred, y)
print(t, loss.data[0])
# Zero the gradients before running the backward pass.
model.zero_grad()
# Backward pass: compute gradient of the loss with respect to all the learnable
# parameters of the model. Internally, the parameters of each Module are stored
# in Variables with requires_grad=True, so this call will compute gradients for
# all learnable parameters in the model.
loss.backward()
# Update the weights using gradient descent. Each parameter is a Variable, so
# we can access its data and gradients like we did before.
for param in model.parameters():
param.data -= learning_rate * param.grad.data
```
## PyTorch - optim
With a learning rate of $10^{-4}$, we now let the `optim` package handle the weight updates for us:
```
import torch
from torch.autograd import Variable
N, D_in, H, D_out = 64, 1000, 100, 10
x = Variable(torch.randn(N, D_in))
y = Variable(torch.randn(N, D_out), requires_grad=False)
model = torch.nn.Sequential( torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out)
)
loss_fxn = torch.nn.MSELoss(size_average=False)
learning_rate = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# We loop
for i in range(500):
y_pred = model(x)
loss = loss_fxn(y_pred, y)
    print(i, loss.data[0])
optimizer.zero_grad()
loss.backward()
optimizer.step()
```
## Custom nn module
For more complex computation, you can define your own module by subclassing nn.Module
```
import torch
from torch.autograd import Variable
class DoubleLayerNet(torch.nn.Module):
def __init__(self, D_in, H, D_out):
# initialize 2 instances of nn.Linear mods
super(DoubleLayerNet, self).__init__()
self.linear1 = torch.nn.Linear(D_in, H)
self.linear2 = torch.nn.Linear(H, D_out)
def forward(self, x):
# in this fxn we accept a Var of input data and
# return a Var of output data.
h_relu = self.linear1(x).clamp(min=0)
y_pred = self.linear2(h_relu)
return y_pred
# Next, again as usual, define batch size, input dimensions, hidden dimension and output dimension
N, D_in, H, D_out = 64, 1000, 100, 10
# Create some random tensors to hold both input and output
x = Variable(torch.randn(N, D_in))
y = Variable(torch.randn(N, D_out), requires_grad=False)
# Build model by instantiating class defined above
my_model = DoubleLayerNet(D_in, H, D_out)
# Build loss fxn and optimizer
criterion = torch.nn.MSELoss(size_average=False)
optimizer = torch.optim.SGD(my_model.parameters(), lr=1e-4)
# and then we loop
for i in range(500):
# fwd pass, calculate predicted y by passing x to the model
y_pred = my_model(x)
    # calculate and print loss
    loss = criterion(y_pred, y)
    print(i, loss.data[0])
    # Zero the gradients, perform a backward pass, and update the weights
optimizer.zero_grad()
loss.backward()
optimizer.step()
```
<small><small><i>
All the IPython Notebooks in **[Python Seaborn Module](https://github.com/milaan9/12_Python_Seaborn_Module)** lecture series by **[Dr. Milaan Parmar](https://www.linkedin.com/in/milaanparmar/)** are available @ **[GitHub](https://github.com/milaan9)**
</i></small></small>
<a href="https://colab.research.google.com/github/milaan9/12_Python_Seaborn_Module/blob/main/017_Seaborn_FacetGrid_Plot.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# FacetGrid
Welcome to another lecture on *Seaborn*! Our journey began with assigning *style* and *color* to our plots as per our requirement. Then we moved on to *visualize distribution of a dataset*, and *Linear relationships*, and further we dived into topics covering *plots for Categorical data*. Every now and then, we've also roughly touched customization aspects using underlying Matplotlib code. That indeed is the end of the types of plots offered by Seaborn, and only leaves us with widening the scope of usage of all the plots that we have learnt till now.
Our discussion in upcoming lectures is majorly going to focus on the core machinery of Seaborn, on top of which *Seaborn* builds the figures that we have been detailing previously. This of course isn't going to be a brand new topic, because every now and then I have used these in previous lectures, but from here on we're going to deal with each one of them specifically.
To introduce our new topic, i.e. **<span style="color:red">Grids</span>**, we shall at first list the options available. Majorly, there are just two aspects to our discussion on *Grids* that includes:
- **<span style="color:red">FacetGrid</span>**
- **<span style="color:red">PairGrid</span>**
Additionally, we also have a companion function for *PairGrid* to enhance execution speed of *PairGrid*, i.e.
- **<span style="color:red">Pairplot</span>**
Our discourse shall detail each one of these topics at length for better understanding. As we have already covered the statistical inference of each type of plot, our emphasis shall mostly be on scaling and parameter variety of known plots on these grids. So let us commence our journey with **[FacetGrid](http://seaborn.pydata.org/generated/seaborn.FacetGrid.html?highlight=facetgrid#seaborn.FacetGrid)** in this lecture.
## FacetGrid
The term **Facet** here refers to *a dimension*, or say an *aspect* or a feature, of a *multi-dimensional dataset*. This analysis is extremely useful when working with a multi-variate dataset which has a varied blend of datatypes, especially in the *Data Science* & *Machine Learning* domains, where generally you would be dealing with huge datasets. If you're a *working professional*, you know what I am talking about. And if you're a *fresher* or a *student*, just to give you an idea: in this era of *Big Data*, an average *CSV file* (which is generally the most common form), or even an RDBMS, would vary from gigabytes to terabytes of data. If you are dealing with *image/video/audio datasets*, then you may easily expect those to be in the *hundreds of gigabytes*.
On the other hand, the term **Grid** refers to any *framework with spaced bars that are parallel to or cross each other, to form a series of squares or rectangles*. Statistically, these *Grids* are also used to represent and understand an entire *population* or just a *sample space* out of it. In general, these are pretty powerful tool for presentation, to describe our dataset and to study the *interrelationship*, or *correlation* between *each facet* of any *environment*.
Subplot grid for plotting conditional relationships.
The FacetGrid is an object that links a Pandas DataFrame to a matplotlib figure with a particular structure.
In particular, FacetGrid is used to draw plots with multiple Axes where each Axes shows the same relationship conditioned on different levels of some variable. It’s possible to condition on up to three variables by assigning variables to the rows and columns of the grid and using different colors for the plot elements.
The general approach to plotting here is called “small multiples”, where the same kind of plot is repeated multiple times, and the specific use of small multiples to display the same relationship conditioned on one or more other variables is often called a “trellis plot”.
The basic workflow is to initialize the FacetGrid object with the dataset and the variables that are used to structure the grid. Then one or more plotting functions can be applied to each subset by calling **`FacetGrid.map()`** or **`FacetGrid.map_dataframe()`**. Finally, the plot can be tweaked with other methods to do things like change the axis labels, use different ticks, or add a legend. See the detailed code examples below for more information.
To satisfy our curiosity, let us plot a simple **<span style="color:red">FacetGrid</span>** before continuing on with our discussion. And to do that, we shall once again quickly import our package dependencies and set the aesthetics for future use with built-in datasets.
```
# Importing intrinsic libraries:
import numpy as np
import pandas as pd
np.random.seed(101)
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.set(style="whitegrid", palette="rocket")
import warnings
warnings.filterwarnings("ignore")
# Let us also get tableau colors we defined earlier:
tableau_20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scaling above RGB values to [0, 1] range, which is Matplotlib acceptable format:
for i in range(len(tableau_20)):
r, g, b = tableau_20[i]
tableau_20[i] = (r / 255., g / 255., b / 255.)
# Loading built-in Tips dataset:
tips = sns.load_dataset("tips")
tips.head()
# Initialize a 2x2 grid of facets using the tips dataset:
sns.set(style="ticks", color_codes=True)
sns.FacetGrid(tips, row='time', col='smoker')
# Draw a univariate plot on each facet:
x = sns.FacetGrid(tips, col='time',row='smoker')
x = x.map(plt.hist,"total_bill")
bins = np.arange(0,65,5)
x = sns.FacetGrid(tips, col="time", row="smoker")
x =x.map(plt.hist, "total_bill", bins=bins, color="g")
# Plot a bivariate function on each facet:
x = sns.FacetGrid(tips, col="time", row="smoker")
x = x.map(plt.scatter, "total_bill", "tip", edgecolor="w")
# Assign one of the variables to the color of the plot elements:
x = sns.FacetGrid(tips, col="time", hue="smoker")
x = x.map(plt.scatter,"total_bill","tip",edgecolor = "w")
x =x.add_legend()
# Plotting a basic FacetGrid with Scatterplot representation:
ax = sns.FacetGrid(tips, col="sex", hue="smoker", size=5)
ax.map(plt.scatter, "total_bill", "tip", alpha=.6)
ax.add_legend()
```
This is a combined scatter representation of the Tips dataset that we have seen earlier as well, where the total tip generated against the total bill amount is drawn in accordance with gender and smoking practice. With this we can conclude how **FacetGrid** helps us visualize the distribution of a variable, or the relationship between multiple variables, separately within subsets of our dataset. Important to note here is that a Seaborn FacetGrid can only support up to **3-dimensional figures**, using the `row`, `column` and `hue` dimensions of the grid for *Categorical* and *Discrete* variables within our dataset.
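For instance, here is a minimal sketch that uses all three grid dimensions at once on the same Tips dataset (the particular choice of variables is just for illustration):
```
# rows by sex, columns by time, colours by smoking habit: a 3-dimensional FacetGrid
g = sns.FacetGrid(tips, row="sex", col="time", hue="smoker", size=4)
g.map(plt.scatter, "total_bill", "tip", alpha=.6)
g.add_legend()
```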
Let us now have a look at the *parameters* offered or supported by Seaborn for a **FacetGrid**:
**`seaborn.FacetGrid(data, row=None, col=None, hue=None, col_wrap=None, sharex=True, sharey=True, size=3, aspect=1, palette=None, row_order=None, col_order=None, hue_order=None, hue_kws=None, dropna=True, legend_out=True, despine=True, margin_titles=False, xlim=None, ylim=None, subplot_kws=None, gridspec_kws=None)`**
There seems to be few new parameters out here for us, so let us one-by-one understand their scope before we start experimenting with those on our plots:
- We are well acquainted with mandatory **`data`**, **`row`**, **`col`** and **`hue`** parameters.
- Next is **`col_wrap`**, which sets the **number of facet columns per row**, so that the *column facets* can "wrap" onto multiple rows instead of forming one long row.
- **`sharex`**, when declared **`False`**, gives each sub-plot its **own dedicated X-axis**; the same concept holds good for **`sharey`** and the Y-axis.
- **`size`** helps us determine the size of our grid-frame.
- We may also declare **`hue_kws`** parameter that lets us **control other aesthetics** of our plot.
- **`dropna`** drops all the **NULL variables** from the selected features; and **`legend_out`** places the Legend either inside or outside our plot, as we've already seen.
- **`margin_titles`** fetch the **feature names** from our dataset; and **`xlim`** & **`ylim`** additionally offers Matplotlib style limitation to each of our axes on the grid.
That pretty much seems to cover *intrinsic parameters* so let us now try to use them one-by-one with slight modifications:
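Before that, here is a minimal sketch of **`col_wrap`** and **`sharey`** in action (the particular values are arbitrary):
```
# wrap the four 'day' facets onto two rows and give each facet its own Y-axis
g = sns.FacetGrid(tips, col="day", col_wrap=2, sharey=False, size=3)
g.map(plt.hist, "total_bill")
```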
Let us begin by pulling the *Legend inside* our FacetGrid and *creating a Header* for our grid:
```
ax = sns.FacetGrid(tips, col="sex", hue="smoker", size=5, legend_out=False)
ax.map(plt.scatter, "total_bill", "tip", alpha=.6)
ax.add_legend()
plt.suptitle('Tip Collection based on Gender and Smoking', fontsize=11)
```
So declaring **`legend_out`** as **`False`** and creating a **Superhead title** using *Matplotlib* seems to be working great on our Grid. Customization on *Header size* gives us an add-on capability as well. Right now, we are going by default **`palette`** for **marker colors** which can be customized by setting to a different one. Let us try other parameters as well:
Actually, before we jump further into utilization of other parameters, let me quickly take you behind the curtain of this plot. As visible, we assigned **`ax`** as a variable to our **FacetGrid** for creating a visualization figure, and then plotted a **Scatterplot** on top of it, before decorating further with a *Legend* and a *Super Title*. So when we initialized the assignment of **`ax`**, the grid actually gets created using backend *Matplotlib figure and axes*, though it doesn't plot anything on top of it. This is when we call Scatterplot on our sample data, which in turn at the backend calls the **`FacetGrid.map()`** function to map this grid to our Scatterplot. We intended to draw a linear relation plot, and thus entered multiple variable names, i.e. **`Total Bill`** and the associated **`Tip`**, to form *facets*, or dimensions, of our grid.
```
# Change the size and aspect ratio of each facet:
x = sns.FacetGrid(tips, col="day", size=5, aspect=.5)
x =x.map(plt.hist, "total_bill", bins=bins)
# Specify the order for plot elements:
g = sns.FacetGrid(tips, col="smoker", col_order=["Yes", "No"])
g = g.map(plt.hist, "total_bill", bins=bins, color="m")
# Use a different color palette:
kws = dict(s=50, linewidth=.5, edgecolor="w")
g =sns.FacetGrid(tips, col="sex", hue="time", palette="Set1",\
hue_order=["Dinner", "Lunch"])
g = g.map(plt.scatter, "total_bill", "tip", **kws)
g.add_legend()
# Use a dictionary mapping hue levels to colors:
pal = dict(Lunch="seagreen", Dinner="gray")
g = sns.FacetGrid(tips, col="sex", hue="time", palette=pal,\
hue_order=["Dinner", "Lunch"])
g = g.map(plt.scatter, "total_bill", "tip", **kws)
g.add_legend()
# FacetGrid with boxplot
x = sns.FacetGrid(tips,col= 'day')
x = x.map(sns.boxplot,"total_bill","time")
```
Also important to note is the use of the **[matplotlib.pyplot.gca()](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.gca.html)** function, if required, to *set the current axes* on our Grid. This shall fetch the current Axes instance on our current figure matching the given keyword arguments or params, and if unavailable, it shall even create one.
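As a small illustrative sketch (not part of the lecture's own examples), the grid's individual Axes can also be reached through its **`axes`** attribute, with **`plt.gca()`** returning whichever Axes is currently active:
```
g = sns.FacetGrid(tips, col="sex", size=4)
g.map(plt.hist, "total_bill")
for facet_ax in g.axes.flat:  # every Matplotlib Axes of the grid
    facet_ax.axvline(tips["total_bill"].mean(), ls="--", color="gray")
print(plt.gca())  # the currently active Axes instance
```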
```
# Let us create a dummy DataFrame:
football = pd.DataFrame({
"Wins": [76, 64, 38, 78, 63, 45, 32, 46, 13, 40, 59, 80],
"Loss": [55, 67, 70, 56, 59, 69, 72, 24, 45, 21, 58, 22],
"Team": ["Arsenal"] * 4 + ["Liverpool"] * 4 + ["Chelsea"] * 4,
"Year": [2015, 2016, 2017, 2018] * 3})
```
Before I begin illustration using this DataFrame, on a lighter note, I would add a disclosure that this is a dummy dataset and holds no resemblance whatsoever to actual records of respective Soccer clubs. So if you're one among those die-hard fans of any of these clubs, kindly excuse me if the numbers don't tally, as they are all fabricated.
Here, **football** is kind of a *Time-series Pandas DataFrame* that in entirety reflects 4 features, where **`Wins`** and **`Loss`** variables represent the quarterly Scorecard of three soccer **`Teams`** for last four **`Years`**, from 2015 to 2018. Let us check how this DataFrame looks like:
```
football
```
This looks pretty good for our purpose, so now let us initialize our FacetGrid on top of it and try to obtain a time-indexed visualization with further plotting. In a production environment, to keep our solution scalable, this is generally done by defining a function for data manipulation, so we shall try that in this example:
```
# Defining a customizable function to be precise with our requirements & shall discuss it a little later:
# We shall be using a new type of plot here that I shall discuss in detail later on.
def football_plot(data, color):
sns.heatmap(data[["Wins", "Loss"]])
# 'margin_titles' won't necessarily guarantee desired results so better to be cautious:
ax = sns.FacetGrid(football, col="Team", size=5, margin_titles=True)
ax.map_dataframe(football_plot)
ax = sns.FacetGrid(football, col="Team", size=5)
ax.map(sns.kdeplot, "Wins", "Year", lw=2)  # kdeplot has no 'hist' argument, so it is dropped here
```
As visible, **Heatmap** plots rectangular boxes for data points as a color-encoded matrix, and this is a topic we shall be discussing in detail in another Lecture but for now, I just wanted you to have a preview of it, and hence used it on top of our **FacetGrid**. Another good thing to know with *FacetGrid* is **gridspec** module which allows Matplotlib params to be passed for drawing attention to a particular facet by increasing its size. To better understand, let us try to use this module now:
```
# Loading built-in Titanic Dataset:
titanic = sns.load_dataset("titanic")
# Assigning reformed `deck` column:
titanic = titanic.assign(deck=titanic.deck.astype(object)).sort_values("deck")
# Creating Grid and Plot:
ax = sns.FacetGrid(titanic, col="class", sharex=False, size=7,
gridspec_kws={"width_ratios": [3.5, 2, 2]})
ax.map(sns.boxplot, "deck", "age")
ax.set_titles(fontweight='bold', size=17)
```
Breaking it down, at first we import our built-in Titanic dataset, and then assign a new column, i.e. **`deck`**, using the Pandas **`.assign()`** function. Here we declare this new column as a component of the pre-existing **`deck`** column from the Titanic dataset, but as a sorted object. Then we create our *FacetGrid*, mentioning the DataFrame and the column on which the grids get segregated, with **`sharex=False`** so each class keeps its own deck categories while the shared Y-axis shows the **`Age`** of passengers. Next in action is our **grid keyword specification**, where we decide the *width ratio* of the plot that shall be passed on to these grids. Finally, we have our **Box Plot** representing values of the **`Age`** feature across respective decks.
Now let us try to use different axes with same size for multivariate plotting on Tips dataset:
```
# Loading built-in Tips dataset:
tips = sns.load_dataset("tips")
# Mapping a Scatterplot to our FacetGrid:
ax = sns.FacetGrid(tips, col="smoker", row="sex", size=3.5)
ax = (ax.map(plt.scatter, "total_bill", "tip", color=tableau_20[6]).set_axis_labels("Total Bill Generated (USD)", "Tip Amount"))
# Increasing size for subplot Titles & making it appear Bolder:
ax.set_titles(fontweight='bold', size=11)
```
**Scatterplot** dealing with data that has multiple variables is no new science for us, so instead let me highlight what **`.map()`** does for us. This function actually allows us to project our figure axes, in accordance with which our Scatterplot spreads the feature datapoints across the grids, depending upon the segregators. Here we have **`sex`** and **`smoker`** as our segregators (when I use the general term "segregator", it just refers to the columns on which we decide to determine the layout). This comes in really handy as we can pass *Matplotlib parameters* for further customization of our plot. At the end, when we add **`.set_axis_labels()`**, it gets easy for us to label our axes, but please note that this method shall work for you only when you're dealing with grids, hence you didn't observe me adopting this function while detailing various other plots.
- Let us now talk about the **`football_plot`** function we defined earlier with **football** DataFrame. The only reason I didn't speak of it then was because I wanted you to go through a few more parameter implementation before getting into this. There are **3 important rules for defining such functions** that are supported by **[FacetGrid.map](http://xarray.pydata.org/en/stable/generated/xarray.plot.FacetGrid.map.html)**:
- They must take array-like inputs as positional arguments, with the first argument corresponding to the **`X-Axis`** and the second argument corresponding to the **`Y-Axis`**.
- They must also accept two keyword arguments: **`color`** and **`label`**. If you want to use a **`hue`** variable, then these should get passed to the underlying plotting function. (As a side note: you may just catch **`**kwargs`** and not do anything with them, if they're not relevant to the specific plot you're making.)
- Lastly, when called, they must draw a plot on the "currently active" matplotlib Axes.
- Important to note is that there may be cases where your function draws a plot that looks correct without taking `x`, `y` positional inputs, and then it is better to just call, for example, **`ax.set_axis_labels("Column_1", "Column_2")`** after you use **`.map()`**, which should rename your axes properly. Alternatively, you may also want to do something like `ax.set(xticklabels=[...])` to get more meaningful ticks.
- Well, I am also quite stoked to mention another important function (though not that commonly used), that is **[`FacetGrid.map_dataframe()`](http://nullege.com/codes/search/axisgrid.FacetGrid.map_dataframe)**. The rules here are similar to **`FacetGrid.map`**, but the function you pass must accept a DataFrame input in a parameter called `data`, and instead of taking *array-like positional* inputs it takes *strings* that correspond to variables in that dataframe. Then on each iteration through the *facets*, the function will be called with the *input dataframe*, masked to just the values for that combination of **`row`**, **`col`**, and **`hue`** levels.
Another important point to note with both the above-mentioned functions is that the **`return`** value is ignored, so you don't really have to worry about it. Just for illustration purposes, let us consider drafting a function that just *draws a horizontal line* in each **`facet`** at **`y=2`** and ignores all the input data:
```
# That is all you require in your function:
def plot_func(x, y, color=None, label=None):
    plt.axhline(y=2)  # draw on the currently active Axes; apply it with ax.map(plot_func, <x_col>, <y_col>)
```
I know this function concept might look a little hazy at the moment, but once you have covered more on dates and matplotlib syntax in particular, the picture shall get much clearer for you.
Let us look at one more example of **`FacetGrid()`** and this time let us again create a synthetic DataFrame for this demonstration:
```
# Creating synthetic Data (Don't focus on how it's getting created):
units = np.linspace(0, 50)
A = [1., 18., 40., 100.]
df = []
for i in A:
V1 = np.sin(i * units)
V2 = np.cos(i * units)
df.append(pd.DataFrame({"units": units, "V_1": V1, "V_2": V2, "A": i}))
sample = pd.concat(df, axis=0)
# Previewing DataFrame:
sample.head(10)
sample.describe()
# Melting our sample DataFrame:
sample_melt = sample.melt(id_vars=['A', 'units'], value_vars=['V_1', 'V_2'])
# Creating plot:
ax = sns.FacetGrid(sample_melt, col='A', hue='A', palette="icefire", row='variable', sharey='row', margin_titles=True)
ax.map(plt.plot, 'units', 'value')
ax.add_legend()
```
This process shall come in handy if you ever wish to vertically stack rows of subplots on top of one another. You do not really have to focus on the process of creating dataset, as generally you will have your dataset provided with a problem statement. For our plot, you may just consider these visual variations as **[Sinusoidal waves](https://en.wikipedia.org/wiki/Sine_wave)**. I shall attach a link in our notebook, if you wish to dig deeper into what these are and how are they actually computed.
Our next lecture would be pretty much a small follow up to this lecture, where we would try to bring more of *Categorical data* to our **`FacetGrid()`**. Meanwhile, I would again suggest you to play around with analyzing and plotting datasets, as much as you can because visualization is a very important facet of *Data Science & Research*. And, I shall see you in our next lecture with **[Heat Map](https://github.com/milaan9/12_Python_Seaborn_Module/blob/main/018_Seaborn_Heat_Map.ipynb)**.
## Project 2: Exploring the Uganda's milk imports and exports
A country's economy depends, sometimes heavily, on its exports and imports. The United Nations Comtrade database provides data on global trade. It will be used to analyse Uganda's imports and exports of milk in 2015:
* How much does Uganda export and import and is the balance positive (more exports than imports)?
* Which are the main trading partners, i.e. from/to which countries does Uganda import/export the most?
* Which are the regular customers, i.e. which countries buy milk from Uganda every month?
* Which countries does Uganda both import from and export to?
```
import warnings
warnings.simplefilter('ignore', FutureWarning)
from pandas import *
%matplotlib inline
```
## Getting and preparing the data
The data is obtained from the [United Nations Comtrade](http://comtrade.un.org/data/) website, by selecting the following configuration:
- Type of Product: goods
- Frequency: monthly
- Periods: Jan - Dec 2015
- Reporter: Uganda
- Partners: all
- Flows: imports and exports
- HS (as reported) commodity codes: 401 (Milk and cream, neither concentrated nor sweetened) and 402 (Milk and cream, concentrated or sweetened)
```
LOCATION = 'comrade_milk_ug_jan_dec_2015.csv'
```
On reading in the data, the commodity code has to be read as a string, to not lose the leading zero.
```
import pandas as pd
milk = pd.read_csv(LOCATION, dtype={'Commodity Code':str})
milk.tail(2)
```
The data covers the months of 2015. Most columns are irrelevant for this analysis, or always contain the same value, like the year and reporter columns. The commodity code is transformed into a short but descriptive text and only the relevant columns are selected.
```
def milkType(code):
if code == '401': # neither concentrated nor sweetened
return 'unprocessed'
if code == '402': # concentrated or sweetened
return 'processed'
return 'unknown'
COMMODITY = 'Milk and cream'
milk[COMMODITY] = milk['Commodity Code'].apply(milkType)
MONTH = 'Period'
PARTNER = 'Partner'
FLOW = 'Trade Flow'
VALUE = 'Trade Value (US$)'
headings = [MONTH, PARTNER, FLOW, COMMODITY, VALUE]
milk = milk[headings]
milk.head()
```
The data contains the total imports and exports per month, under the 'World' partner. Those rows are removed to keep only the per-country data.
```
milk = milk[milk[PARTNER] != 'World']
milk.head()
milk.tail()
```
## Total trade flow
To answer the first question, 'how much does Uganda export and import and is the balance positive (more exports than imports)?', the dataframe is split into two groups: exports from Uganda and imports into Uganda. The trade values within each group are summed up to get the total trading.
```
grouped = milk.groupby([FLOW])
grouped[VALUE].aggregate(sum)
```
This shows a trade surplus of over 30 million dollars.
## Main trade partners
To address the second question, 'Which are the main trading partners, i.e. from/to which countries does Uganda import/export the most?', the dataframe is split by country instead, and then each group is aggregated for the total trade value. This is done separately for imports and exports. The result is sorted in descending order so that the main partners are at the top.
```
imports = milk[milk[FLOW] == 'Imports']
grouped = imports.groupby([PARTNER])
print('Uganda imports from', len(grouped), 'countries.')
print('The 5 biggest exporters to Uganda are:')
totalImports = grouped[VALUE].aggregate(sum).sort_values(inplace=False,ascending=False)
totalImports.head()
```
The import values can be plotted as a bar chart, making differences between countries easier to see.
```
totalImports.head(10).plot(kind='barh')
```
We can deduce that Switzerland is the smallest partnering country for Uganda's milk imports among those plotted.
```
exports = milk[milk[FLOW] == 'Exports']
grouped = exports.groupby([PARTNER])
print('Uganda exports to', len(grouped), 'countries.')
print('The 5 biggest importers from Uganda are:')
grouped[VALUE].aggregate(sum).sort_values(ascending=False,inplace=False).head()
```
## Regular importers
Given that there are two commodities, the third question, 'Which are the regular customers, i.e. which countries buy milk from Uganda every month?', is meant in the sense that a regular customer imports both commodities every month. This means that if the exports dataframe is grouped by country, a regular customer's group must contain one row per commodity for every month of the period (the check in the code below requires 20 rows). To see the countries, only the first month of one commodity has to be listed, as by definition it's the same countries every month and for the other commodity.
```
def buysEveryMonth(group):
reply = len(group) == 20
return reply
grouped = exports.groupby([PARTNER])
regular = grouped.filter(buysEveryMonth)
print(regular)
regular[(regular[MONTH] == 201501) & (regular[COMMODITY] == 'processed')]
```
Just over 5% of Uganda's total exports are due to these regular customers.
```
regular[VALUE].sum() / exports[VALUE].sum()
```
## Bi-directional trade
To address the fourth question,
'Which countries does the Uganda both import from and export to?', a pivot table is used to list the total export and import value for each country.
```
countries = pivot_table(milk, index=[PARTNER], columns=[FLOW],
values=VALUE, aggfunc=sum)
countries.head()
```
Removing the rows with a missing value will result in only those countries with a bi-directional trade flow with Uganda.
```
countries.dropna()
```
## Conclusions
The milk and cream trade of Uganda from January to December 2015 was analysed in terms of which countries Uganda mostly depends on for income (exports) and goods (imports). Over the period, Uganda had a trade surplus of over 1 million US dollars.
Kenya is the main partner, but it bought from Uganda almost triple the value of what it sold to Uganda.
Uganda exported to over 100 countries during the period, but only imported from 24 countries, the main ones (top five by trade value) being not all geographically close (Kenya, the Netherlands, the United Arab Emirates, Oman, and South Africa). Of these main import sources, only Kenya is also a main export destination.
Uganda is heavily dependent on its regular customers, the 10 countries that buy both types of milk and cream every month. They contribute three quarters of the total export value.
For some partners, though, the trade value (in US dollars) is suspiciously low, which raises questions about the data's accuracy.
```
from sklearn.datasets import load_iris # iris dataset
from sklearn import tree # for fitting model
# for the particular visualization used
from six import StringIO
import pydot
import os.path
# to display graphs
%matplotlib inline
import matplotlib.pyplot
# get dataset
iris = load_iris()
iris.keys()
import pandas
iris_df = pandas.DataFrame(iris.data)
iris_df.columns = iris.feature_names
iris_df['target'] = [iris.target_names[target] for target in iris.target]
iris_df.head()
iris_df.describe()
print(iris_df)
# choose two features to plot
x_feature = 0
y_feature = 3
#x = list(list(zip(*iris.data))[x_feature])
#y = list(list(zip(*iris.data))[y_feature])
x = iris.data[:, x_feature]
y = iris.data[:, y_feature]
# The data are in order by type (types of irises). Find out the border indexes of the types.
end_type_one = list(iris.target).index(1)
end_type_two = list(iris.target).index(2)
fig = matplotlib.pyplot.figure() # create graph
fig.suptitle('Two Features of the Iris Data Set') # set title
# set axis labels
matplotlib.pyplot.xlabel(iris.feature_names[x_feature])
matplotlib.pyplot.ylabel(iris.feature_names[y_feature])
# put the input data on the graph, with different colors and shapes for each type
scatter_0 = matplotlib.pyplot.scatter(x[:end_type_one], y[:end_type_one],
c="red", marker="o", label=iris.target_names[0])
scatter_1 = matplotlib.pyplot.scatter(x[end_type_one:end_type_two], y[end_type_one:end_type_two],
c="blue", marker="^", label=iris.target_names[1])
scatter_2 = matplotlib.pyplot.scatter(x[end_type_two:], y[end_type_two:],
c="green", marker="*", label=iris.target_names[2])
matplotlib.pyplot.legend(handles=[scatter_0, scatter_1, scatter_2]) # make legend
matplotlib.pyplot.show() # show the graph
print(iris.data)
print(x)
decision_tree = tree.DecisionTreeClassifier() # make model
decision_tree.fit(iris.data, iris.target) # fit model to data
# make pdf diagram of decision tree
dot_data = StringIO()
tree.export_graphviz(decision_tree, out_file=dot_data, feature_names=iris.feature_names, class_names=iris.target_names,
filled=True, rounded=True, special_characters=True)
graph = pydot.graph_from_dot_data(dot_data.getvalue())[0]
graph.write_pdf(os.path.expanduser("~/Desktop/introToML/ML/New Jupyter Notebooks/iris_decision_tree_regular.pdf"))
inputs = [iris.data[0], iris.data[end_type_one], iris.data[end_type_two]] # use the first input of each class
print('Class predictions: {0}'.format(list(iris.target_names[prediction] for prediction in decision_tree.predict(inputs)))) # print predictions
print('Probabilities:\n{0}'.format(decision_tree.predict_proba(inputs))) # print prediction probabilities
```
# Exercise Option #1 - Standard Difficulty
0. Submit the PDF you generated as a separate file in Canvas.
1. According to the PDF, a petal width <= 0.8 cm would tell you with high (100%) probability that you are looking at a setosa iris.
2. According to the PDF, you're supposed to look at the petal length, petal width, and sepal length to tell a virginica from a versicolor.
3. The array value at each node in the pdf shows how many data values of each class passed through the node.
4. The predictions always have a 100% probability because any data value you give will end up at one end node. Each end node has one class prediction.
5. Below I use a subset of the features (3/4). The new decision tree was completely different from the original: it had more nodes and a different overall shape. When looking at the original decision tree, most of the nodes separated data based on petal length or petal width. The one feature that the new tree does not use is petal width, which is the most likely cause for why the second tree had to use more nodes (it lacked a feature that would make it easy to distinguish the classes).
```
# Use 3/4 columns (the first, second, & third)
first_feature = 0
second_feature = 1
third_feature = 2
iris_inputs = iris.data[:, [first_feature, second_feature, third_feature]] # use only three columns of the data
decision_tree_with_portion = tree.DecisionTreeClassifier() # make model
decision_tree_with_portion.fit(iris_inputs, iris.target) # fit model to data
# make pdf diagram of decision tree
dot_data = StringIO()
tree.export_graphviz(decision_tree_with_portion, out_file=dot_data, feature_names=iris.feature_names[:3], class_names=iris.target_names,
filled=True, rounded=True, special_characters=True)
graph = pydot.graph_from_dot_data(dot_data.getvalue())[0]
graph.write_pdf(os.path.expanduser("~/Desktop/introToML/ML/New Jupyter Notebooks/iris_decision_tree_with_portion.pdf"))
new_inputs = [iris_inputs[0], iris_inputs[end_type_one], iris_inputs[end_type_two]] # make new inputs with iris_inputs, which only has three features per input
print('Class predictions: {0}'.format(list(iris.target_names[prediction] for prediction in decision_tree_with_portion.predict(new_inputs)))) # print predictions
print('Probabilities:\n{0}'.format(decision_tree_with_portion.predict_proba(new_inputs))) # print prediction probabilities
```
# Exercise Option #2 - Advanced Difficulty
Try fitting a Random Forest model to the iris data. See [this example](http://scikit-learn.org/stable/modules/ensemble.html#forest).
As seen below, the random forest & decision tree had the same F1 score (a perfect 1.0), meaning that they performed the same.
```
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html?highlight=random%20forest#sklearn.ensemble.RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
rand_forst = RandomForestClassifier() # make model
rand_forst = rand_forst.fit(iris.data, iris.target) # fit model
print('Class predictions: {0}'.format(list(iris.target_names[prediction] for prediction in rand_forst.predict(inputs)))) # print class predictions
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html?highlight=f1#sklearn.metrics.f1_score
from sklearn.metrics import f1_score
# get predictions for whole dataset
decision_tree_predictions = decision_tree.predict(iris.data)
rand_forst_predictions = rand_forst.predict(iris.data)
# print F1 scores
print ('Decision tree F1 score: {}'.format(f1_score(iris.target, decision_tree_predictions, average='weighted')))
print ('Random forest F1 score: {}'.format(f1_score(iris.target, rand_forst_predictions, average='weighted')))
```
## 1. Volatility changes over time
<p>What is financial risk? </p>
<p>Financial risk has many faces, and we measure it in many ways, but for now, let's agree that it is a measure of the possible loss on an investment. In financial markets, where we measure prices frequently, volatility (which is analogous to <em>standard deviation</em>) is an obvious choice to measure risk. But in real markets, volatility changes with the market itself. </p>
<p><img src="https://assets.datacamp.com/production/project_738/img/VolaClusteringAssetClasses.png" alt=""></p>
<p>In the picture above, we see the returns of four very different assets. All of them exhibit alternating regimes of low and high volatilities. The highest volatility is observed around the end of 2008 - the most severe period of the recent financial crisis.</p>
<p>In this notebook, we will build a model to study the nature of volatility in the case of US government bond yields.</p>
```
# Load the packages
library(xts)
library(readr)
# Load the data
yc_raw <- read_csv("datasets/FED-SVENY.csv")
# Convert the data into xts format
yc_all <- as.xts(x = yc_raw[, -1], order.by = yc_raw$Date)
# Show only the tail of the 1st, 5th, 10th, 20th and 30th columns
yc_all_tail <- tail(yc_all[,c(1,5,10, 20, 30)])
yc_all_tail
```
## 2. Plotting the evolution of bond yields
<p>In the output table of the previous task, we see the yields for some maturities.</p>
<p>These data include the whole yield curve. The yield of a bond is the price of the money lent. The higher the yield, the more money you receive on your investment. The yield curve has many maturities; in this case, it ranges from 1 year to 30 years. Different maturities have different yields, but yields of neighboring maturities are relatively close to each other and also move together.</p>
<p>Let's visualize the yields over time. We will see that the long yields (e.g. SVENY30) tend to be more stable in the long term, while the short yields (e.g. SVENY01) vary a lot. These movements are related to the monetary policy of the FED and economic cycles.</p>
```
library(viridis)
# Define plot arguments
yields <- yc_all
plot.type <- "single"
plot.palette <- viridis(n = 30)
asset.names <- colnames(yc_all)
# Plot the time series
plot.zoo(x = yc_all, plot.type = plot.type, col = plot.palette)
# Add the legend
legend(x = "topleft", legend = asset.names,
col = plot.palette, cex = 0.45, lwd = 3)
```
## 3. Make the difference
<p>In the output of the previous task, we see the level of bond yields for some maturities, but to understand how volatility evolves we have to examine the changes in the time series. Currently, we have yield levels; we need to calculate the changes in the yield levels. This is called "differencing" in time series analysis. Differencing also has the benefit of removing the time-dependent level from the series, bringing it closer to stationarity.</p>
```
# Differentiate the time series
ycc_all <- diff.xts(yc_all)
# Show the tail of the 1st, 5th, 10th, 20th and 30th columns
ycc_all_tail <- tail(ycc_all[, c(1, 5, 10, 20, 30)])
ycc_all_tail
```
## 4. The US yields are no exceptions, but maturity matters
<p>Now that we have a time series of the changes in US government yields let's examine it visually.</p>
<p>By taking a look at the time series from the previous plots, we see hints that the returns following each other have some unique properties:</p>
<ul>
<li>The direction (positive or negative) of a return is mostly independent of the previous day's return. In other words, you don't know if the next day's return will be positive or negative just by looking at the time series.</li>
<li>The magnitude of the return is similar to the previous day's return. That means, if markets are calm today, we expect the same tomorrow. However, in a volatile market (crisis), you should expect a similarly turbulent tomorrow.</li>
</ul>
```
# Define the plot parameters
yield.changes <- ycc_all
plot.type <- "multiple"
# Plot the differentiated time series
plot.zoo(x = yield.changes, plot.type = plot.type,
ylim = c(-0.5, 0.5), cex.axis = 0.7,
ylab = 1:30, col = plot.palette)
```
## 5. Let's dive into some statistics
<p>The statistical properties visualized earlier can be measured with analytical tools. The simplest method is to test for autocorrelation. Autocorrelation measures how strongly a time series' past values determine its future values. </p>
<ul>
<li>If the autocorrelation is close to 1, the next day's value will be very close to today's value. </li>
<li>If the autocorrelation is close to 0, the next day's value will be unaffected by today's value.</li>
</ul>
<p>Because we are interested in the recent evolution of bond yields, we will filter the time series for data from 2000 onward.</p>
```
# Filter for changes in and after 2000
ycc <- ycc_all["2000/",]
# Save the 1-year and 20-year maturity yield changes into separate variables
x_1 <- ycc[,"SVENY01"]
x_20 <- ycc[, "SVENY20"]
# Plot the autocorrelations of the yield changes
par(mfrow=c(2,2))
acf_1 <- acf(x_1)
acf_20 <- acf(x_20)
# Plot the autocorrelations of the absolute changes of yields
acf_abs_1 <- acf(abs(x_1))
acf_abs_20 <- acf(abs(x_20))
```
## 6. GARCH in action
<p>A Generalized AutoRegressive Conditional Heteroskedasticity (<a href="https://en.wikipedia.org/wiki/Autoregressive_conditional_heteroskedasticity">GARCH</a>) model is the most well known econometric tool to handle changing volatility in financial time series data. It assumes a hidden volatility variable that has a long-run average it tries to return to while the short-run behavior is affected by the past returns.</p>
<p>The most popular form of the GARCH model assumes that the volatility follows this process:</p>
<math>
σ<sup>2</sup><sub>t</sub> = ω + α ⋅ ε<sup>2</sup><sub>t-1</sub> + β ⋅ σ<sup>2</sup><sub>t-1</sub>
</math>
<p>where σ<sub>t</sub> is the current volatility, σ<sub>t-1</sub> is the last day's volatility and ε<sub>t-1</sub> is the last day's return. The estimated parameters are ω, α, and β.</p>
<p>For GARCH modeling we will use the <a href="https://cran.r-project.org/web/packages/rugarch/index.html"><code>rugarch</code></a> package developed by Alexios Ghalanos.</p>
```
library(rugarch)
# Specify the GARCH model with the skewed t-distribution
spec <- ugarchspec(distribution.model = "sstd")
# Fit the model
fit_1 <- ugarchfit(x_1, spec = spec)
# Save the volatilities and the rescaled residuals
vol_1 <- sigma(fit_1)
res_1 <- scale(residuals(fit_1, standardize = TRUE)) * sd(x_1) + mean(x_1)
# Plot the yield changes with the estimated volatilities and residuals
merge_1 <- merge.xts(x_1, vol_1, res_1)
plot.zoo(merge_1)
```
## 7. Fitting the 20-year maturity
<p>Let's do the same for the 20-year maturity. As we can see in the plot from Task 6, the bond yields of various maturities show similar but slightly different characteristics. These different characteristics can be the result of multiple factors such as the monetary policy of the FED or the fact that the investors might be different.</p>
<p>Are there differences between the 1-year maturity and 20-year maturity plots?</p>
```
# Fit the model
fit_20 <- ugarchfit(x_20, spec = spec)
# Save the volatilities and the rescaled residuals
vol_20 <- sigma(fit_20)
res_20 <- scale(residuals(fit_20, standardize = TRUE)) * sd(x_20) + mean(x_20)
# Plot the yield changes with the estimated volatilities and residuals
merge_20 <- merge.xts(x_20, vol_20, res_20)
plot.zoo(merge_20)
```
## 8. What about the distributions? (Part 1)
<p>From the plots in Task 6 and Task 7, we can see that the 1-year GARCH model shows a similar but more erratic behavior compared to the 20-year GARCH model. Not only does the 1-year model have greater volatility, but the volatility of its volatility is larger than that of the 20-year model. That brings us to two statistical facts of financial markets not mentioned yet. </p>
<ul>
<li>The unconditional (before GARCH) distribution of the yield differences has heavier tails than the normal distribution.</li>
<li>The distribution of the yield differences adjusted by the GARCH model has lighter tails than the unconditional distribution, but they are still heavier than the normal distribution.</li>
</ul>
<p>Let's find out what the fitted GARCH model did with the distribution we examined.</p>
```
# Calculate the kernel density for the 1-year maturity and residuals
density_x_1 <- density(x_1)
density_res_1 <- density(res_1)
# Plot the density diagram for the 1-year maturity and residuals
plot(density_x_1)
lines(density_res_1, col = "red")
# Add the normal distribution to the plot
norm_dist <- dnorm(seq(-0.4, 0.4, by = .01), mean = mean(x_1), sd = sd(x_1))
lines(seq(-0.4, 0.4, by = .01),
norm_dist,
col = "darkgreen"
)
# Add legend
legend <- c("Before GARCH", "After GARCH", "Normal distribution")
legend("topleft", legend = legend,
col = c("black", "red", "darkgreen"), lty=c(1,1))
```
## 9. What about the distributions? (Part 2)
<p>In the previous plot, we see that the two distributions from the GARCH models are different from the normal distribution of the data, but the tails, where the differences are the most profound, are hard to see. Using a Q-Q plot will help us focus in on the tails.</p>
<p>You can read an excellent summary of Q-Q plots <a href="https://stats.stackexchange.com/questions/101274/how-to-interpret-a-qq-plot">here</a>.</p>
```
# Define the data to plot: the 1-year maturity yield changes and residuals
data_orig <- x_1
data_res <- res_1
# Define the benchmark distribution
distribution <- qnorm
# Make the Q-Q plot of original data with the line of normal distribution
qqnorm(data_orig, ylim = c(-0.5, 0.5))
qqline(data_orig, distribution = distribution, col = "darkgreen")
# Make the Q-Q plot of GARCH residuals with the line of normal distribution
par(new=TRUE)
qqnorm(data_res * 0.614256270265139, col = "red", ylim = c(-0.5, 0.5))
qqline(data_res * 0.614256270265139, distribution = distribution, col = "darkgreen")
legend("topleft", c("Before GARCH", "After GARCH"), col = c("black", "red"), pch=c(1,1))
```
## 10. A final quiz
<p>In this project, we fitted a GARCH model to develop a better understanding of how bond volatility evolves and how it affects the probability distribution. In the final task, we will evaluate our model. Did the model succeed, or did it fail?</p>
```
# Q1: Did GARCH reveal how volatility changed over time? Yes or No?
(Q1 <- "Yes")
# Q2: Did GARCH bring the residuals closer to normal distribution? Yes or No?
(Q2 <- "Yes")
# Q3: Which time series of yield changes deviates more
# from a normally distributed white noise process? Choose 1 or 20.
(Q3 <- 1)
```
# T008 · Protein data acquisition: Protein Data Bank (PDB)
Authors:
- Anja Georgi, CADD seminar, 2017, Charité/FU Berlin
- Majid Vafadar, CADD seminar, 2018, Charité/FU Berlin
- Jaime Rodríguez-Guerra, Volkamer lab, Charité
- Dominique Sydow, Volkamer lab, Charité
__Talktorial T008__: This talktorial is part of the TeachOpenCADD pipeline described in the first TeachOpenCADD publication ([_J. Cheminform._ (2019), **11**, 1-7](https://jcheminf.biomedcentral.com/articles/10.1186/s13321-019-0351-x)), comprising talktorials T001-T010.
## Aim of this talktorial
In this talktorial, we conduct the groundwork for the next talktorial where we will generate a ligand-based ensemble pharmacophore for EGFR. Therefore, we
(i) fetch all PDB IDs for EGFR from the PDB database,
(ii) retrieve five protein-ligand structures, which have the best structural quality and are derived from X-ray crystallography, and
(iii) align all structures to each other in 3D, and extract and save the ligands to be used in the next talktorial.
### Contents in Theory
* Protein Data Bank (PDB)
* Python package `pypdb`
### Contents in Practical
* Select query protein
* Get all PDB IDs for query protein
* Get statistics on PDB entries for query protein
* Get meta information on PDB entries
* Filter and sort meta information on PDB entries
* Get meta information of ligands from top structures
* Draw top ligand molecules
* Create protein-ligand ID pairs
* Get the PDB structure files
* Align PDB structures
### References
* Protein Data Bank
([PDB website](http://www.rcsb.org/))
* `pypdb` python package
([_Bioinformatics_ (2016), **1**, 159-60](https://academic.oup.com/bioinformatics/article-lookup/doi/10.1093/bioinformatics/btv543), [documentation](http://www.wgilpin.com/pypdb_docs/html/))
* Molecular superposition with the python package `opencadd` ([repository](https://github.com/volkamerlab/opencadd))
## Theory
### Protein Data Bank (PDB)
The Protein Data Bank (PDB) is one of the most comprehensive structural biology information databases and a key resource in areas of structural biology, such as structural genomics and drug design ([PDB website](http://www.rcsb.org/)).
Structural data is generated from structural determination methods such as X-ray crystallography (most common method), nuclear magnetic resonance (NMR), and cryo electron microscopy (cryo-EM).
For each entry, the database contains (i) the 3D coordinates of the atoms and the bonds connecting these atoms for proteins, ligand, cofactors, water molecules, and ions, as well as (ii) meta information on the structural data such as the PDB ID, the authors, the deposition date, the structural determination method used and the structural resolution.
The structural resolution is a measure of the quality of the data that has been collected and has the unit Å (Angstrom). The lower the value, the higher the quality of the structure.
The PDB website offers a 3D visualization of the protein structures (with ligand interactions if available) and structure quality metrics, as can be seen for the PDB entry of an example epidermal growth factor receptor (EGFR) with the PDB ID [3UG5](https://www.rcsb.org/structure/3UG5).

Figure 1: The protein structure (in gray) with an interacting ligand (in green) is shown for an example epidermal growth factor receptor (EGFR) with the PDB ID 3UG5 (figure by Dominique Sydow).
### Python package `pypdb`
`pypdb` is a python programming interface for the PDB and works exclusively in Python 3 ([_Bioinformatics_ (2016), **1**, 159-60](https://academic.oup.com/bioinformatics/article-lookup/doi/10.1093/bioinformatics/btv543), [documentation](http://www.wgilpin.com/pypdb_docs/html/)).
This package facilitates the integration of automatic PDB searches within bioinformatics workflows and simplifies the process of performing multiple searches based on the results of existing searches.
It also allows an advanced querying of information on PDB entries.
The PDB currently uses a RESTful API that allows for the retrieval of information via standard HTTP vocabulary. `pypdb` converts these responses into XML strings.
## Practical
```
import collections
import logging
import pathlib
import time
import warnings
import pandas as pd
from tqdm.auto import tqdm
import redo
import requests_cache
import nglview
import pypdb
from rdkit.Chem import Draw
from rdkit.Chem import PandasTools
from opencadd.structure.superposition.api import align, METHODS
from opencadd.structure.core import Structure
# Disable some unneeded warnings
logger = logging.getLogger("opencadd")
logger.setLevel(logging.ERROR)
warnings.filterwarnings("ignore")
# cache requests -- this will speed up repeated queries to PDB
requests_cache.install_cache("rcsb_pdb", backend="memory")
# define paths
HERE = pathlib.Path(_dh[-1])
DATA = HERE / "data"
```
### Select query protein
We use EGFR as query protein for this talktorial. The UniProt ID of EGFR is `P00533`, which will be used in the following to query the PDB database.
### Get all PDB IDs for query protein
First, we get all PDB structures for our query protein EGFR, using the `pypdb` functions `make_query` and `do_search`.
```
search_dict = pypdb.make_query("P00533")
found_pdb_ids = pypdb.do_search(search_dict)
print("Sample PDB IDs found for query:", *found_pdb_ids[:3], "...")
print("Number of EGFR structures found:", len(found_pdb_ids))
```
### Get statistics on PDB entries for query protein
Next, we ask the question: How many PDB entries are deposited in the PDB for EGFR per year and how many in total?
Using `pypdb`, we can find all deposition dates of EGFR structures from the PDB database. The number of deposited structures was already determined and is needed to set the parameter `max_results` of the function `find_dates`.
```
# Query database
dates = pypdb.find_dates("P00533", max_results=len(found_pdb_ids))
# Example of the first three deposition dates
dates[:3]
```
We extract the year from the deposition dates and calculate a depositions-per-year histogram.
```
# Extract year
years = pd.Series([int(date[:4]) for date in dates])
bins = years.max() - years.min() + 1
axes = years.hist(bins=bins)
axes.set_ylabel("New entries per year")
axes.set_xlabel("Year")
axes.set_title("PDB entries for EGFR");
```
### Get meta information for PDB entries
We use `describe_pdb` to get meta information about the structures, which is stored per structure as a dictionary.
Note: we only fetch meta information on the PDB structures here; we do not fetch the structures themselves (3D coordinates) yet.
> The `redo.retriable` line is a _decorator_. This wraps the function and provides extra functionality. In this case, it will retry failed queries automatically (10 times maximum).
```
@redo.retriable(attempts=10, sleeptime=2)
def describe_one_pdb_id(pdb_id):
"""Fetch meta information from PDB."""
described = pypdb.describe_pdb(pdb_id)
if described is None:
print(f"! Error while fetching {pdb_id}, retrying ...")
raise ValueError(f"Could not fetch PDB id {pdb_id}")
return described
pdbs = [describe_one_pdb_id(pdb_id) for pdb_id in found_pdb_ids]
pdbs[0]
```
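For intuition, here is a minimal sketch of what such a retry decorator can look like in plain Python. This is an illustration only and not the actual `redo` implementation; the names and defaults below are assumptions.
```
import functools
import time
def retriable(attempts=10, sleeptime=2):
    """Return a decorator that retries the wrapped function on any exception (illustrative sketch)."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(1, attempts + 1):
                try:
                    return func(*args, **kwargs)
                except Exception:
                    if attempt == attempts:
                        raise  # give up after the final attempt
                    time.sleep(sleeptime)  # wait before retrying
        return wrapper
    return decorator
```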
### Filter and sort meta information on PDB entries
Since we want to use the information to filter for relevant PDB structures, we convert the data set from dictionary to DataFrame for easier handling.
```
pdbs = pd.DataFrame(pdbs)
pdbs.head()
print(f"Number of PDB structures for EGFR: {len(pdbs)}")
```
We start filtering our dataset based on the following criteria:
#### 1. Experimental method: X-ray diffraction
We only keep structures resolved by `X-RAY DIFFRACTION`, the most commonly used structure determination method.
```
pdbs = pdbs[pdbs.expMethod == "X-RAY DIFFRACTION"]
print(f"Number of PDB structures for EGFR from X-ray: {len(pdbs)}")
```
#### 2. Structural resolution
We only keep structures with a resolution of 3 Å or lower. The lower the resolution value, the higher the quality of the structure (i.e. the higher the certainty that the assigned 3D coordinates of the atoms are correct). Below 3 Å, atomic orientations can be determined, which is why this value is often used as a threshold for structures relevant to structure-based drug design.
```
pdbs.resolution = pdbs.resolution.astype(float) # convert to floats
pdbs = pdbs[pdbs.resolution <= 3.0]
print(f"Number of PDB entries for EGFR from X-ray with resolution <= 3.0 Angstrom: {len(pdbs)}")
```
We sort the data set by the structural resolution.
```
pdbs = pdbs.sort_values(["resolution"], ascending=True, na_position="last")
```
We check the top PDB structures (sorted by resolution):
```
pdbs.head()[["structureId", "resolution"]]
```
#### 3. Ligand-bound structures
Since we will create ensemble ligand-based pharmacophores in the next talktorial, we remove all PDB structures from our DataFrame which do not contain a bound ligand: we use the `pypdb` function `get_ligands` to check/retrieve the ligand(s) of a PDB structure. PDB-annotated ligands can be true ligands or cofactors, but also solvents and ions. In order to keep only ligand-bound structures, we (i) remove all structures without any annotated ligand and (ii) remove all structures that do not contain any ligand with a molecular weight (MW) greater than 100 Da (Dalton), since many solvents and ions weigh less. Note: this is a simple, but not comprehensive, exclusion of solvents and ions.
```
# Get all PDB IDs from DataFrame
pdb_ids = pdbs["structureId"].tolist()
# Remove structures
# (i) without ligand and
# (ii) without any ligands with molecular weight (MW) greater than 100 Da (Dalton)
@redo.retriable(attempts=10, sleeptime=2)
def get_ligands(pdb_id):
"""Decorate pypdb.get_ligands so it retries after a failure."""
return pypdb.get_ligands(pdb_id)
mw_cutoff = 100.0 # Molecular weight cutoff in Da
# This database query may take a moment
passed_pdb_ids = []
removed_pdb_ids = []
progressbar = tqdm(pdb_ids)
for pdb_id in progressbar:
progressbar.set_description(f"Processing {pdb_id}...")
ligand_dict = get_ligands(pdb_id)
# (i) Remove structure if no ligand present
if ligand_dict["ligandInfo"] is None:
removed_pdb_ids.append(pdb_id) # Store ligand-free PDB IDs
# (ii) Remove structure if not a single annotated ligand has a MW above mw_cutoff
else:
# Get ligand information
ligands = ligand_dict["ligandInfo"]["ligand"]
# Technicality: if only one ligand, cast dict to list (for the subsequent list comprehension)
if type(ligands) == dict:
ligands = [ligands]
# Get MW per annotated ligand
mw_list = [float(ligand["@molecularWeight"]) for ligand in ligands]
# Remove structure if not a single annotated ligand has a MW above mw_cutoff
if sum([mw > mw_cutoff for mw in mw_list]) == 0:
removed_pdb_ids.append(pdb_id) # Store ligand-free PDB IDs
else:
passed_pdb_ids.append(pdb_id) # Remove ligand-free PDB IDs from list
print(
"PDB structures without a ligand (removed from our data set):",
*removed_pdb_ids,
)
print("Number of structures with ligand:", len(passed_pdb_ids))
```
### Get meta information of ligands from top structures
In the next talktorial, we will build ligand-based ensemble pharmacophores from the top `top_num` structures with the highest resolution.
```
top_num = 8 # Number of top structures
selected_pdb_ids = passed_pdb_ids[:top_num]
selected_pdb_ids
```
The selected highest resolution PDB entries can contain ligands targeting different binding sites, e.g. allosteric and orthosteric ligands, which would hamper ligand-based pharmacophore generation. Thus, we will focus on the following 4 structures, which contain ligands in the orthosteric binding pocket. The code provided later in the notebook can be used to verify this.
```
selected_pdb_ids = ["5UG9", "5HG8", "5UG8", "3POZ"]
```
We fetch the PDB information about the top `top_num` ligands using `get_ligands`, to be stored as *csv* file (as dictionary per ligand).
If a structure contains several ligands, we select the largest ligand. Note: this is a simple, but not comprehensive, method to select the ligand bound in the binding site of a protein. This approach may also select a cofactor bound to the protein. Therefore, please check the automatically selected top ligands visually before further usage.
```
ligands_list = []
for pdb_id in selected_pdb_ids:
ligands = get_ligands(pdb_id)["ligandInfo"]["ligand"]
# Technicality: if only one ligand, cast dict to list (for the subsequent list comprehension)
if isinstance(ligands, dict):
ligands = [ligands]
weight = 0
    this_ligand = {}
    # If several ligands are contained, take the largest
for ligand in ligands:
if float(ligand["@molecularWeight"]) > weight:
this_ligand = ligand
weight = float(ligand["@molecularWeight"])
ligands_list.append(this_ligand)
# NBVAL_CHECK_OUTPUT
# Change the format to DataFrame
ligands = pd.DataFrame(ligands_list)
ligands
ligands.to_csv(DATA / "PDB_top_ligands.csv", header=True, index=False)
```
### Draw top ligand molecules
```
PandasTools.AddMoleculeColumnToFrame(ligands, "smiles")
Draw.MolsToGridImage(
mols=list(ligands.ROMol),
legends=list(ligands["@chemicalID"] + ", " + ligands["@structureId"]),
molsPerRow=top_num,
)
```
### Create protein-ligand ID pairs
```
# NBVAL_CHECK_OUTPUT
pairs = collections.OrderedDict(zip(ligands["@structureId"], ligands["@chemicalID"]))
pairs
```
### Align PDB structures
Since we want to build ligand-based ensemble pharmacophores in the next talktorial, it is necessary to align all structures to each other in 3D.
We will use the Python package `opencadd` ([repository](https://github.com/volkamerlab/opencadd)), which includes a 3D superposition subpackage to guide the structural alignment of the proteins. The approach superposes the structures based on residues matched by a sequence alignment. There are other methods in the package, but this simple one will be enough for the task at hand.
#### Get the PDB structure files
We now fetch the PDB structure files, i.e. the 3D coordinates of the protein, ligand (and, if available, other atomic or molecular entities such as cofactors, water molecules, and ions) from the PDB using the `opencadd` package (`Structure.from_pdbid`).
Available file formats are *pdb* and *cif*, which store the 3D coordinates of the atoms of the protein (and ligand, cofactors, water molecules, and ions) as well as information on bonds between atoms. Here, we work with *pdb* files.
```
# Download PDB structures
structures = [Structure.from_pdbid(pdb_id) for pdb_id in pairs]
structures
```
#### Extract protein and ligand
Extract protein and ligand from the structure in order to remove solvent and other artifacts of crystallography.
```
complexes = [
Structure.from_atomgroup(structure.select_atoms(f"protein or resname {ligand}"))
for structure, ligand in zip(structures, pairs.values())
]
complexes
# Write complex to file
for complex_, pdb_id in zip(complexes, pairs.keys()):
complex_.write(DATA / f"{pdb_id}.pdb")
```
#### Align proteins
Align complexes (based on protein atoms).
```
results = align(complexes, method=METHODS["mda"])
```
`nglview` can be used to visualize molecular data within Jupyter notebooks. With the next cell we will visualize our aligned protein-ligand complexes.
```
view = nglview.NGLWidget()
for complex_ in complexes:
view.add_component(complex_.atoms)
view
view.render_image(trim=True, factor=2, transparent=True);
view._display_image()
```
#### Extract ligands
```
ligands = [
Structure.from_atomgroup(complex_.select_atoms(f"resname {ligand}"))
for complex_, ligand in zip(complexes, pairs.values())
]
ligands
for ligand, pdb_id in zip(ligands, pairs.keys()):
ligand.write(DATA / f"{pdb_id}_lig.pdb")
```
We check the existence of all ligand *pdb* files.
```
ligand_files = []
for file in DATA.glob("*_lig.pdb"):
ligand_files.append(file.name)
ligand_files
```
We can also use `nglview` to depict the co-crystallized ligands alone. As we can see, the selected complexes contain ligands populating the same binding pocket and can thus be used in the next talktorial for ligand-based pharmacophore generation.
```
view = nglview.NGLWidget()
for component_id, ligand in enumerate(ligands):
view.add_component(ligand.atoms)
view.remove_ball_and_stick(component=component_id)
view.add_licorice(component=component_id)
view
view.render_image(trim=True, factor=2, transparent=True);
view._display_image()
```
## Discussion
In this talktorial, we learned how to retrieve protein and ligand meta information and structural information from the PDB. We retained only X-ray structures and filtered our data by resolution and ligand availability. Ultimately, we aimed for an aligned set of ligands to be used in the next talktorial for the generation of ligand-based ensemble pharmacophores.
In order to enrich information about ligands for pharmacophore modeling, it is advisable to not only filter by PDB structure resolution, but also to check for ligand diversity (see **Talktorial 005** on molecule clustering by similarity) and to check for ligand activity (i.e. to include only potent ligands).
## Quiz
1. Summarize the kind of data that the Protein Data Bank contains.
2. Explain what the resolution of a structure stands for and how and why we filter for it in this talktorial.
3. Explain what an alignment of structures means and discuss the alignment performed in this talktorial.
# Image classification training on a DEBIAI project with a dataset generator
This tutorial shows how to classify images of flowers after inserting the project's contextual data into DEBIAI.
Based on the TensorFlow tutorial: https://www.tensorflow.org/tutorials/images/classification
```
# Import TensorFlow and other libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import PIL
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
# The pythonModule folder needs to be in the same folder
from debiai import debiai
```
## Download and explore the dataset
This tutorial uses a dataset of about 3,700 photos of flowers. The dataset contains 5 sub-directories, one per class:
daisy, dandelion, roses, sunflowers and tulips
```
import pathlib
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
```
## Create a dataset
```
# Define some parameters for the loader:
batch_size = 32
img_height = 180
img_width = 180
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="training",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
class_names = train_ds.class_names
print(class_names)
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
```
## Insert the project contextual data in DEBIAI
```
# Creation of the DEBIAI project block structure
DEBIAI_block_structure = [
{
"name": "image_id",
"groundTruth": [
{ "name": "class", "type": "text"},
],
"contexts": [
{ "name": "img_path", "type": "text"},
]
}
]
```
#### Converting some of the project data into a dataframe
In this example, it is done by creating a pandas dataframe.
More details here:
https://git.irt-systemx.fr/ML/DEBIAI/pythonModule#adding-samples
```
# Creation of a dataframe with the same columns as the block structure
data = {"image_id": [], "class": [], "img_path": []}
i = 0
for class_name in class_names:
images = list(data_dir.glob(class_name + '/*'))
for image in images:
data["image_id"].append(i)
data["class"].append(class_name)
data["img_path"].append(str(image))
i += 1
df = pd.DataFrame(data=data)
df
# Creation of a DEBIAI instance
DEBIAI_BACKEND_URL = 'http://localhost:3000/'
DEBIAI_PROJECT_NAME = 'Image classification demo'
my_debiai = debiai.Debiai(DEBIAI_BACKEND_URL)
# Creation of a DEBIAI project if it doesn't exist
debiai_project = my_debiai.get_project(DEBIAI_PROJECT_NAME)
if not debiai_project :
debiai_project = my_debiai.create_project(DEBIAI_PROJECT_NAME)
debiai_project
# Set the project block_structure if not already done
if not debiai_project.block_structure_defined():
debiai_project.set_blockstructure(DEBIAI_block_structure)
debiai_project.get_block_structure()
# Adding the dataframe
debiai_project.add_samples_pd(df, get_hash=False)
```
## Create the model
```
num_classes = len(class_names)
model = Sequential([
layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
layers.Conv2D(16, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dense(num_classes)
])
# Compile the model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.summary()
```
## Train the model with the DEBIAI Dataset generator
```
# Because DEBIAI doesn't store the images used to train the models, we provide a function that takes a sample's information (based on the given block_structure) and returns the corresponding model input
def model_input_from_debiai_sample(debiai_sample: dict):
# "image_id", "class", "img_path"
img = keras.preprocessing.image.load_img(
debiai_sample['img_path'], target_size=(img_height, img_width))
img_array = keras.preprocessing.image.img_to_array(img)
return tf.expand_dims(img_array, 0) # Create a batch
# TF generated dataset
train_dataset_imported = debiai_project.get_tf_dataset_with_provided_inputs(
model_input_from_debiai_sample,
output_types=(tf.float32, tf.int32),
output_shapes=([None, img_height, img_width, 3], [1, ]),
classes=class_names
)
AUTOTUNE = tf.data.AUTOTUNE
train_dataset_imported = train_dataset_imported.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
# get_tf_dataset_with_provided_inputs also works with a selection
# Train the model
epochs = 3
model.fit(train_dataset_imported, epochs=epochs)
```
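As an optional sanity check, which is not part of the original DEBIAI tutorial, the trained model can also be evaluated on the validation split (`val_ds`) created earlier:
```
# Optional check (an addition for illustration, not part of the DEBIAI workflow):
# evaluate the trained model on the validation split built earlier
val_loss, val_accuracy = model.evaluate(val_ds)
print("Validation accuracy: {:.3f}".format(val_accuracy))
```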
# DCGAN - Create Images from Random Numbers!
### Generative Adversarial Networks
Ever since Ian Goodfellow and colleagues [introduced the concept of Generative Adversarial Networks (GANs)](https://arxiv.org/abs/1406.2661), GANs have been a popular topic in the field of AI. GANs are an application of unsupervised learning - you don't need labels for your dataset in order to train a GAN.
The GAN framework composes of two neural networks: a generator network and a discriminator network.
The generator's job is to take a set of random numbers and produce data (such as images or text).
The discriminator then takes in that data, as well as samples of real data from a dataset, and tries to determine whether it is "fake" (created by the generator network) or "real" (from the original dataset).
During training, the two networks play a game against each other.
The generator tries to create realistic data, so that it can fool the discriminator into thinking that the data it generated is from the original dataset. At the same time, the discriminator tries to not be fooled - it learns to become better at determining if data is real or fake.
Since the two networks are fighting in this game, they can be seen as adversaries, which is where the term "Generative Adversarial Network" comes from.
### Deep Convolutional Generative Adversarial Networks
This notebook takes a look at Deep Convolutional Generative Adversarial Networks (DCGANs), which combine Convolutional Neural Networks (CNNs) and GANs.
We will create a DCGAN that is able to create images of handwritten digits from random numbers.
The tutorial uses the neural net architecture and guidelines outlined in [this paper](https://arxiv.org/abs/1511.06434), and the MNIST dataset.
## How to Use This Tutorial
You can use this tutorial by executing each snippet of python code in order as it appears in the notebook.
In this tutorial, we will train a DCGAN on MNIST, which will ultimately produce two neural networks:
- The first net is the "generator" and creates images of handwritten digits from random numbers.
- The second net is the "discriminator" and determines if the image created by the generator is real (a realistic looking image of handwritten digits) or fake (an image that doesn't look like it came from the original dataset).
Apart from creating a DCGAN, you'll also learn:
- How to manipulate and iterate through batches of images that you can feed into your neural network.
- How to create a custom MXNet data iterator that generates random numbers from a normal distribution.
- How to create a custom training process in MXNet, using lower level functions from the [MXNet Module API](http://mxnet.io/api/python/module.html) such as `.bind()` `.forward()` and `.backward()`. The training process for a DCGAN is more complex than that of many other neural networks, so we need to use these functions instead of the higher level `.fit()` function.
- How to visualize images as they are going through the training process
## Prerequisites
This notebook assumes you're familiar with the concept of CNN's and have implemented one in MXNet. If you haven't, check out [this tutorial](https://github.com/dmlc/mxnet-notebooks/blob/master/python/tutorials/mnist.ipynb), which walks you through implementing a CNN in MXNet. You should also be familiar with the concept of logistic regression.
Having a basic understanding for MXNet data iterators helps, since we'll create a custom Data Iterator to iterate though random numbers as inputs to our generator network. Take a look at [this tutorial](https://github.com/dmlc/mxnet-notebooks/blob/master/python/basic/data.ipynb) for a better understanding of how MXNet `DataIter` works.
This example is designed to be trained on a single GPU. Training this network on CPU can be slow, so it's recommended that you use a GPU for training.
To complete this tutorial, you need:
- [MXNet](http://mxnet.io/get_started/setup.html#overview)
- [Python 2.7](https://www.python.org/download/releases/2.7/), and the following libraries for Python:
- [Numpy](http://www.numpy.org/) - for matrix math
- [OpenCV](http://opencv.org/) - for image manipulation
- [Scikit-learn](http://scikit-learn.org/) - to easily get our dataset
- [Matplotlib](https://matplotlib.org/) - to visualize our output
## The Data
We need two pieces of data to train our DCGAN:
1. Images of handwritten digits from the MNIST dataset
2. Random numbers from a normal distribution
Our generator network will use the random numbers as the input to produce images of handwritten digits, and our discriminator network will use images of handwritten digits from the MNIST dataset to determine if images produced by our generator are realistic.
We are going to use the python library, scikit-learn, to get the MNIST dataset. Scikit-learn comes with a function that gets the dataset for us, which we will then manipulate to create our training and testing inputs.
The MNIST dataset contains 70,000 images of handwritten digits. Each image is 28x28 pixels in size.
To create random numbers, we're going to create a custom MXNet data iterator, which will return random numbers from a normal distribution as we need them.
## Prepare the Data
### 1. Preparing the MNIST dataset
Let's start by preparing our handwritten digits from the MNIST dataset. We import the fetch_mldata function from scikit-learn, and use it to get the MNIST dataset. Notice that its shape is 70000x784. This contains the 70,000 images, one per row, with the 784 pixels of each image in the columns of that row. Each image is 28x28 pixels, but has been flattened so that all 784 pixels are represented in a single list.
```
from sklearn.datasets import fetch_mldata
mnist = fetch_mldata('MNIST original')
mnist.data.shape
```
Next, we'll shuffle the handwritten digits by using numpy to create a random permutation of the dataset's rows (images). We'll then reshape our dataset from 70000x784 to 70000x28x28, so that every image in our dataset is arranged into a 28x28 grid, where each cell in the grid represents 1 pixel of the image.
```
import numpy as np
#Use a seed so that we get the same random permutation each time
np.random.seed(1)
p = np.random.permutation(mnist.data.shape[0])
X = mnist.data[p]
X = X.reshape((70000, 28, 28))
```
Since the DCGAN that we're creating takes in a 64x64 image as the input, we'll use OpenCV to resize each 28x28 image to 64x64:
```
import cv2
X = np.asarray([cv2.resize(x, (64,64)) for x in X])
```
Each pixel in our 64x64 image is represented by a number between 0 and 255 that represents the intensity of the pixel. However, we want to input numbers between -1 and 1 into our DCGAN, as suggested by the research paper. To rescale our pixels to be in the range of -1 to 1, we'll divide each pixel by (255/2). This puts our images on a scale of 0-2. We can then subtract 1 to get them in the range of -1 to 1.
```
X = X.astype(np.float32)/(255.0/2) - 1.0
```
Ultimately, images are fed into our neural net as a 70000x3x64x64 array, but they are currently in a 70000x64x64 array. We need to add 3 channels to our images. Typically, when we are working with images, the 3 channels represent the red, green, and blue components of each image. Since the MNIST dataset is grayscale, we only need 1 channel to represent the data, so we replicate that single channel across all 3 channels (this is what `np.tile` does below):
```
X = X.reshape((70000, 1, 64, 64))
X = np.tile(X, (1, 3, 1, 1))
```
Finally, we'll put our images into MXNet's NDArrayIter, which will allow MXNet to easily iterate through our images during training. We'll also split the images into batches, with 64 images in each batch. Every time we iterate, we'll get a 4 dimensional array with size `(64, 3, 64, 64)`, representing a batch of 64 images.
```
import mxnet as mx
batch_size = 64
image_iter = mx.io.NDArrayIter(X, batch_size=batch_size)
```
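If you want to confirm the batch layout described above, a quick optional check (an addition to the original tutorial) looks like this:
```
# Fetch one batch, confirm its shape, then reset the iterator before training
first_batch = image_iter.next()
print(first_batch.data[0].shape)  # expected: (64, 3, 64, 64)
image_iter.reset()
```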
## 2. Preparing Random Numbers
We need to input random numbers from a normal distribution to our generator network, so we'll create an MXNet DataIter that produces random numbers for each training batch. The `DataIter` is the base class of [MXNet's Data Loading API](http://mxnet.io/api/python/io.html). Below, we create a class called `RandIter` which is a subclass of `DataIter`. If you want to know more about how MXNet data loading works in python, please look at [this notebook](https://github.com/dmlc/mxnet-notebooks/blob/master/python/basic/data.ipynb). We use MXNet's built in `mx.random.normal` function in order to return the normally distributed random numbers every time we iterate.
```
class RandIter(mx.io.DataIter):
def __init__(self, batch_size, ndim):
self.batch_size = batch_size
self.ndim = ndim
self.provide_data = [('rand', (batch_size, ndim, 1, 1))]
self.provide_label = []
def iter_next(self):
return True
def getdata(self):
#Returns random numbers from a gaussian (normal) distribution
#with mean=0 and standard deviation = 1
return [mx.random.normal(0, 1.0, shape=(self.batch_size, self.ndim, 1, 1))]
```
When we initialize our `RandIter`, we need to provide two numbers: the batch size and how many random numbers we want to produce a single image from. This number is referred to as `Z`, and we'll set this to 100. This value comes from the research paper on the topic. Every time we iterate and get a batch of random numbers, we will get a 4 dimensional array with shape: `(batch_size, Z, 1, 1)`, which in our example is `(64, 100, 1, 1)`.
```
Z = 100
rand_iter = RandIter(batch_size, Z)
```
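Again as an optional check (an illustration, not part of the original tutorial), we can confirm that our custom `RandIter` produces latent vectors with the expected shape:
```
# Draw one batch of latent vectors from the custom iterator and inspect its shape
z_batch = rand_iter.getdata()[0]
print(z_batch.shape)  # expected: (64, 100, 1, 1)
```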
## Create the Model
Our model has two networks that we will train together - the generator network and the disciminator network.
Below is an illustration of our generator network:
<img src="dcgan-model.png">
Source: https://arxiv.org/abs/1511.06434
The discriminator works exactly the same way but in reverse - using convolutional layers instead of deconvolutional layers to take an image and determine if it is real or fake.
The DCGAN paper recommends the following best practices for architecting DCGANs:
- Replace any pooling layers with strided convolutions (discriminator) and fractional-strided convolutions (generator).
- Use batchnorm in both the generator and the discriminator.
- Remove fully connected hidden layers for deeper architectures.
- Use ReLU activation in generator for all layers except for the output, which uses Tanh.
- Use LeakyReLU activation in the discriminator for all layers.
Our model will implement these best practices.
### The Generator
Let's start off by defining the generator network:
```
no_bias = True
fix_gamma = True
epsilon = 1e-5 + 1e-12
rand = mx.sym.Variable('rand')
g1 = mx.sym.Deconvolution(rand, name='g1', kernel=(4,4), num_filter=1024, no_bias=no_bias)
gbn1 = mx.sym.BatchNorm(g1, name='gbn1', fix_gamma=fix_gamma, eps=epsilon)
gact1 = mx.sym.Activation(gbn1, name='gact1', act_type='relu')
g2 = mx.sym.Deconvolution(gact1, name='g2', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=512, no_bias=no_bias)
gbn2 = mx.sym.BatchNorm(g2, name='gbn2', fix_gamma=fix_gamma, eps=epsilon)
gact2 = mx.sym.Activation(gbn2, name='gact2', act_type='relu')
g3 = mx.sym.Deconvolution(gact2, name='g3', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=256, no_bias=no_bias)
gbn3 = mx.sym.BatchNorm(g3, name='gbn3', fix_gamma=fix_gamma, eps=epsilon)
gact3 = mx.sym.Activation(gbn3, name='gact3', act_type='relu')
g4 = mx.sym.Deconvolution(gact3, name='g4', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=128, no_bias=no_bias)
gbn4 = mx.sym.BatchNorm(g4, name='gbn4', fix_gamma=fix_gamma, eps=epsilon)
gact4 = mx.sym.Activation(gbn4, name='gact4', act_type='relu')
g5 = mx.sym.Deconvolution(gact4, name='g5', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=3, no_bias=no_bias)
generatorSymbol = mx.sym.Activation(g5, name='gact5', act_type='tanh')
```
Our generator starts with random numbers that will be obtained from the `RandIter` we created earlier, so we created the `rand` variable for this input.
We then start building the model with a Deconvolution layer (sometimes called a 'fractionally strided' layer). We apply batch normalization and ReLU activation after the Deconvolution layer.
We repeat this process 4 times, applying a `(2,2)` stride and `(1,1)` pad at each Deconvolutional layer, which doubles the size of our image at each layer. By creating these layers, our generator network has to learn to upsample our input vector of random numbers, `Z`, at each layer, so that the network outputs a final image. We also halve the number of filters at each layer, reducing the channel dimensionality as the spatial size grows. Ultimately, our output layer is a 64x64x3 layer, representing the size and channels of our image. We use tanh activation instead of relu on the last layer, as recommended by the research on DCGANs. The outputs of the neurons in the final layer (`gact5`, stored as `generatorSymbol`) represent the pixels of the generated image.
Notice we used 3 parameters to help us create our model: `no_bias`, `fix_gamma`, and `epsilon`.
Neurons in our network won't have a bias added to them; this seems to work better in practice for the DCGAN.
In our batch norm layers, we set `fix_gamma=True`, which means `gamma=1` for all of our batch norm layers.
`epsilon` is a small number that gets added to the batch norm denominator so that we don't end up dividing by zero. By default, CuDNN requires that this number is greater than `1e-5`, so we add a tiny value to `1e-5`, ensuring the value stays small while satisfying that requirement.
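As a quick optional check (not in the original tutorial), you can ask MXNet to infer the generator's output shape from the symbol and confirm that it really produces 3-channel 64x64 images:
```
# Infer the generator's output shape for one batch of latent vectors
_, out_shapes, _ = generatorSymbol.infer_shape(rand=(batch_size, Z, 1, 1))
print(out_shapes)  # expected: [(64, 3, 64, 64)]
```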
### The Discriminator
Let's now create our discriminator network, which will take in images of handwritten digits from the MNIST dataset and images created by the generator network:
```
data = mx.sym.Variable('data')
d1 = mx.sym.Convolution(data, name='d1', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=128, no_bias=no_bias)
dact1 = mx.sym.LeakyReLU(d1, name='dact1', act_type='leaky', slope=0.2)
d2 = mx.sym.Convolution(dact1, name='d2', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=256, no_bias=no_bias)
dbn2 = mx.sym.BatchNorm(d2, name='dbn2', fix_gamma=fix_gamma, eps=epsilon)
dact2 = mx.sym.LeakyReLU(dbn2, name='dact2', act_type='leaky', slope=0.2)
d3 = mx.sym.Convolution(dact2, name='d3', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=512, no_bias=no_bias)
dbn3 = mx.sym.BatchNorm(d3, name='dbn3', fix_gamma=fix_gamma, eps=epsilon)
dact3 = mx.sym.LeakyReLU(dbn3, name='dact3', act_type='leaky', slope=0.2)
d4 = mx.sym.Convolution(dact3, name='d4', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=1024, no_bias=no_bias)
dbn4 = mx.sym.BatchNorm(d4, name='dbn4', fix_gamma=fix_gamma, eps=epsilon)
dact4 = mx.sym.LeakyReLU(dbn4, name='dact4', act_type='leaky', slope=0.2)
d5 = mx.sym.Convolution(dact4, name='d5', kernel=(4,4), num_filter=1, no_bias=no_bias)
d5 = mx.sym.Flatten(d5)
label = mx.sym.Variable('label')
discriminatorSymbol = mx.sym.LogisticRegressionOutput(data=d5, label=label, name='dloss')
```
We start off by creating the `data` variable, which is used to hold our input images to the discriminator.
The discriminator then goes through a series of 5 convolutional layers; the first four use a 4x4 kernel, 2x2 stride, and 1x1 pad, halving the size of the image (which starts at 64x64) at each layer. Our model also increases dimensionality at each layer by doubling the number of filters per convolutional layer, starting at 128 filters and ending at 1024 filters before we flatten the output.
At the final convolution, we flatten the neural net to get one number as the final output of the discriminator network. This number is the probability that the image is real, as determined by our discriminator. We use logistic regression to determine this probability. When we pass in "real" images from the MNIST dataset, we label these as `1`, and we label the "fake" images from the generator net as `0`, to perform logistic regression on the discriminator network.
### Prepare the models using the `Module` API
So far we have defined a MXNet `Symbol` for both the generator and the discriminator network.
Before we can train our model, we need to bind these symbols using the `Module` API, which creates the computation graph for our models. It also allows us to decide how we want to initialize our model and what type of optimizer we want to use. Let's set up `Module` for both of our networks:
```
#Hyperperameters
sigma = 0.02
lr = 0.0002
beta1 = 0.5
ctx = mx.gpu(0)
#=============Generator Module=============
generator = mx.mod.Module(symbol=generatorSymbol, data_names=('rand',), label_names=None, context=ctx)
generator.bind(data_shapes=rand_iter.provide_data)
generator.init_params(initializer=mx.init.Normal(sigma))
generator.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'beta1': beta1,
})
mods = [generator]
# =============Discriminator Module=============
discriminator = mx.mod.Module(symbol=discriminatorSymbol, data_names=('data',), label_names=('label',), context=ctx)
discriminator.bind(data_shapes=image_iter.provide_data,
label_shapes=[('label', (batch_size,))],
inputs_need_grad=True)
discriminator.init_params(initializer=mx.init.Normal(sigma))
discriminator.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'beta1': beta1,
})
mods.append(discriminator)
```
First, we create `Modules` for our networks and then bind the symbols that we've created in the previous steps to our modules.
We use `rand_iter.provide_data` as the `data_shape` to bind our generator network. This means that as we iterate through batches of data on the generator `Module`, our `RandIter` will provide us with random numbers to feed our `Module`, using its `provide_data` function.
Similarly, we bind the discriminator `Module` to `image_iter.provide_data`, which gives us images from MNIST from the `NDArrayIter` we had set up earlier, called `image_iter`.
Notice that we're using the `Normal` initialization, with the hyperparameter `sigma=0.02`. This means our weight initializations for the neurons in our networks will be random numbers drawn from a Gaussian (normal) distribution with a mean of 0 and a standard deviation of 0.02.
We also use the Adam optimizer for gradient descent. We've set up two hyperparameters, `lr` and `beta1`, based on the values used in the DCGAN paper. We're using a single GPU, `gpu(0)`, for training.
### Visualizing Our Training
Before we train the model, let's set up some helper functions that will help visualize what our generator is producing, compared to what the real image is:
```
from matplotlib import pyplot as plt
#Takes the images in our batch and arranges them in an array so that they can be
#Plotted using matplotlib
def fill_buf(buf, num_images, img, shape):
width = buf.shape[0]/shape[1]
height = buf.shape[1]/shape[0]
img_width = (num_images%width)*shape[0]
    img_height = (num_images/height)*shape[1]
    buf[img_height:img_height+shape[1], img_width:img_width+shape[0], :] = img
#Plots two images side by side using matplotlib
def visualize(fake, real):
#64x3x64x64 to 64x64x64x3
fake = fake.transpose((0, 2, 3, 1))
#Pixel values from 0-255
fake = np.clip((fake+1.0)*(255.0/2.0), 0, 255).astype(np.uint8)
#Repeat for real image
real = real.transpose((0, 2, 3, 1))
real = np.clip((real+1.0)*(255.0/2.0), 0, 255).astype(np.uint8)
#Create buffer array that will hold all the images in our batch
#Fill the buffer so to arrange all images in the batch onto the buffer array
n = np.ceil(np.sqrt(fake.shape[0]))
fbuff = np.zeros((int(n*fake.shape[1]), int(n*fake.shape[2]), int(fake.shape[3])), dtype=np.uint8)
for i, img in enumerate(fake):
fill_buf(fbuff, i, img, fake.shape[1:3])
rbuff = np.zeros((int(n*real.shape[1]), int(n*real.shape[2]), int(real.shape[3])), dtype=np.uint8)
for i, img in enumerate(real):
fill_buf(rbuff, i, img, real.shape[1:3])
#Create a matplotlib figure with two subplots: one for the real and the other for the fake
#fill each plot with our buffer array, which creates the image
fig = plt.figure()
ax1 = fig.add_subplot(2,2,1)
ax1.imshow(fbuff)
ax2 = fig.add_subplot(2,2,2)
ax2.imshow(rbuff)
plt.show()
```
## Fit the Model
Training the DCGAN is a complex process that requires multiple steps.
To fit the model, for every batch of data in our dataset:
1. Use the `Z` vector, which contains our random numbers to do a forward pass through our generator. This outputs the "fake" image, since it's created from our generator.
2. Use the fake image as the input to do a forward and backwards pass through the discriminator network. We set our labels for our logistic regression to `0` to represent that this is a fake image. This trains the discriminator to learn what a fake image looks like. We save the gradient produced in backpropagation for the next step.
3. Do a forwards and backwards pass through the discriminator using a real image from our dataset. Our label for logistic regression will now be `1` to represent real images, so our discriminator can learn to recognize a real image.
4. Update the discriminator by adding the gradient generated during backpropagation on the fake image to the gradient from backpropagation on the real image.
5. Now that the discriminator has been updated for this batch, we still need to update the generator. First, do a forward and backwards pass with the same batch on the updated discriminator, to produce a new input gradient. Use that gradient to do a backwards pass through the generator, and then update the generator.
Here's the main training loop for our DCGAN:
```
# =============train===============
print('Training...')
for epoch in range(1):
image_iter.reset()
for i, batch in enumerate(image_iter):
#Get a batch of random numbers to generate an image from the generator
rbatch = rand_iter.next()
#Forward pass on training batch
generator.forward(rbatch, is_train=True)
#Output of training batch is the 64x64x3 image
outG = generator.get_outputs()
#Pass the generated (fake) image through the discriminator, and save the gradient
#Label (for logistic regression) is an array of 0's since this image is fake
label = mx.nd.zeros((batch_size,), ctx=ctx)
#Forward pass on the output of the discriminator network
discriminator.forward(mx.io.DataBatch(outG, [label]), is_train=True)
#Do the backwards pass and save the gradient
discriminator.backward()
gradD = [[grad.copyto(grad.context) for grad in grads] for grads in discriminator._exec_group.grad_arrays]
#Pass a batch of real images from MNIST through the discriminator
#Set the label to be an array of 1's because these are the real images
label[:] = 1
batch.label = [label]
#Forward pass on a batch of MNIST images
discriminator.forward(batch, is_train=True)
#Do the backwards pass and add the saved gradient from the fake images to the gradient
#generated by this backwards pass on the real images
discriminator.backward()
for gradsr, gradsf in zip(discriminator._exec_group.grad_arrays, gradD):
for gradr, gradf in zip(gradsr, gradsf):
gradr += gradf
#Update gradient on the discriminator
discriminator.update()
#Now that we've updated the discriminator, let's update the generator
#First do a forward pass and backwards pass on the newly updated discriminator
#With the current batch
discriminator.forward(mx.io.DataBatch(outG, [label]), is_train=True)
discriminator.backward()
#Get the input gradient from the backwards pass on the discriminator,
#and use it to do the backwards pass on the generator
diffD = discriminator.get_input_grads()
generator.backward(diffD)
#Update the gradients on the generator
generator.update()
#Increment to the next batch, printing every 50 batches
i += 1
if i % 50 == 0:
print('epoch:', epoch, 'iter:', i)
print
print(" From generator: From MNIST:")
visualize(outG[0].asnumpy(), batch.data[0].asnumpy())
```
Here we have our GAN being trained and we can visualize the progress that we're making as our networks train. Every 50 iterations, we print the training progress and call the `visualize` function that we created earlier, which creates the visual plots during training.
The plot on our left is what our generator created (the fake image) in the most recent iteration. The plot on the right is the original (real) image from the MNIST dataset that was inputted to the discriminator on the same iteration.
As training goes on the generator becomes better at generating realistic images. You can see this happening since images on the left become closer to the original dataset with each iteration.
## Summary
We've now successfully used Apache MXNet to train a Deep Convolutional GAN using the MNIST dataset.
As a result, we've created two neural nets: a generator, which is able to create images of handwritten digits from random numbers, and a discriminator, which is able to take an image and determine if it is an image of handwritten digits.
Along the way, we've learned how to do the image manipulation and visualization that's associated with training deep neural nets. We've also learned how to use some of MXNet's advanced training functionality to fit our model.
## Acknowledgements
This tutorial is based on [MXNet DCGAN codebase](https://github.com/dmlc/mxnet/blob/master/example/gan/dcgan.py), the [original paper on GANs](https://arxiv.org/abs/1406.2661), as well as [this paper](https://arxiv.org/abs/1511.06434) on deep convolutional GANs.
# Reconstructing MNIST images using Autoencoder
Now that we have understood how autoencoders reconstruct the inputs, in this section we will learn how autoencoders reconstruct the images of handwritten digits using the MNIST dataset.
In this chapter, we use the Keras API from TensorFlow to build the models, so that we become familiar with how to use high-level APIs.
## Import Libraries
First, let us import the necessary libraries:
```
import warnings
warnings.filterwarnings('ignore')
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
#plotting
import matplotlib.pyplot as plt
%matplotlib inline
#dataset
from tensorflow.keras.datasets import mnist
import numpy as np
```
## Prepare the Dataset
Let us load the MNIST dataset. Since the autoencoder reconstructs the given input, we don't need the labels, so we just load x_train for training and x_test for testing:
```
(x_train, _), (x_test, _) = mnist.load_data()
```
Normalize the data by dividing by the max pixel value, which is 255:
```
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
```
Shape of our dataset:
```
print(x_train.shape, x_test.shape)
```
Reshape the images as 2D array:
```
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
```
Now, the shape of data would become:
```
print(x_train.shape, x_test.shape)
```
# Define the Encoder
Now, we define the encoder which takes the images as an input and returns the encodings.
Define the size of the encodings:
```
encoding_dim = 32
```
Define the input layer, which acts as a placeholder for the flattened 784-pixel images:
```
input_image = Input(shape=(784,))
```
Define the encoder which takes the input_image and returns the encodings:
```
encoder = Dense(encoding_dim, activation='relu')(input_image)
```
# Define the Decoder
Let us define the decoder which takes the encoded values from the encoder and returns the reconstructed image:
```
decoder = Dense(784, activation='sigmoid')(encoder)
```
# Build the model
Now that we have defined the encoder and decoder, we define the model, which takes images as input and returns the output of the decoder, i.e., the reconstructed image:
```
model = Model(inputs=input_image, outputs=decoder)
```
Let us look at summary of the model:
```
model.summary()
```
Compile the model with binary cross-entropy loss, which we minimize using the AdaDelta optimizer:
```
model.compile(optimizer='adadelta', loss='binary_crossentropy')
```
Now, let us train the model.
Generally, we feed the data to the model as model.fit(x, y), where x is the input and y is the label. But since autoencoders reconstruct their inputs, the input and the target should be the same. So we feed the data to the model as model.fit(x_train, x_train).
```
model.fit(x_train, x_train, epochs=50, batch_size=256, shuffle=True, validation_data=(x_test, x_test))
```
## Reconstruct images
Let us see how our model performs on the test dataset. Feed the test images to the model and get the reconstructed images:
```
reconstructed_images = model.predict(x_test)
```
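If you also want to inspect the learned codes themselves, you can build a standalone encoder model from the same tensors defined above. This is a minimal sketch; `encoder_model` and `encodings` are just illustrative names.
```
# Reuse the layers defined above to expose the 32-dimensional encodings
encoder_model = Model(inputs=input_image, outputs=encoder)
encodings = encoder_model.predict(x_test)
print(encodings.shape)  # expected: (10000, 32)
```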
## Plotting reconstructed images
First, let us plot the actual images, i.e., the input images:
```
n = 7
plt.figure(figsize=(20, 4))
for i in range(n):
ax = plt.subplot(1, n, i+1)
plt.imshow(x_test[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
```
Plot the reconstructed image:
```
n = 7
plt.figure(figsize=(20, 4))
for i in range(n):
ax = plt.subplot(2, n, i + n + 1)
plt.imshow(reconstructed_images[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
```
As you can see, the autoencoder has learned to reconstruct the given input images. In the next section, we will learn about the convolutional autoencoder, which uses convolutional layers in the encoder and decoder networks.
# Synthetic Images from simulated data
## Authors
Yi-Hao Chen, Sebastian Heinz, Kelle Cruz, Stephanie T. Douglas
## Learning Goals
- Assign WCS astrometry to an image using ```astropy.wcs```
- Construct a PSF using ```astropy.modeling.model```
- Convolve raw data with PSF using ```astropy.convolution```
- Calculate polarization fraction and angle from Stokes I, Q, U data
- Overplot quivers on the image
## Keywords
modeling, convolution, coordinates, WCS, FITS, radio astronomy, matplotlib, colorbar
## Summary
In this tutorial, we will:
[1. Load and examine the FITS file](#1.-Load-and-examine-the-FITS-file)
[2. Set up astrometry coordinates](#2.-Set-up-astrometry-coordinates)
[3. Prepare a Point Spread Function (PSF)](#3.-Prepare-a-Point-Spread-Function-(PSF))
>[3.a How to do this without astropy kernels](#3.a-How-to-do-this-without-astropy-kernels)
[4. Convolve image with PSF](#4.-Convolve-image-with-PSF)
[5. Convolve Stokes Q and U images](#5.-Convolve-Stokes-Q-and-U-images)
[6. Calculate polarization angle and fraction for quiver plot](#6.-Calculate-polarization-angle-and-fraction-for-quiver-plot)
```
from astropy.utils.data import download_file
from astropy.io import fits
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.wcs import WCS
from astropy.convolution import Gaussian2DKernel
from astropy.modeling.models import Lorentz1D
from astropy.convolution import convolve_fft
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
```
## 1. Load and examine the FITS file
Here we begin with 2-dimensional data that were stored in FITS format from some simulations. We have Stokes I, Q, and U maps. We'll first load a FITS file and examine the header.
```
file_i = download_file(
'http://data.astropy.org/tutorials/synthetic-images/synchrotron_i_lobe_0700_150MHz_sm.fits',
cache=True)
hdulist = fits.open(file_i)
hdulist.info()
hdu = hdulist['NN_EMISSIVITY_I_LOBE_150.0MHZ']
hdu.header
```
We can see that this FITS file, which was created in [yt](https://yt-project.org/), has x and y coordinates in physical units (cm). We want to convert them into sky coordinates. Before we proceed, let's find out the range of the data and plot a histogram.
```
print(hdu.data.max())
print(hdu.data.min())
np.seterr(divide='ignore') #suppress the warnings raised by taking log10 of data with zeros
plt.hist(np.log10(hdu.data.flatten()), range=(-3, 2), bins=100);
```
Once we know the range of the data, we can do a visualization with the proper range (```vmin``` and ```vmax```).
```
fig = plt.figure(figsize=(6,12))
fig.add_subplot(111)
# We plot it in log-scale and add a small number to avoid nan values.
plt.imshow(np.log10(hdu.data+1E-3), vmin=-1, vmax=1, origin='lower')
```
## 2. Set up astrometry coordinates
From the header, we know that the x and y axes are in centimeter. However, in an observation we usually have RA and Dec. To convert physical units to sky coordinates, we will need to make some assumptions about where the object is located, i.e. the distance to the object and the central RA and Dec.
```
# distance to the object
dist_obj = 200*u.Mpc
# We have the RA in hh:mm:ss and DEC in dd:mm:ss format.
# We will use Skycoord to convert them into degrees later.
ra_obj = '19h59m28.3566s'
dec_obj = '+40d44m02.096s'
```
Here we convert the pixel scale from cm to degrees by dividing by the distance to the object.
```
cdelt1 = ((hdu.header['CDELT1']*u.cm/dist_obj.to('cm'))*u.rad).to('deg')
cdelt2 = ((hdu.header['CDELT2']*u.cm/dist_obj.to('cm'))*u.rad).to('deg')
print(cdelt1, cdelt2)
```
Use ```astropy.wcs.WCS``` to prepare a FITS header.
```
w = WCS(naxis=2)
# reference pixel coordinate
w.wcs.crpix = [hdu.data.shape[0]/2,hdu.data.shape[1]/2]
# sizes of the pixel in degrees
w.wcs.cdelt = [-cdelt1.base, cdelt2.base]
# converting ra and dec into degrees
c = SkyCoord(ra_obj, dec_obj)
w.wcs.crval = [c.ra.deg, c.dec.deg]
# the units of the axes are in degrees
w.wcs.cunit = ['deg', 'deg']
```
Now we can convert the WCS coordinate into header and update the hdu.
```
wcs_header = w.to_header()
hdu.header.update(wcs_header)
```
Let's take a look at the header. ```CDELT1```, ```CDELT2```, ```CUNIT1```, ```CUNIT2```, ```CRVAL1```, and ```CRVAL2``` are in sky coordinates now.
```
hdu.header
wcs = WCS(hdu.header)
fig = plt.figure(figsize=(6,12))
fig.add_subplot(111, projection=wcs)
plt.imshow(np.log10(hdu.data+1e-3), vmin=-1, vmax=1, origin='lower')
plt.xlabel('RA')
plt.ylabel('Dec')
```
Now we have the sky coordinate for the image!
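As a quick sanity check, the reference pixel should map back to the RA/Dec we chose for the object. This is a small sketch using astropy's high-level `pixel_to_world` API (available in recent astropy versions); note that pixel coordinates are 0-based while `CRPIX` is 1-based.
```
# The reference pixel should land on the RA/Dec we assigned above
print(wcs.pixel_to_world(w.wcs.crpix[0] - 1, w.wcs.crpix[1] - 1))
```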
## 3. Prepare a Point Spread Function (PSF)
Simple PSFs are included in ```astropy.convolution.kernel```. We'll use ```astropy.convolution.Gaussian2DKernel``` here.
First we need to set the telescope resolution. For a 2D Gaussian, we can calculate sigma in pixels by using our pixel scale keyword ```cdelt2``` from above.
```
# assume our telescope has 1 arcsecond resolution
telescope_resolution = 1*u.arcsecond
# calculate the sigma in pixels.
# since cdelt is in degrees, we use _.to('deg')
sigma = telescope_resolution.to('deg')/cdelt2
# By default, the Gaussian kernel will go to 4 sigma
# in each direction
psf = Gaussian2DKernel(sigma)
# let's take a look:
plt.imshow(psf.array.value)
```
## 3.a How to do this without astropy kernels
Maybe your PSF is more complicated. Here's an alternative way to do this, using a 2D Lorentzian
```
# set FWHM and psf grid
telescope_resolution = 1*u.arcsecond
gamma = telescope_resolution.to('deg')/cdelt2
x_grid = np.outer(np.linspace(-gamma*4,gamma*4,int(8*gamma)),np.ones(int(8*gamma)))
r_grid = np.sqrt(x_grid**2 + np.transpose(x_grid**2))
lorentzian = Lorentz1D(fwhm=2*gamma)
# extrude a 2D azimuthally symmetric PSF
lorentzian_psf = lorentzian(r_grid)
# normalization
lorentzian_psf /= np.sum(lorentzian_psf)
# let's take a look again:
plt.imshow(lorentzian_psf.value, interpolation='none')
```
## 4. Convolve image with PSF
Here we use ```astropy.convolution.convolve_fft``` to convolve the image. This routine uses a Fourier transform for faster calculation, which is particularly fast here since our data is $2^n$ sized. Using an FFT, however, causes boundary effects, so we need to specify how we want to handle the boundary. Here we choose to "wrap" the data, which means making it periodic.
```
convolved_image = convolve_fft(hdu.data, psf, boundary='wrap')
# Put a psf at the corner of the image
delta_x_psf=100 # number of pixels from the edges
xmin, xmax = -psf.shape[1]-delta_x_psf, -delta_x_psf
ymin, ymax = delta_x_psf, delta_x_psf+psf.shape[0]
convolved_image[xmin:xmax, ymin:ymax] = psf.array/psf.array.max()*10
```
Now let's take a look at the convolved image.
```
wcs = WCS(hdu.header)
fig = plt.figure(figsize=(8,12))
i_plot = fig.add_subplot(111, projection=wcs)
plt.imshow(np.log10(convolved_image+1e-3), vmin=-1, vmax=1.0, origin='lower')#, cmap=plt.cm.viridis)
plt.xlabel('RA')
plt.ylabel('Dec')
plt.colorbar()
```
## 5. Convolve Stokes Q and U images
```
hdulist.info()
file_q = download_file(
'http://data.astropy.org/tutorials/synthetic-images/synchrotron_q_lobe_0700_150MHz_sm.fits',
cache=True)
hdulist = fits.open(file_q)
hdu_q = hdulist['NN_EMISSIVITY_Q_LOBE_150.0MHZ']
file_u = download_file(
'http://data.astropy.org/tutorials/synthetic-images/synchrotron_u_lobe_0700_150MHz_sm.fits',
cache=True)
hdulist = fits.open(file_u)
hdu_u = hdulist['NN_EMISSIVITY_U_LOBE_150.0MHZ']
# Update the header with the wcs_header we created earlier
hdu_q.header.update(wcs_header)
hdu_u.header.update(wcs_header)
# Convolve the images with the the psf
convolved_image_q = convolve_fft(hdu_q.data, psf, boundary='wrap')
convolved_image_u = convolve_fft(hdu_u.data, psf, boundary='wrap')
```
Let's plot the Q and U images.
```
wcs = WCS(hdu.header)
fig = plt.figure(figsize=(16,12))
fig.add_subplot(121, projection=wcs)
plt.imshow(convolved_image_q, cmap='seismic', vmin=-0.5, vmax=0.5, origin='lower')#, cmap=plt.cm.viridis)
plt.xlabel('RA')
plt.ylabel('Dec')
plt.colorbar()
fig.add_subplot(122, projection=wcs)
plt.imshow(convolved_image_u, cmap='seismic', vmin=-0.5, vmax=0.5, origin='lower')#, cmap=plt.cm.viridis)
plt.xlabel('RA')
plt.ylabel('Dec')
plt.colorbar()
```
## 6. Calculate polarization angle and fraction for quiver plot
Note that rotating the Stokes Q and U maps requires changing the signs of both. Here we assume that the Stokes Q and U maps were calculated defining the y/declination axis as vertical, such that Q is positive for polarization vectors along the x/right-ascension axis.
```
# First, we plot the background image
fig = plt.figure(figsize=(8,16))
i_plot = fig.add_subplot(111, projection=wcs)
i_plot.imshow(np.log10(convolved_image+1e-3), vmin=-1, vmax=1, origin='lower')
# ranges of the axis
xx0, xx1 = i_plot.get_xlim()
yy0, yy1 = i_plot.get_ylim()
# binning factor
factor = [64, 66]
# re-binned number of points in each axis
nx_new = convolved_image.shape[1] // factor[0]
ny_new = convolved_image.shape[0] // factor[1]
# These are the positions of the quivers
X,Y = np.meshgrid(np.linspace(xx0,xx1,nx_new,endpoint=True),
np.linspace(yy0,yy1,ny_new,endpoint=True))
# bin the data
I_bin = convolved_image.reshape(nx_new, factor[0], ny_new, factor[1]).sum(3).sum(1)
Q_bin = convolved_image_q.reshape(nx_new, factor[0], ny_new, factor[1]).sum(3).sum(1)
U_bin = convolved_image_u.reshape(nx_new, factor[0], ny_new, factor[1]).sum(3).sum(1)
# polarization angle
psi = 0.5*np.arctan2(U_bin, Q_bin)
# polarization fraction
frac = np.sqrt(Q_bin**2+U_bin**2)/I_bin
# mask for low signal area
mask = I_bin < 0.1
frac[mask] = 0
psi[mask] = 0
pixX = frac*np.cos(psi) # X-vector
pixY = frac*np.sin(psi) # Y-vector
# keyword arguments for quiverplots
quiveropts = dict(headlength=0, headwidth=1, pivot='middle')
i_plot.quiver(X, Y, pixX, pixY, scale=8, **quiveropts)
```
## Exercise
### Convert the units of the data from Jy/arcsec^2 to Jy/beam
The intensity of the data is given in unit of Jy/arcsec^2. Observational data usually have the intensity unit in Jy/beam. Assuming a beam size or take the psf we created earlier, you can convert the data into Jy/beam.
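One possible approach is sketched below, assuming the beam is the Gaussian PSF built earlier with the 1-arcsecond resolution used as its sigma (if your resolution were a FWHM, convert it to sigma first). The solid angle of a 2D Gaussian beam is $2\pi\sigma^2$, so multiplying a Jy/arcsec^2 image by the beam area in arcsec^2 gives Jy/beam.
```
# Sketch of a solution: convert Jy/arcsec^2 to Jy/beam by multiplying by the beam area
beam_sigma = 1 * u.arcsec
beam_area = 2 * np.pi * beam_sigma**2          # beam solid angle in arcsec^2
image_jy_per_beam = convolved_image * beam_area.to(u.arcsec**2).value
print(image_jy_per_beam.max())
```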
# Image classification training with image format
1. [Introduction](#Introduction)
2. [Prerequisites and Preprocessing](#Prerequisites-and-Preprocessing)
1. [Permissions and environment variables](#Permissions-and-environment-variables)
2. [Prepare the data](#Prepare-the-data)
3. [Fine-tuning The Image Classification Model](#Fine-tuning-the-Image-classification-model)
1. [Training parameters](#Training-parameters)
2. [Training](#Training)
4. [Deploy The Model](#Deploy-the-model)
1. [Create model](#Create-model)
2. [Batch transform](#Batch-transform)
3. [Realtime inference](#Realtime-inference)
1. [Create endpoint configuration](#Create-endpoint-configuration)
2. [Create endpoint](#Create-endpoint)
3. [Perform inference](#Perform-inference)
4. [Clean up](#Clean-up)
## Introduction
Welcome to our end-to-end example of the image classification algorithm training with image format. In this demo, we will use the Amazon SageMaker image classification algorithm in transfer learning mode to fine-tune a pre-trained model (trained on ImageNet data) to learn to classify a new dataset. In particular, the pre-trained model will be fine-tuned using the [Caltech-256 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech256/).
To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on.
## Prerequisites and Preprocessing
### Permissions and environment variables
Here we set up the linkage and authentication to AWS services. There are three parts to this:
* The roles used to give learning and hosting access to your data. This will automatically be obtained from the role used to start the notebook
* The S3 bucket that you want to use for training and model data
* The Amazon SageMaker image classification docker image which need not be changed
```
%%time
import boto3
import sagemaker
from sagemaker import get_execution_role
from sagemaker import image_uris
role = get_execution_role()
bucket = sagemaker.session.Session().default_bucket()
training_image = image_uris.retrieve(
region=boto3.Session().region_name, framework="image-classification"
)
```
## Fine-tuning the Image classification model
### Prepare the data
The Caltech-256 dataset consists of images from 257 categories (the last one being a clutter category) and has 30k images, with a minimum of 80 images and a maximum of about 800 images per category.
The image classification algorithm can take two types of input formats. The first is a [RecordIO format](https://mxnet.incubator.apache.org/tutorials/basic/record_io.html) (content type: application/x-recordio) and the other is a [lst format](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec) (content type: application/x-image). Files for both these formats are available at http://data.dmlc.ml/mxnet/data/caltech-256/. In this example, we will use the lst format for training and use the training/validation split [specified here](http://data.dmlc.ml/mxnet/data/caltech-256/).
```
import os
import urllib.request
def download(url):
filename = url.split("/")[-1]
if not os.path.exists(filename):
urllib.request.urlretrieve(url, filename)
# Caltech-256 image files
s3 = boto3.client("s3")
s3.download_file(
"sagemaker-sample-files",
"datasets/image/caltech-256/256_ObjectCategories.tar",
"256_ObjectCategories.tar",
)
!tar -xf 256_ObjectCategories.tar
# Tool for creating lst file
download("https://raw.githubusercontent.com/apache/incubator-mxnet/master/tools/im2rec.py")
%%bash
mkdir -p caltech_256_train_60
for i in 256_ObjectCategories/*; do
c=`basename $i`
mkdir -p caltech_256_train_60/$c
for j in `ls $i/*.jpg | shuf | head -n 60`; do
mv $j caltech_256_train_60/$c/
done
done
python im2rec.py --list --recursive caltech-256-60-train caltech_256_train_60/
python im2rec.py --list --recursive caltech-256-60-val 256_ObjectCategories/
```
A .lst file is a tab-separated file with three columns that contains a list of image files. The first column specifies the image index, the second column specifies the class label index for the image, and the third column specifies the relative path of the image file. The image index in the first column should be unique across all of the images. Here we make an image list file using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool from MXNet. You can also create the .lst file in your own way. An example of .lst file is shown as follows.
```
!head -n 3 ./caltech-256-60-train.lst > example.lst
f = open("example.lst", "r")
lst_content = f.read()
print(lst_content)
```
When you are bringing your own image files to train, please ensure that the .lst file follows the same format as described above. In order to train with the lst format interface, passing the lst file for both training and validation in the appropriate format is mandatory. Once we have the data available in the correct format for training, the next step is to upload the image and .lst file to S3 bucket.
```
# Four channels: train, validation, train_lst, and validation_lst
s3train = "s3://{}/image-classification/train/".format(bucket)
s3validation = "s3://{}/image-classification/validation/".format(bucket)
s3train_lst = "s3://{}/image-classification/train_lst/".format(bucket)
s3validation_lst = "s3://{}/image-classification/validation_lst/".format(bucket)
# upload the image files to train and validation channels
!aws s3 cp caltech_256_train_60 $s3train --recursive --quiet
!aws s3 cp 256_ObjectCategories $s3validation --recursive --quiet
# upload the lst files to train_lst and validation_lst channels
!aws s3 cp caltech-256-60-train.lst $s3train_lst --quiet
!aws s3 cp caltech-256-60-val.lst $s3validation_lst --quiet
```
Now we have all the data stored in the S3 bucket. The image and lst files will be converted to RecordIO files internally by the image classification algorithm. But if you want to do the conversion yourself, the following cell shows how to do it using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool. Note that this is just an example of creating RecordIO files. We are **_not_** using them for training in this notebook. More details on creating RecordIO files can be found in this [tutorial](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec).
```
%%bash
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-val 256_ObjectCategories/
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-train caltech_256_train_60/
```
After you created the RecordIO files, you can upload them to the train and validation channels for training. To train with RecordIO format, you can follow "[Image-classification-fulltraining.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-fulltraining.ipynb)" and "[Image-classification-transfer-learning.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-transfer-learning.ipynb)". Again, we will **_not_** use the RecordIO file for the training. The following sections will only show you how to train a model with images and list files.
Before training the model, we need to set up the training parameters. The next section will explain the parameters in detail.
## Fine-tuning the Image Classification Model
### Training parameters
There are two kinds of parameters that need to be set for training. The first one are the parameters for the training job. These include:
* **Input specification**: These are the training and validation channels that specify the path where the training data is present. These are specified in the "InputDataConfig" section. The main parameters that need to be set are the "ContentType", which can be set to "application/x-recordio" or "application/x-image" based on the input data format, and the "S3Uri", which specifies the bucket and the folder where the data is present.
* **Output specification**: This is specified in the "OutputDataConfig" section. We just need to specify the path where the output can be stored after training
* **Resource config**: This section specifies the type of instance on which to run the training and the number of hosts used for training. If "InstanceCount" is more than 1, then training can be run in a distributed manner.
Apart from the above set of parameters, there are hyperparameters that are specific to the algorithm. These are:
* **num_layers**: The number of layers (depth) for the network. We use 18 in this sample but other values such as 50, 152 can be used.
* **image_shape**: The input image dimensions,'num_channels, height, width', for the network. It should be no larger than the actual image size. The number of channels should be same as the actual image.
* **num_training_samples**: This is the total number of training samples. It is set to 15240 for the Caltech dataset with the current split.
* **num_classes**: This is the number of output classes for the new dataset. ImageNet was trained with 1000 output classes but the number of output classes can be changed for fine-tuning. For Caltech, we use 257 because it has 256 object categories + 1 clutter class.
* **mini_batch_size**: The number of training samples used for each mini batch. In distributed training, the number of training samples used per batch will be N * mini_batch_size where N is the number of hosts on which training is run.
* **epochs**: Number of training epochs.
* **learning_rate**: Learning rate for training.
* **top_k**: Report the top-k accuracy during training.
* **resize**: Resize the image before using it for training. The images are resized so that the shortest side has this length (in pixels). If the parameter is not set, then the training data is used as is, without resizing.
* **checkpoint_frequency**: Period to store model parameters (in number of epochs).
* **use_pretrained_model**: Set to 1 to use pretrained model for transfer learning.
```
# The algorithm supports multiple network depth (number of layers). They are 18, 34, 50, 101, 152 and 200
# For this training, we will use 18 layers
num_layers = 18
# we need to specify the input image shape for the training data
image_shape = "3,224,224"
# we also need to specify the number of training samples in the training set
num_training_samples = 15240
# specify the number of output classes
num_classes = 257
# batch size for training
mini_batch_size = 128
# number of epochs
epochs = 6
# learning rate
learning_rate = 0.01
# report top_5 accuracy
top_k = 5
# resize image before training
resize = 256
# period to store model parameters (in number of epochs), in this case, we will save parameters from epoch 2, 4, and 6
checkpoint_frequency = 2
# Since we are using transfer learning, we set use_pretrained_model to 1 so that weights can be
# initialized with pre-trained weights
use_pretrained_model = 1
```
### Training
Run the training using Amazon SageMaker CreateTrainingJob API
```
%%time
import time
import boto3
from time import gmtime, strftime
s3 = boto3.client("s3")
# create unique job name
job_name_prefix = "sagemaker-imageclassification-notebook"
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
job_name = job_name_prefix + timestamp
training_params = {
# specify the training docker image
"AlgorithmSpecification": {"TrainingImage": training_image, "TrainingInputMode": "File"},
"RoleArn": role,
"OutputDataConfig": {"S3OutputPath": "s3://{}/{}/output".format(bucket, job_name_prefix)},
"ResourceConfig": {"InstanceCount": 1, "InstanceType": "ml.p2.xlarge", "VolumeSizeInGB": 50},
"TrainingJobName": job_name,
"HyperParameters": {
"image_shape": image_shape,
"num_layers": str(num_layers),
"num_training_samples": str(num_training_samples),
"num_classes": str(num_classes),
"mini_batch_size": str(mini_batch_size),
"epochs": str(epochs),
"learning_rate": str(learning_rate),
"top_k": str(top_k),
"resize": str(resize),
"checkpoint_frequency": str(checkpoint_frequency),
"use_pretrained_model": str(use_pretrained_model),
},
"StoppingCondition": {"MaxRuntimeInSeconds": 360000},
# Training data should be inside a subdirectory called "train"
# Validation data should be inside a subdirectory called "validation"
# The algorithm currently only supports fullyreplicated model (where data is copied onto each machine)
"InputDataConfig": [
{
"ChannelName": "train",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3train,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
{
"ChannelName": "validation",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3validation,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
{
"ChannelName": "train_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3train_lst,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
{
"ChannelName": "validation_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3validation_lst,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
],
}
print("Training job name: {}".format(job_name))
print(
"\nInput Data Location: {}".format(
training_params["InputDataConfig"][0]["DataSource"]["S3DataSource"]
)
)
# create the Amazon SageMaker training job
sagemaker = boto3.client(service_name="sagemaker")
sagemaker.create_training_job(**training_params)
# confirm that the training job has started
status = sagemaker.describe_training_job(TrainingJobName=job_name)["TrainingJobStatus"]
print("Training job current status: {}".format(status))
try:
# wait for the job to finish and report the ending status
sagemaker.get_waiter("training_job_completed_or_stopped").wait(TrainingJobName=job_name)
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info["TrainingJobStatus"]
print("Training job ended with status: " + status)
except:
print("Training failed to start")
# if exception is raised, that means it has failed
message = sagemaker.describe_training_job(TrainingJobName=job_name)["FailureReason"]
print("Training failed with the following error: {}".format(message))
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info["TrainingJobStatus"]
print("Training job ended with status: " + status)
print(training_info)
```
If you see the message,
> `Training job ended with status: Completed`
then that means training successfully completed and the output model was stored in the output path specified by `training_params['OutputDataConfig']`.
You can also view information about and the status of a training job using the AWS SageMaker console. Just click on the "Jobs" tab.
## Deploy The Model
A trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means predicting the class label given an input image.
This section involves several steps,
1. [Create model](#Create-model) - Create model for the training output
1. [Batch Transform](#Batch-transform) - Create a transform job to perform batch inference.
1. [Host the model for realtime inference](#Realtime-inference) - Create an inference endpoint and perform realtime inference.
### Create model
We now create a SageMaker Model from the training output. Using the model we can create an Endpoint Configuration.
```
%%time
import boto3
from time import gmtime, strftime
sage = boto3.Session().client(service_name="sagemaker")
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
model_name = "image-classification-model" + timestamp
print(model_name)
info = sage.describe_training_job(TrainingJobName=job_name)
model_data = info["ModelArtifacts"]["S3ModelArtifacts"]
print(model_data)
hosting_image = image_uris.retrieve(
region=boto3.Session().region_name, framework="image-classification"
)
primary_container = {
"Image": hosting_image,
"ModelDataUrl": model_data,
}
create_model_response = sage.create_model(
ModelName=model_name, ExecutionRoleArn=role, PrimaryContainer=primary_container
)
print(create_model_response["ModelArn"])
```
### Batch transform
We now create a SageMaker Batch Transform job using the model created above to perform batch prediction.
```
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
batch_job_name = "image-classification-model" + timestamp
batch_input = s3validation + "001.ak47/"
request = {
"TransformJobName": batch_job_name,
"ModelName": model_name,
"MaxConcurrentTransforms": 16,
"MaxPayloadInMB": 6,
"BatchStrategy": "SingleRecord",
"TransformOutput": {"S3OutputPath": "s3://{}/{}/output".format(bucket, batch_job_name)},
"TransformInput": {
"DataSource": {"S3DataSource": {"S3DataType": "S3Prefix", "S3Uri": batch_input}},
"ContentType": "application/x-image",
"SplitType": "None",
"CompressionType": "None",
},
"TransformResources": {"InstanceType": "ml.p2.xlarge", "InstanceCount": 1},
}
print("Transform job name: {}".format(batch_job_name))
print("\nInput Data Location: {}".format(batch_input))
sagemaker = boto3.client("sagemaker")
sagemaker.create_transform_job(**request)
print("Created Transform job with name: ", batch_job_name)
while True:
response = sagemaker.describe_transform_job(TransformJobName=batch_job_name)
status = response["TransformJobStatus"]
if status == "Completed":
print("Transform job ended with status: " + status)
break
if status == "Failed":
message = response["FailureReason"]
print("Transform failed with the following error: {}".format(message))
raise Exception("Transform job failed")
time.sleep(30)
```
After the job completes, let's check the prediction results.
```
from urllib.parse import urlparse
import json
import numpy as np
s3_client = boto3.client("s3")
object_categories = [
"ak47",
"american-flag",
"backpack",
"baseball-bat",
"baseball-glove",
"basketball-hoop",
"bat",
"bathtub",
"bear",
"beer-mug",
"billiards",
"binoculars",
"birdbath",
"blimp",
"bonsai-101",
"boom-box",
"bowling-ball",
"bowling-pin",
"boxing-glove",
"brain-101",
"breadmaker",
"buddha-101",
"bulldozer",
"butterfly",
"cactus",
"cake",
"calculator",
"camel",
"cannon",
"canoe",
"car-tire",
"cartman",
"cd",
"centipede",
"cereal-box",
"chandelier-101",
"chess-board",
"chimp",
"chopsticks",
"cockroach",
"coffee-mug",
"coffin",
"coin",
"comet",
"computer-keyboard",
"computer-monitor",
"computer-mouse",
"conch",
"cormorant",
"covered-wagon",
"cowboy-hat",
"crab-101",
"desk-globe",
"diamond-ring",
"dice",
"dog",
"dolphin-101",
"doorknob",
"drinking-straw",
"duck",
"dumb-bell",
"eiffel-tower",
"electric-guitar-101",
"elephant-101",
"elk",
"ewer-101",
"eyeglasses",
"fern",
"fighter-jet",
"fire-extinguisher",
"fire-hydrant",
"fire-truck",
"fireworks",
"flashlight",
"floppy-disk",
"football-helmet",
"french-horn",
"fried-egg",
"frisbee",
"frog",
"frying-pan",
"galaxy",
"gas-pump",
"giraffe",
"goat",
"golden-gate-bridge",
"goldfish",
"golf-ball",
"goose",
"gorilla",
"grand-piano-101",
"grapes",
"grasshopper",
"guitar-pick",
"hamburger",
"hammock",
"harmonica",
"harp",
"harpsichord",
"hawksbill-101",
"head-phones",
"helicopter-101",
"hibiscus",
"homer-simpson",
"horse",
"horseshoe-crab",
"hot-air-balloon",
"hot-dog",
"hot-tub",
"hourglass",
"house-fly",
"human-skeleton",
"hummingbird",
"ibis-101",
"ice-cream-cone",
"iguana",
"ipod",
"iris",
"jesus-christ",
"joy-stick",
"kangaroo-101",
"kayak",
"ketch-101",
"killer-whale",
"knife",
"ladder",
"laptop-101",
"lathe",
"leopards-101",
"license-plate",
"lightbulb",
"light-house",
"lightning",
"llama-101",
"mailbox",
"mandolin",
"mars",
"mattress",
"megaphone",
"menorah-101",
"microscope",
"microwave",
"minaret",
"minotaur",
"motorbikes-101",
"mountain-bike",
"mushroom",
"mussels",
"necktie",
"octopus",
"ostrich",
"owl",
"palm-pilot",
"palm-tree",
"paperclip",
"paper-shredder",
"pci-card",
"penguin",
"people",
"pez-dispenser",
"photocopier",
"picnic-table",
"playing-card",
"porcupine",
"pram",
"praying-mantis",
"pyramid",
"raccoon",
"radio-telescope",
"rainbow",
"refrigerator",
"revolver-101",
"rifle",
"rotary-phone",
"roulette-wheel",
"saddle",
"saturn",
"school-bus",
"scorpion-101",
"screwdriver",
"segway",
"self-propelled-lawn-mower",
"sextant",
"sheet-music",
"skateboard",
"skunk",
"skyscraper",
"smokestack",
"snail",
"snake",
"sneaker",
"snowmobile",
"soccer-ball",
"socks",
"soda-can",
"spaghetti",
"speed-boat",
"spider",
"spoon",
"stained-glass",
"starfish-101",
"steering-wheel",
"stirrups",
"sunflower-101",
"superman",
"sushi",
"swan",
"swiss-army-knife",
"sword",
"syringe",
"tambourine",
"teapot",
"teddy-bear",
"teepee",
"telephone-box",
"tennis-ball",
"tennis-court",
"tennis-racket",
"theodolite",
"toaster",
"tomato",
"tombstone",
"top-hat",
"touring-bike",
"tower-pisa",
"traffic-light",
"treadmill",
"triceratops",
"tricycle",
"trilobite-101",
"tripod",
"t-shirt",
"tuning-fork",
"tweezer",
"umbrella-101",
"unicorn",
"vcr",
"video-projector",
"washing-machine",
"watch-101",
"waterfall",
"watermelon",
"welding-mask",
"wheelbarrow",
"windmill",
"wine-bottle",
"xylophone",
"yarmulke",
"yo-yo",
"zebra",
"airplanes-101",
"car-side-101",
"faces-easy-101",
"greyhound",
"tennis-shoes",
"toad",
"clutter",
]
def list_objects(s3_client, bucket, prefix):
response = s3_client.list_objects(Bucket=bucket, Prefix=prefix)
objects = [content["Key"] for content in response["Contents"]]
return objects
def get_label(s3_client, bucket, prefix):
filename = prefix.split("/")[-1]
s3_client.download_file(bucket, prefix, filename)
with open(filename) as f:
data = json.load(f)
index = np.argmax(data["prediction"])
probability = data["prediction"][index]
print("Result: label - " + object_categories[index] + ", probability - " + str(probability))
return object_categories[index], probability
inputs = list_objects(s3_client, bucket, urlparse(batch_input).path.lstrip("/"))
print("Sample inputs: " + str(inputs[:2]))
outputs = list_objects(s3_client, bucket, batch_job_name + "/output")
print("Sample output: " + str(outputs[:2]))
# Check prediction result of the first 2 images
[get_label(s3_client, bucket, prefix) for prefix in outputs[0:2]]
```
### Realtime inference
We now host the model with an endpoint and perform realtime inference.
This section involves several steps,
1. [Create endpoint configuration](#Create-endpoint-configuration) - Create a configuration defining an endpoint.
1. [Create endpoint](#Create-endpoint) - Use the configuration to create an inference endpoint.
1. [Perform inference](#Perform-inference) - Perform inference on some input data using the endpoint.
1. [Clean up](#Clean-up) - Delete the endpoint and model
#### Create endpoint configuration
At launch, we will support configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration, that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way.
In addition, the endpoint configuration describes the instance type required for model deployment, and at launch will describe the autoscaling configuration.
```
from time import gmtime, strftime
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
endpoint_config_name = job_name_prefix + "-epc-" + timestamp
endpoint_config_response = sage.create_endpoint_config(
EndpointConfigName=endpoint_config_name,
ProductionVariants=[
{
"InstanceType": "ml.p2.xlarge",
"InitialInstanceCount": 1,
"ModelName": model_name,
"VariantName": "AllTraffic",
}
],
)
print("Endpoint configuration name: {}".format(endpoint_config_name))
print("Endpoint configuration arn: {}".format(endpoint_config_response["EndpointConfigArn"]))
```
#### Create endpoint
Next, the customer creates the endpoint that serves up the model, through specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.
```
%%time
import time
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
endpoint_name = job_name_prefix + "-ep-" + timestamp
print("Endpoint name: {}".format(endpoint_name))
endpoint_params = {
"EndpointName": endpoint_name,
"EndpointConfigName": endpoint_config_name,
}
endpoint_response = sagemaker.create_endpoint(**endpoint_params)
print("EndpointArn = {}".format(endpoint_response["EndpointArn"]))
```
Finally, now the endpoint can be created. It may take a few minutes to create the endpoint...
```
# get the status of the endpoint
response = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = response["EndpointStatus"]
print("EndpointStatus = {}".format(status))
try:
sagemaker.get_waiter("endpoint_in_service").wait(EndpointName=endpoint_name)
finally:
resp = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = resp["EndpointStatus"]
print("Arn: " + resp["EndpointArn"])
print("Create endpoint ended with status: " + status)
if status != "InService":
message = sagemaker.describe_endpoint(EndpointName=endpoint_name)["FailureReason"]
print("Training failed with the following error: {}".format(message))
raise Exception("Endpoint creation did not succeed")
```
If you see the message,
> `Create endpoint ended with status: InService`
then congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console.
We will finally create a runtime object from which we can invoke the endpoint.
#### Perform inference
Finally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate classifications from the trained model using that endpoint.
```
import boto3
runtime = boto3.Session().client(service_name="runtime.sagemaker")
```
##### Download test image
```
file_name = "/tmp/test.jpg"
s3.download_file(
"sagemaker-sample-files",
"datasets/image/caltech-256/256_ObjectCategories/008.bathtub/008_0007.jpg",
file_name,
)
# test image
from IPython.display import Image
Image(file_name)
import json
import numpy as np
with open(file_name, "rb") as f:
payload = f.read()
payload = bytearray(payload)
response = runtime.invoke_endpoint(
EndpointName=endpoint_name, ContentType="application/x-image", Body=payload
)
result = response["Body"].read()
# the result is a JSON list of class probabilities; parse it
result = json.loads(result)
# the result will output the probabilities for all classes
# find the class with maximum probability and print the class index
index = np.argmax(result)
object_categories = [
"ak47",
"american-flag",
"backpack",
"baseball-bat",
"baseball-glove",
"basketball-hoop",
"bat",
"bathtub",
"bear",
"beer-mug",
"billiards",
"binoculars",
"birdbath",
"blimp",
"bonsai-101",
"boom-box",
"bowling-ball",
"bowling-pin",
"boxing-glove",
"brain-101",
"breadmaker",
"buddha-101",
"bulldozer",
"butterfly",
"cactus",
"cake",
"calculator",
"camel",
"cannon",
"canoe",
"car-tire",
"cartman",
"cd",
"centipede",
"cereal-box",
"chandelier-101",
"chess-board",
"chimp",
"chopsticks",
"cockroach",
"coffee-mug",
"coffin",
"coin",
"comet",
"computer-keyboard",
"computer-monitor",
"computer-mouse",
"conch",
"cormorant",
"covered-wagon",
"cowboy-hat",
"crab-101",
"desk-globe",
"diamond-ring",
"dice",
"dog",
"dolphin-101",
"doorknob",
"drinking-straw",
"duck",
"dumb-bell",
"eiffel-tower",
"electric-guitar-101",
"elephant-101",
"elk",
"ewer-101",
"eyeglasses",
"fern",
"fighter-jet",
"fire-extinguisher",
"fire-hydrant",
"fire-truck",
"fireworks",
"flashlight",
"floppy-disk",
"football-helmet",
"french-horn",
"fried-egg",
"frisbee",
"frog",
"frying-pan",
"galaxy",
"gas-pump",
"giraffe",
"goat",
"golden-gate-bridge",
"goldfish",
"golf-ball",
"goose",
"gorilla",
"grand-piano-101",
"grapes",
"grasshopper",
"guitar-pick",
"hamburger",
"hammock",
"harmonica",
"harp",
"harpsichord",
"hawksbill-101",
"head-phones",
"helicopter-101",
"hibiscus",
"homer-simpson",
"horse",
"horseshoe-crab",
"hot-air-balloon",
"hot-dog",
"hot-tub",
"hourglass",
"house-fly",
"human-skeleton",
"hummingbird",
"ibis-101",
"ice-cream-cone",
"iguana",
"ipod",
"iris",
"jesus-christ",
"joy-stick",
"kangaroo-101",
"kayak",
"ketch-101",
"killer-whale",
"knife",
"ladder",
"laptop-101",
"lathe",
"leopards-101",
"license-plate",
"lightbulb",
"light-house",
"lightning",
"llama-101",
"mailbox",
"mandolin",
"mars",
"mattress",
"megaphone",
"menorah-101",
"microscope",
"microwave",
"minaret",
"minotaur",
"motorbikes-101",
"mountain-bike",
"mushroom",
"mussels",
"necktie",
"octopus",
"ostrich",
"owl",
"palm-pilot",
"palm-tree",
"paperclip",
"paper-shredder",
"pci-card",
"penguin",
"people",
"pez-dispenser",
"photocopier",
"picnic-table",
"playing-card",
"porcupine",
"pram",
"praying-mantis",
"pyramid",
"raccoon",
"radio-telescope",
"rainbow",
"refrigerator",
"revolver-101",
"rifle",
"rotary-phone",
"roulette-wheel",
"saddle",
"saturn",
"school-bus",
"scorpion-101",
"screwdriver",
"segway",
"self-propelled-lawn-mower",
"sextant",
"sheet-music",
"skateboard",
"skunk",
"skyscraper",
"smokestack",
"snail",
"snake",
"sneaker",
"snowmobile",
"soccer-ball",
"socks",
"soda-can",
"spaghetti",
"speed-boat",
"spider",
"spoon",
"stained-glass",
"starfish-101",
"steering-wheel",
"stirrups",
"sunflower-101",
"superman",
"sushi",
"swan",
"swiss-army-knife",
"sword",
"syringe",
"tambourine",
"teapot",
"teddy-bear",
"teepee",
"telephone-box",
"tennis-ball",
"tennis-court",
"tennis-racket",
"theodolite",
"toaster",
"tomato",
"tombstone",
"top-hat",
"touring-bike",
"tower-pisa",
"traffic-light",
"treadmill",
"triceratops",
"tricycle",
"trilobite-101",
"tripod",
"t-shirt",
"tuning-fork",
"tweezer",
"umbrella-101",
"unicorn",
"vcr",
"video-projector",
"washing-machine",
"watch-101",
"waterfall",
"watermelon",
"welding-mask",
"wheelbarrow",
"windmill",
"wine-bottle",
"xylophone",
"yarmulke",
"yo-yo",
"zebra",
"airplanes-101",
"car-side-101",
"faces-easy-101",
"greyhound",
"tennis-shoes",
"toad",
"clutter",
]
print("Result: label - " + object_categories[index] + ", probability - " + str(result[index]))
```
#### Clean up
When we're done with the endpoint, we can just delete it and the backing instances will be released. Run the following cell to delete the endpoint.
```
sage.delete_endpoint(EndpointName=endpoint_name)
```
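If you also want to remove the endpoint configuration and the model itself, a minimal sketch using the same boto3 SageMaker client:
```
# Optional additional cleanup of the resources created above
sage.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
sage.delete_model(ModelName=model_name)
```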
## PySpark Data Engineering Practice (Sandboxing)
### Olympic Athlete Data
This notebook is for data engineering practice purposes.
In this notebook I want to explore the data while using and learning PySpark.
The data is from: https://www.kaggle.com/mysarahmadbhat/120-years-of-olympic-history
```
## Imports
from pyspark.sql import SparkSession ## Create session
from pyspark.sql.types import StructType, StructField, StringType, IntegerType ## Create schema
## Create spark sessions
spark = (SparkSession.builder.appName("AthletesAnalytics").getOrCreate())
```
### Import the data
```
## Create schema
schema = StructType([
StructField("ID", StringType(), True),
StructField("Name", StringType(), True),
StructField("Sex", StringType(), True),
StructField("Age", StringType(), True),
StructField("Height", StringType(), True),
StructField("Weight", StringType(), True),
StructField("Team", StringType(), True),
StructField("NOC", StringType(), True),
StructField("Games", StringType(), True),
StructField("Year", StringType(), True),
StructField("Season", StringType(), True),
StructField("City", StringType(), True),
StructField("Sport", StringType(), True),
StructField("Event", StringType(), True),
StructField("Medal", StringType(), True),
])
## Read CSV into dataframe
file_path = "./data/athlete_events.csv"
athletes_df = (spark.read.format("csv")
.option("header", True)
.schema(schema)
.load(file_path))
## Showing first 10 rows
athletes_df.show(10, False)
## Print out schema details
athletes_df.printSchema()
athletes_df.show(3, vertical=True)
```
### Exploration & Cleansing
```
### Check for NA values by exploring columns
from pyspark.sql.functions import col
athletes_df.filter(col("Medal") == "NA").show(10)
## NA values in:
## Age, Height, Weight, Team, NOC (National Olympic Committee), and Medal.
```
#### Drop rows where age, height or weight have NA values.
```
athletes_df = athletes_df.filter((col("Age") != "NA") & (col("Height") != "NA") & (col("Weight") != "NA"))
## Check if correct
athletes_df.filter((col("Age") == "NA")).show(5)
athletes_df.filter((col("Height") == "NA")).show(5)
athletes_df.filter((col("Weight") == "NA")).show(5)
```
#### Check if other columns have the right values
```
### Check if ID, Age, Height, Weight and Year are indeed all integer values
### Checking ID first on non numeric values
from pyspark.sql.types import DataType, StructField, StructType, IntegerType, StringType
test_df = athletes_df.select('ID',col('ID').cast(IntegerType()).isNotNull().alias("Value"))
test_df.filter((col("Value") == False)).show(5)
### Checking Age on non numeric values
from pyspark.sql.types import DataType, StructField, StructType, IntegerType, StringType
test_df = athletes_df.select('Age',col('Age').cast(IntegerType()).isNotNull().alias("Value"))
test_df.filter((col("Value") == False)).show(5)
### As seen, something isn't right: there are gender and even name values in the Age column.
### Let's see how many rows are affected
test_df.filter((col("Value") == True)).count()
### 500 out of 206188 values have this problem
test_df.filter((col("Value") == False)).count()
### Percentage of broken rows
print(str(round(500 / 206188 * 100,2)) + '%')
athletes_df.filter((col("Age") == "M")).show(5)
### The reason for this error is that there is a comma in some of the names.
### For now I'll drop these rows. This can be done with the following filter function
athletes_df = athletes_df.filter("CAST(Age AS INTEGER) IS NOT NULL")
athletes_df.filter((col("Age"))=="M").show()
### After dropping those rows, there are no longer any wrong values in Height either
test_df = athletes_df.select('Height',col('Height').cast(IntegerType()).isNotNull().alias("Value"))
test_df.filter((col("Value") == False)).show(5)
### As you can see, the 500 bad rows were deleted.
athletes_df.count()
### Check the distinct values for seasons.
### As seen there are no odd values in this column.
athletes_df.select("Season").distinct().show()
### Check the length of NOC, as seen in the result this is always 3, so that is good.
from pyspark.sql.functions import length
test_df = athletes_df.withColumn("length_NOC", length("NOC")).filter((col("length_NOC") != 3))
test_df.show()
### Check if sex is only M and F, as seen this is correct.
athletes_df.filter((col("Sex")!="F") & (col("Sex")!="M")).show()
```
### Masking the name
To practice handling private information, I want to explore masking the name column.
#### Masking
```
### Masks name showing the first and last two characters.
### If name is less than 5 characters, it will only show the first character.
from pyspark.sql.functions import udf
def mask_name(columnValue):
if len(columnValue) < 5:
nameList=list(columnValue)
start = "".join(nameList[:1])
masking = 'x'*(len(nameList)-1)
masked_name = start+masking
else:
nameList=list(columnValue)
start = "".join(nameList[:2])
end = "".join(nameList[-2:])
masking = 'x'*(len(nameList)-4)
masked_name = start+masking+end
return masked_name
### Make the function work with PySpark
mask_name_udf = udf(mask_name, StringType())
### Test function
athletes_df.select("Name",mask_name_udf(athletes_df["Name"])).distinct().show(5, truncate=False)
athletes_df = athletes_df.withColumn("MaskedName",mask_name_udf(athletes_df["Name"])).drop(col("Name"))
athletes_df.show(1,vertical=True)
```
### Fixing Schema
```
athletes_df.printSchema()
### ID, Age, Height, Weight and Year should be integers
athletes_final_df = (athletes_df.withColumn("PlayerID", col("ID").cast(IntegerType()))
.drop(col("ID"))
.withColumn("Name", col("MaskedName").cast(StringType()))
.withColumn("Age", col("Age").cast(IntegerType()))
.withColumn("Height", col("Height").cast(IntegerType()))
.withColumn("Weight", col("Weight").cast(IntegerType()))
.withColumn("Year", col("Year").cast(IntegerType()))
)
athletes_final_df.printSchema()
### Sort column order
athletes_sorted_df = athletes_final_df.select(
[athletes_final_df.columns[-2]]
+ [athletes_final_df.columns[-1]]
+ athletes_final_df.columns[:-3])
athletes_sorted_df.show(1, vertical=True)
athletes_sorted_df.printSchema()
```
### Save to parquet
```
## Write to a partitioned parquet file (left commented out: it was too heavy for the laptop used here)
#output_path = './output/athlete_data'
#athletes_sorted_df.write.partitionBy("Games").mode("overwrite").parquet(output_path)
```
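A lighter-weight alternative is sketched below: coalescing to a handful of partitions and skipping the per-Games partitioning greatly reduces the number of output files and the memory pressure of the write. The output path here is just an example.
```
## Write a flat (non-partitioned) parquet output with fewer, larger files
output_path = './output/athlete_data_flat'
athletes_sorted_df.coalesce(4).write.mode("overwrite").parquet(output_path)
```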
### Aggregations
```
from pyspark.sql.functions import min, max, sum, sumDistinct, avg, col, expr, round, count
```
#### Medals per year
```
### Get year and medal
medals_per_year_df = athletes_sorted_df.select(
col("Year"),
col("Medal")
)
medals_per_year_df.show(5)
### Filter out all rows with NA
medals_per_year_df = medals_per_year_df.filter(col("Medal")!="NA")
medals_per_year_df.show(5)
### show amount of medals per Year
medals_per_year_df.groupBy("Year").agg(count("Medal").alias("Medals Amount")).orderBy("Year", ascending=False).show(5)
```
#### Medals per country
```
### Show distinct medal values.
athletes_sorted_df.select("Medal").distinct().show()
### create new dataframe and filter out NA values for the medal column.
medals_per_country_df = athletes_sorted_df.select(
col("Team"),
col("Medal")
)
medals_per_country_df = medals_per_country_df.filter(col("Medal")!="NA")
medals_per_country_df.show(5)
### Aggregate and order by medal amount
medals_per_country_df = medals_per_country_df.groupBy("Team","Medal").agg(count("Medal").alias("Amount")).orderBy("Amount", ascending=False)
medals_per_country_df.show(10)
```
#### Show information about height and weight
```
### This could also be used to make sure there are no odd values in the columns
athletes_sorted_df.select("Height", "Weight").describe().show()
### Weight of only 25?? Let's check out why that is.
athletes_sorted_df.select("Weight","Height","Age","PlayerID","Name","Team").filter(col("Weight")==25).distinct().show()
```
#### Which country has the most medals in basketball?
```
athletes_sorted_df.show(2)
best_in_basketball_df = athletes_sorted_df.select(
col("Team"),
col("Sport"),
col("Medal")
)
best_in_basketball_df = best_in_basketball_df.filter(col("Sport")=="Basketball")
best_in_basketball_df.show(3)
best_in_basketball_df = best_in_basketball_df.groupBy("Team","Sport").agg(count("Medal").alias("Amount")).orderBy("Amount", ascending=False)
best_in_basketball_df.show(5)
```
As you could expect, US has the most medals in Basketball.
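When you're finished exploring, it's good practice to release the resources held by the session:
```
## Stop the Spark session
spark.stop()
```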
# Ridge Regressor with StandardScaler
### Required Packages
```
import warnings
import numpy as np
import pandas as pd
import seaborn as se
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
warnings.filterwarnings('ignore')
```
### Initialization
Filepath of CSV file
```
#filepath
file_path= ""
```
List of features which are required for model training.
```
#x_values
features=[]
```
Target feature for prediction.
```
#y_value
target=''
```
### Data fetching
pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
We will use the pandas library to read the CSV file using its storage path, and we use the head function to display the initial rows.
```
df=pd.read_csv(file_path)
df.head()
```
### Feature Selections
It is the process of reducing the number of input variables when developing a predictive model, both to reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
We will assign all the required input features to X and target/outcome to Y.
```
X=df[features]
Y=df[target]
```
### Data Preprocessing
Since the majority of the machine learning models in the Sklearn library don't handle string categorical data or null values, we have to explicitly remove or replace them. The snippet below has functions which fill in null values, if any exist, and one-hot encode the string categorical columns in the dataset.
```
def NullClearner(df):
if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])):
df.fillna(df.mean(),inplace=True)
return df
elif(isinstance(df, pd.Series)):
df.fillna(df.mode()[0],inplace=True)
return df
else:return df
def EncodeX(df):
return pd.get_dummies(df)
```
Calling preprocessing functions on the feature and target set.
```
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=NullClearner(Y)
X.head()
```
#### Correlation Map
In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
```
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
```
### Data Splitting
The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
```
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)
```
### Model
Ridge regression addresses some of the problems of Ordinary Least Squares by imposing a penalty on the size of the coefficients. The ridge coefficients minimize a penalized residual sum of squares:
\begin{equation*}
\min_{w} || X w - y||_2^2 + \alpha ||w||_2^2
\end{equation*}
The complexity parameter $\alpha \geq 0$ controls the amount of shrinkage: the larger the value of $\alpha$, the greater the amount of shrinkage and thus the more robust the coefficients become to collinearity.
This model solves a regression model where the loss function is the linear least squares function and regularization is given by the l2-norm. Also known as Ridge Regression or Tikhonov regularization. This estimator has built-in support for multi-variate regression (i.e., when y is a 2d-array of shape (n_samples, n_targets)).
#### Model Tuning Parameters
> **alpha** -> Regularization strength; must be a positive float. Regularization improves the conditioning of the problem and reduces the variance of the estimates. Larger values specify stronger regularization.
> **solver** -> Solver to use in the computational routines {‘auto’, ‘svd’, ‘cholesky’, ‘lsqr’, ‘sparse_cg’, ‘sag’, ‘saga’}
```
Input=[("standard",StandardScaler()),("model",Ridge(random_state=123))]
model=Pipeline(Input)
model.fit(x_train,y_train)
```
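The pipeline above uses Ridge's default hyperparameters; below is a minimal sketch of passing **alpha** and **solver** explicitly through the pipeline (the values shown are illustrative, not tuned for this dataset).
```
# Hedged sketch: same pipeline, with explicit (illustrative) hyperparameters
Input=[("standard",StandardScaler()),("model",Ridge(alpha=2.0, solver='cholesky', random_state=123))]
model=Pipeline(Input)
model.fit(x_train,y_train)
```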
#### Model Accuracy
We will use the trained model to make predictions on the test set, then use the predicted values to measure the accuracy of our model.
> **score**: The **score** function returns the coefficient of determination <code>R<sup>2</sup></code> of the prediction.
```
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
```
> **r2_score**: The **r2_score** function computes the coefficient of determination, i.e. the proportion of the variance in the target that is explained by the model.
> **mae**: The **mean absolute error** function calculates the average absolute distance between the real and the predicted values.
> **mse**: The **mean squared error** function averages the squared errors, penalizing the model more heavily for large errors.
```
y_pred=model.predict(x_test)
print("R2 Score: {:.2f} %".format(r2_score(y_test,y_pred)*100))
print("Mean Absolute Error {:.2f}".format(mean_absolute_error(y_test,y_pred)))
print("Mean Squared Error {:.2f}".format(mean_squared_error(y_test,y_pred)))
```
#### Prediction Plot
First, we plot the actual target values of the first 20 test records (in green) against their record number.
We then overlay the model's predictions for the same records (in red) so the predicted and true values can be compared.
```
plt.figure(figsize=(14,10))
plt.plot(range(20),y_test[0:20], color = "green")
plt.plot(range(20),model.predict(x_test[0:20]), color = "red")
plt.legend(["Actual","prediction"])
plt.title("Predicted vs True Value")
plt.xlabel("Record number")
plt.ylabel(target)
plt.show()
```
#### Creator: Thilakraj Devadiga , Github: [Profile](https://github.com/Thilakraj1998)
```
import numpy as np
import matplotlib.pyplot as plt
from latency import run_latency, run_latency_changing_topo, run_latency_per_round, run_latency_per_round_changing_topo, nodes_latency
import sys
sys.path.append('..')
from utils import create_mixing_matrix, load_data, run, consensus
```
# Base case
```
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs = run(train_loader, test_loader, comm_matrix, num_rounds, epochs, num_clients)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs)
plt.show()
```
# Latency with fixed topology
```
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 2)
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs2 = run_latency(train_loader, test_loader, comm_matrix,
num_rounds, epochs, num_clients, latency_nodes)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs2)
plt.show()
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 4)
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs4 = run_latency(train_loader, test_loader, comm_matrix,
num_rounds, epochs, num_clients, latency_nodes)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs4)
plt.show()
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 8)
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs8 = run_latency(train_loader, test_loader, comm_matrix,
num_rounds, epochs, num_clients, latency_nodes)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs8)
plt.show()
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 16)
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs16 = run_latency(train_loader, test_loader, comm_matrix,
num_rounds, epochs, num_clients, latency_nodes)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs16)
plt.show()
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 32)
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs32 = run_latency(train_loader, test_loader, comm_matrix,
num_rounds, epochs, num_clients, latency_nodes)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs32)
plt.show()
fig, ax = plt.subplots(1, figsize=(12, 9))
ax.set_ylim([0, 1])
x = np.array(range(10))
ax.plot(x, accs, color="red", label="base case")
ax.plot(x, accs2, color="lime", label="two delayed nodes")
ax.plot(x, accs4, color="green", label="four delayed nodes")
ax.plot(x, accs8, color="purple", label="eight delayed nodes")
ax.plot(x, accs16, color="blue", label="sixteen delayed nodes")
ax.plot(x, accs32, color="cyan", label="thirty-two delayed nodes")
plt.legend(loc="lower right", title="Number of delayed nodes")
plt.title("Accuracy curve depending on number of delayed nodes")
plt.xlabel("Round")
plt.ylabel("Accuracy")
plt.show()
```
# Latency with changing topology
```
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 2)
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs2_ = run_latency_changing_topo(train_loader, test_loader,
num_rounds, epochs, num_clients, latency_nodes)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs2_)
plt.show()
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 4)
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs4_ = run_latency_changing_topo(train_loader, test_loader,
num_rounds, epochs, num_clients, latency_nodes)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs4_)
plt.show()
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 8)
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs8_ = run_latency_changing_topo(train_loader, test_loader,
num_rounds, epochs, num_clients, latency_nodes)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs8_)
plt.show()
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 16)
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs16_ = run_latency_changing_topo(train_loader, test_loader,
num_rounds, epochs, num_clients, latency_nodes)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs16_)
plt.show()
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 32)
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs32_ = run_latency_changing_topo(train_loader, test_loader,
num_rounds, epochs, num_clients, latency_nodes)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs32_)
plt.show()
fig, ax = plt.subplots(1, figsize=(12, 9))
ax.set_ylim([0, 1])
x = np.array(range(10))
ax.plot(x, accs, color="red", label="base case")
ax.plot(x, accs2_, color="lime", label="two delayed nodes")
ax.plot(x, accs4_, color="green", label="four delayed nodes")
ax.plot(x, accs8_, color="purple", label="eight delayed nodes")
ax.plot(x, accs16_, color="blue", label="sixteen delayed nodes")
ax.plot(x, accs32_, color="cyan", label="thirty-two delayed nodes")
plt.legend(loc="lower right", title="Number of delayed nodes")
plt.title("Accuracy curve depending on number of delayed nodes with changing topology")
plt.xlabel("Round")
plt.ylabel("Accuracy")
plt.show()
```
# Latency on a few rounds
```
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 2)
latency_rounds = np.array([3, 7])
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs1 = run_latency_per_round(train_loader, test_loader, comm_matrix,
num_rounds, epochs, num_clients, latency_nodes, latency_rounds)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs1)
plt.show()
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 4)
latency_rounds = np.array([3, 7])
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs2 = run_latency_per_round(train_loader, test_loader, comm_matrix,
num_rounds, epochs, num_clients, latency_nodes, latency_rounds)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs2)
plt.show()
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 8)
latency_rounds = np.array([3, 7])
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs3 = run_latency_per_round(train_loader, test_loader, comm_matrix,
num_rounds, epochs, num_clients, latency_nodes, latency_rounds)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs3)
plt.show()
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 16)
latency_rounds = np.array([3, 7])
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs4 = run_latency_per_round(train_loader, test_loader, comm_matrix,
num_rounds, epochs, num_clients, latency_nodes, latency_rounds)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs4)
plt.show()
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 32)
latency_rounds = np.array([3, 7])
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs5 = run_latency_per_round(train_loader, test_loader, comm_matrix,
num_rounds, epochs, num_clients, latency_nodes, latency_rounds)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs5)
plt.show()
fig, ax = plt.subplots(1, figsize=(12, 9))
ax.set_ylim([0, 1])
x = np.array(range(10))
ax.plot(x, accs, color="red", label="base case")
ax.plot(x, accs1, color="lime", label="two delayed nodes")
ax.plot(x, accs2, color="green", label="four delayed nodes")
ax.plot(x, accs3, color="purple", label="eight delayed nodes")
ax.plot(x, accs4, color="blue", label="sixteen delayed nodes")
ax.plot(x, accs5, color="cyan", label="thirty-two delayed nodes")
plt.legend(loc="lower right", title="Number of delayed nodes")
plt.title("Accuracy curve depending on number of delayed nodes with delays only on specific rounds")
plt.xlabel("Round")
plt.ylabel("Accuracy")
plt.show()
```
# Latency on a few rounds with changing topology
```
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 2)
latency_rounds = np.array([3, 7])
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs1_ = run_latency_per_round_changing_topo(train_loader, test_loader,
num_rounds, epochs, num_clients, latency_nodes, latency_rounds)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs1_)
plt.show()
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 4)
latency_rounds = np.array([3, 7])
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs2_ = run_latency_per_round_changing_topo(train_loader, test_loader,
num_rounds, epochs, num_clients, latency_nodes, latency_rounds)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs2_)
plt.show()
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 8)
latency_rounds = np.array([3, 7])
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs3_ = run_latency_per_round_changing_topo(train_loader, test_loader,
num_rounds, epochs, num_clients, latency_nodes, latency_rounds)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs3_)
plt.show()
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 16)
latency_rounds = np.array([3, 7])
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs4_ = run_latency_per_round_changing_topo(train_loader, test_loader,
num_rounds, epochs, num_clients, latency_nodes, latency_rounds)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs4_)
plt.show()
# IID case: all the clients have images of all the classes
# Grid graph topology: each client is connected to exactly 4 neighbours
# Hyperparameters
num_clients = 100
num_rounds = 10
epochs = 1
batch_size = 32
latency_nodes = nodes_latency(num_clients, 32)
latency_rounds = np.array([3, 7])
# Communication matrix
comm_matrix = create_mixing_matrix('grid', num_clients)
# Creating decentralized datasets
train_loader, test_loader = load_data(batch_size, num_clients)
# Instantiate models and optimizers and run decentralized training
global_model, client_models, accs5_ = run_latency_per_round_changing_topo(train_loader, test_loader,
num_rounds, epochs, num_clients, latency_nodes, latency_rounds)
cons = consensus(global_model, client_models)
print(cons)
axes = plt.gca()
axes.set_ylim([0,1])
plt.plot(range(num_rounds), accs5_)
plt.show()
fig, ax = plt.subplots(1, figsize=(12, 9))
ax.set_ylim([0, 1])
x = np.array(range(10))
ax.plot(x, accs, color="red", label="base case")
ax.plot(x, accs1_, color="lime", label="two delayed nodes")
ax.plot(x, accs2_, color="green", label="four delayed nodes")
ax.plot(x, accs3_, color="purple", label="eight delayed nodes")
ax.plot(x, accs4_, color="blue", label="sixteen delayed nodes")
ax.plot(x, accs5_, color="cyan", label="thirty-two delayed nodes")
plt.legend(loc="lower right", title="Number of delayed nodes")
plt.title("Accuracy curve depending on number of delayed nodes with changing topology and delays only on specific rounds")
plt.xlabel("Round")
plt.ylabel("Accuracy")
plt.show()
```
```
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
%matplotlib inline
style.use('ggplot')
x = [20,30,50]
y = [ 10,50,13]
x2 = [4,10,47,]
y2= [56,4,30]
plt.plot(x, y, 'r', label='line one', linewidth=5)
plt.plot(x2, y2, 'c', label ='line two', linewidth=5)
plt.title('Interactive plot')
plt.xlabel('X axis')
plt.ylabel('Y axis')
plt.legend()
#plt.grid(True, color='k')
plt.show()
#BAR GRAPH
plt.bar([1,4,5,3,2],[4,7,8,10,11], label='Type 1')
plt.bar([9,7,6,8,10],[3,6,9,11,15], label = 'Type 2', color='k')
plt.legend()
plt.xlabel('Bar Number')
plt.ylabel('Bar Height')
plt.title('Bar Graph')
plt.show()
```
HISTOGRAM
```
#Bar plots show categorical variables while histograms show quantitative variables
population_ages = [22,34,45,78,23,65,47,98,70,56,54,87,23,54,31,35,
64,76,87,80,60,73,47,63,79,52,75,64,51,46,83,62,36,74,63]
from numpy.random import seed
from numpy.random import randint
seed(1)
#generate some random integers
population_ages_2 = randint(10,50,40)
#print(population_ages_2)
bins = [20,30,40,50,60,70,80,90,100]
plt.hist(population_ages, bins, histtype='bar', color = 'm', rwidth = 0.5, label='population 1')
plt.hist(population_ages_2, bins, histtype='bar', color = 'c', rwidth = 0.5, label='population 2')
plt.xlabel('X axis')
plt.ylabel('Y axis')
plt.title('Histogram')
plt.legend()
plt.show()
```
AREA PLOT AND STACK PLOT
```
seed(0)
days = np.arange(1,6) # use increasing day numbers 1..5 so the stack plot renders sensibly
sleeping = randint(10,30,5)
eating = randint(40,60,5)
working = randint(70,100,5)
playing = randint(100,150,5)
plt.plot([],[], color = 'm', label = 'sleeping', linewidth = 5)
plt.plot([],[], color = 'c', label = 'eating', linewidth = 5)
plt.plot([],[], color = 'r', label = 'working', linewidth = 5)
plt.plot([],[], color = 'k', label = 'playing', linewidth = 5)
plt.stackplot(days, sleeping, eating, working, playing, colors = ['m','c','r','k'])
plt.legend()
```
PIE CHART
```
seed(0)
slices = randint(20,100,5)
activities = ['balling','playing','sleeping','praying','eating']
cols = ['c','m','r','b','y']
plt.pie(slices,
labels = activities,
startangle = 90,
shadow = True,
colors = cols,
autopct = '%.1f%%', #formats the percentage of the data given
explode=(0,0.2,0,0,0.1)) #this is to explode the chart and takes positional argument
plt.title('Pie Chart')
plt.show()
#working with Multiple Plots
def f(t):
return np.exp(-t) * np.cos(2*np.pi*t)
t1 = np.arange(0.0,5.0,0.1)
t2 = np.arange(0.0,6.0,0.4)
plt.subplot(211)
plt.plot(t1, f(t1),'bo',
t2, f(t2))
plt.subplot(212)
plt.plot(t1, np.cos(2*np.pi*t1), color = 'k')
plt.show()
```
FURTHER PLOTTING IN MATPLOTLIB/PYLAB
```
from matplotlib import pylab
pylab.__version__
import numpy as np
x = np.linspace(0,10,25)
y = x*x+2
print()
print(x)
print()
print(y)
#print(np.array([x,y]).reshape(25,2)) # to join the array together
pylab.plot(x,y, 'r') #'r' stands for red
#drawing a subgraph
pylab.subplot(1,2,1) #rows, columns and indexes
pylab.plot(x,y, 'b--')
pylab.subplot(1,2,2)
pylab.plot(y,x, 'g*-')
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
fig = plt.figure()
ax = fig.add_axes([0.5,0.1,0.8,0.8]) #this controls the left,bottom,width and height of the canvas
ax.plot(x,y, 'r')
#we can also draw subgraphs
fig, axes = plt.subplots(nrows=1, ncols=2)
for ax in axes:
    ax.plot(x,y, 'r')
#we can also draw a graph inside of another graph
fig = plt.figure()
ax1 = fig.add_axes([0.5,0.1,0.8,0.8]) #Big axes
ax2 = fig.add_axes([0.6,0.5,0.35,0.3]) #small canvas
ax1.plot(x,y,'r')
ax2.plot(y,x, 'g')
fig, ax = plt.subplots(dpi=100)
ax.set_xlabel('X-axis')
ax.set_ylabel('Y-axis')
ax.set_title('tutorial plots')
#ax.plot(x,y, 'r')
ax.plot(x,x**2)
ax.plot(x, x**3)
#ax.legend(['label 1', 'label 2'])
ax.legend(['y = x**2', 'y = x**3'], loc=2) #plotting the legend
#you can also set other properties such as line color, transparency and more
fig, ax = plt.subplots(dpi=100)
ax.plot(x, x**2, 'r', alpha=0.5) #alpha sets the line colour transparency
ax.plot(x, x+2, alpha=.5)
ax.plot(x, x+3, alpha=.5)
fig, ax = plt.subplots(dpi=100)
#line width
ax.plot(x, x+1, 'b', lw=0.5 )
ax.plot(x, x+2, 'b', lw=1.5)
ax.plot(x, x+3, 'b', lw=3)
ax.plot(x, x+4, 'b', lw=3.5)
fig, ax = plt.subplots(dpi=100)
ax.plot(x, x+1, 'b', lw=0.5, linestyle='-')
ax.plot(x, x+2, 'b', lw=1.5, linestyle='-.')
ax.plot(x, x+3, 'b', lw=3, linestyle=':')
ax.plot(x, x+4, 'b', lw=3.5, linestyle='-')
fig, ax = plt.subplots(dpi=100)
ax.plot(x, x+1, 'b', lw=0.5 , marker='o', markersize=5, markerfacecolor='r')
ax.plot(x, x+2, 'b', lw=1.5, marker='+')
ax.plot(x, x+3, 'b', lw=3, marker='s')
ax.plot(x, x+4, 'b', lw=3.5, marker='1', markersize=10)
```
LIMITING OUR DATA
```
fig, ax = plt.subplots(1,2, figsize=(10,5))
ax[0].plot(x,x**2, x,x**3, lw=3)
#ax[0].grid(True) this applies if we are not using ggplot
ax[1].plot(x,x**2, x,x**3, lw=3)
#we set the x and y limit on the second plot
ax[1].set_ylim([0,60])
ax[1].set_xlim([2,5])
```
Other 2_d Graphs
```
n = np.array([0,1,2,3,4,5])
fig, ax = plt.subplots(1,4, figsize=(16,5))
ax[0].set_title('scatter')
ax[0].scatter(x, x + 0.25*np.random.randn(len(x)))
ax[1].set_title('step plot')
ax[1].step(n, n**2, lw=2, color='b')
ax[2].set_title('Bar')
ax[2].bar(n, n**2, align='center', color ='g', alpha=0.5)
ax[3].set_title('fill between')
ax[3].fill_between(x, x**2, x**3, color ='g', alpha=0.5)
plt.show()
#Draw a histogram (very important)
x = np.random.randn(10000)
fig, ax = plt.subplots(1,2, figsize=(12,4))
ax[0].set_title('Histogram')
ax[0].hist(x, color='g', alpha=0.8)
ax[1].set_title('Cumulative detailed histogram')
ax[1].hist(x, cumulative=True, bins=9)
plt.show()
#draw a contour map
#lets create some data where X and Y are coordinates and Z is the depth or height
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
delta = 0.0075
x = np.arange(-3, 3, delta)
y = np.arange(-2, 2, delta)
X, Y = np.meshgrid(x,y)
Z1 = np.exp(-X**2 - Y**2)
Z2 = np.exp(-(-X - 1)**2 - (Y - 1)**2)
Z = (Z1 - Z2)*2
fig, ax = plt.subplots(dpi=100)
CS = ax.contour(X,Y,Z) #CS is contour surface
ax.clabel(CS, inline=1, fontsize=10)
ax.set_title('Contour Map')
```
3 D MAPS
```
from mpl_toolkits.mplot3d.axes3d import Axes3D
fig = plt.figure(figsize=(14,6), dpi=100)
#Specify the 3D graphics to draw with projection='3d'
ax = fig.add_subplot(1,2,1, projection='3d')
ax.plot_surface(X, Y, Z, rstride=10, cstride=10, lw=0, color='c')
#write a program to create a pie chart of the popularity of programming languages
popularity = [200,334,890,290,679,300,980] #No of users of programming languages
prog_lang = ['Java', 'C#', 'C++', 'CSS', 'Java Script', 'Python', 'R']
fig = plt.figure(figsize=(14,6), dpi=100)
plt.pie(popularity,
shadow = True,
autopct= '%.f%%', startangle = 180,
explode=[0,0,0,0,0,0,0.1],
labels = prog_lang)
plt.title('Popularity of Programming languages')
plt.show()
```
# Svenskt Kvinnobiografiskt lexikon part 5
version part 5 - 0.1
Check whether Alvin has an authority record for the SKBL women
* this [Jupyter Notebook](https://github.com/salgo60/open-data-examples/blob/master/Svenskt%20Kvinnobiografiskt%20lexikon%20part%205.ipynb)
* [part 1](https://github.com/salgo60/open-data-examples/blob/master/Svenskt%20Kvinnobiografiskt%20lexikon.ipynb) check Wikidata and SKBL
* [part 2](https://github.com/salgo60/open-data-examples/blob/master/Svenskt%20Kvinnobiografiskt%20lexikon%20part%202.ipynb) more queries etc.
* [part 4](https://github.com/salgo60/open-data-examples/blob/master/Svenskt%20Kvinnobiografiskt%20lexikon%20part%204.ipynb) get archives
# Wikidata
get SKBL women not connected to Alvin
```
from datetime import datetime
now = datetime.now()
print("Last run: ", datetime.now())
# pip install sparqlwrapper
# https://rdflib.github.io/sparqlwrapper/
import sys,json
import pandas as pd
from SPARQLWrapper import SPARQLWrapper, JSON
endpoint_url = "https://query.wikidata.org/sparql"
querySKBLAlvin = """SELECT ?item (REPLACE(STR(?item), ".*Q", "Q") AS ?wid) ?SKBL (URI(CONCAT("https://www.alvin-portal.org/alvin/resultList.jsf?query=", ENCODE_FOR_URI(?itemLabel), "&searchType=PERSON")) AS ?Alvin) WHERE {
?item wdt:P4963 ?id.
OPTIONAL { ?item wdt:P569 ?birth. }
MINUS { ?item wdt:P6821 ?value. }
BIND(URI(CONCAT("https://www.skbl.se/sv/artikel/", ?id)) AS ?SKBL)
SERVICE wikibase:label {
bd:serviceParam wikibase:language "sv".
?item rdfs:label ?itemLabel.
}
}
ORDER BY (?itemLabel)"""
def get_sparql_dataframe(endpoint_url, query):
"""
Helper function to convert SPARQL results into a Pandas data frame.
"""
user_agent = "salgo60/%s.%s" % (sys.version_info[0], sys.version_info[1])
sparql = SPARQLWrapper(endpoint_url, agent=user_agent)
sparql.setQuery(query)
sparql.setReturnFormat(JSON)
result = sparql.query()
processed_results = json.load(result.response)
cols = processed_results['head']['vars']
out = []
for row in processed_results['results']['bindings']:
item = []
for c in cols:
item.append(row.get(c, {}).get('value'))
out.append(item)
return pd.DataFrame(out, columns=cols)
SKBLmissingAlvin = get_sparql_dataframe(endpoint_url, querySKBLAlvin )
SKBLmissingAlvin.info()
import urllib3

http = urllib3.PoolManager()
listNewItems = []
# Fetch the Alvin search page for each woman; the size of the response gives a
# rough indication of whether Alvin returns any hits for the name
for index, row in SKBLmissingAlvin.iterrows():
    url = row["Alvin"]
    r = http.request('GET', url)
    print(len(r.data), url)
    #listNewItems.append(new_item)
#print(len(listNewItems), "records")
```
[[source]](../api/alibi.explainers.shap_wrappers.rst)
# Tree SHAP
<div class="alert alert-info">
Note
To enable SHAP support, you may need to run:
```bash
pip install alibi[shap]
```
</div>
## Overview
The tree SHAP (**SH**apley **A**dditive ex**P**lanations) algorithm is based on the paper [From local explanations to global understanding with explainable AI for trees](https://www.nature.com/articles/s42256-019-0138-9) by Lundberg et al. and builds on the open source [shap library](https://github.com/slundberg/shap) from the paper's first author.
The algorithm provides human interpretable explanations suitable for regression and classification of models with tree structure applied to tabular data. This method is a member of the *additive feature attribution methods* class; feature attribution refers to the fact that the change of an outcome to be explained (e.g., a class probability in a classification problem) with respect to a *baseline* (e.g., average prediction probability for that class in the training set) can be attributed in different proportions to the model input features.
A simple illustration of the explanation process is shown in Figure 1. Here we see depicted a tree-based model which takes as an input features such as `Age`, `BMI` or `Blood pressure` and outputs `Mortality risk score`, a continuous value. Let's assume that we aim to explain the difference between an observed outcome and no risk, corresponding to a base value of `0.0`. Using the Tree SHAP algorithm, we attribute the `4.0` difference to the input features. Because the sum of the attribution values equals `output - base value`, this method is _additive_. We can see for example that the `Sex` feature contributes negatively to this prediction whereas the remainder of the features have a positive contribution (i.e., increase the mortality risk). For explaining this particular data point, the `Blood Pressure` feature seems to have the largest effect, and corresponds to an increase in the mortality risk. See our examples on how to perform explanations with this algorithm and visualise the results using the `shap` library visualisations [here](../examples/interventional_tree_shap_adult_xgb.ipynb) and [here](../examples/path_dependent_tree_shap_adult_xgb.ipynb).

Figure 1: Cartoon illustration of explanation models with Tree SHAP.
Image Credit: Scott Lundberg (see source [here](https://www.nature.com/articles/s42256-019-0138-9))
## Usage
In order to compute the shap values, the following arguments can optionally be set when calling the `explain` method:
- `interactions`: set to `True` to decompose the shap value of every feature for every example into a main effect and interaction effects
- `approximate`: set to `True` to calculate an approximation to shap values (see our [example](../examples/path_dependent_tree_shap_adult_xgb.ipynb))
- `check_additivity`: if the explainer is initialised with `model_output='raw'` and this option is `True`, the explainer checks that the sum of the shap values is equal to model output - expected value
- `tree_limit`: if an `int` is passed, an ensemble formed of only `tree_limit` trees is explained
If the dataset contains categorical variables that have been encoded before being passed to the explainer and a single shap value is desired for each categorical variable, the following options should be specified (a short sketch follows the list below):
- `summarise_result`: set to `True`
- `cat_var_start_idx`: a sequence of integers containing the column indices where categorical variables start. If the feature matrix contains a categorical feature starting at index 0 and one at index 10, then `cat_var_start_idx=[0, 10]`
- `cat_vars_enc_dim`: a list containing the dimension of the encoded categorical variables. The number of columns specified in this list is summed for each categorical variable starting with the corresponding index in `cat_var_start_idx`. So if `cat_var_start_idx=[0, 10]` and `cat_vars_enc_dim=[3, 5]`, then the columns with indices `0, 1` and `2` and `10, 11, 12, 13` and `14` will be combined to return one shap value for each categorical variable, as opposed to `3` and `5`.
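For example, a hedged sketch of such an `explain` call for the encoding layout described above (a 3-level categorical variable starting at column 0 and a 5-level one starting at column 10; the feature matrix `X` is assumed to already be encoded this way, and the option names are those listed above):
```python
explanation = explainer.explain(
    X,
    summarise_result=True,
    cat_var_start_idx=[0, 10],  # columns where each encoded categorical variable starts
    cat_vars_enc_dim=[3, 5],    # number of encoded columns per categorical variable
)
```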
### Path-dependent feature perturbation algorithm
#### Initialisation and fit
The explainer is initialised with the following arguments:
- a model, which could be an `sklearn`, `xgboost`, `catboost` or `lightgbm` model. Note that some of the models in these packages or models trained with specific objectives may not be supported. In particular, passing raw strings as categorical levels for `catboost` and `lightgbm` is not supported
- `model_output` should always default to `raw` for this algorithm
- optionally, set `task` to `'classification'` or `'regression'` to indicate the type of prediction the model makes. If set to `regression` the `prediction` field of the response is empty
- optionally, a list of feature names via `feature_names`. This is used to provide information about feature importances in the response
- optionally, a dictionary, `categorical_names`, that maps the columns of the categorical variables to a list of strings representing the names of the categories. This may be used for visualisation in the future.
```python
from alibi.explainers import TreeShap
explainer = TreeShap(
model,
feature_names=['size', 'age'],
categorical_names={0: ['S', 'M', 'L', 'XL', 'XXL']}
)
```
For this algorithm, fit is called with no arguments:
```python
explainer.fit()
```
#### Explanation
To explain an instance `X`, we simply pass it to the explain method:
```python
explanation = explainer.explain(X)
```
The returned explanation object has the following fields:
* `explanation.meta`:
```python
{'name': 'TreeShap',
'type': ['whitebox'],
'task': 'classification',
'explanations': ['local', 'global'],
'params': {'summarise_background': False, 'algorithm': 'tree_path_dependent' ,'kwargs': {}}
}
```
This field contains metadata such as the explainer name and type as well as the type of explanations this method can generate. In this case, the `params` attribute shows the Tree SHAP variant that will be used to explain the model in the `algorithm` attribute.
* `explanation.data`:
```python
data={'shap_values': [
array([[ 5.0661433e-01, 2.7620478e-02],
[-4.1725192e+00, 4.4859368e-03],
[ 4.1338313e-01, -5.5618007e-02]],
dtype=float32)
],
'shap_interaction_values': [array([], dtype=float64)],
'expected_value': array([-0.06472124]),
'model_output': 'raw',
'categorical_names': {0: ['S', 'M', 'L', 'XL', 'XXL']},
'feature_names': ['size', 'age'],
'raw': {
'raw_prediction': array([-0.73818872, -8.8434663 , -3.24204564]),
'loss': [],
'prediction': array([0, 0, 0]),
'instances': array([[0, 23],
[4, 55],
[2, 43]]),
'labels': array([], dtype=float64),
'importances': {
'0': {
'ranked_effect': array([1.6975055 , 1.3598266], dtype=float32),
'names': [
'size',
'age',
]
},
'aggregated': {
'ranked_effect': array([1.6975055 , 1.3598266], dtype=float32),
'names': [
'size',
'age',
]
}
}
}
}
```
This field contains:
* `shap_values`: a list of length equal to the number of model outputs, where each entry is an array of dimension samples x features of shap values. For the example above, 3 instances with 2 features have been explained, so the shap values for each class are of dimension 3 x 2
* `shap_interaction_values`: an empty list since `interactions` was set to `False` in the explain call
* `expected_value`: an array containing expected value for each model output
* `model_output`: `raw` indicates that the model raw output was explained, the only option for the path dependent algorithm
* `feature_names`: a list with the feature names
* `categorical_names`: a mapping of the categorical variables (represented by indices in the shap_values columns) to the description of the category
* `raw`: this field contains:
* `raw_prediction`: a samples x n_outputs array of predictions for each instance to be explained.
* `prediction`: an array containing the index of the maximum value in the `raw_prediction` array
* `instances`: a samples x n_features array of instances which have been explained
* `labels`: an array containing the labels for the instances to be explained
* `importances`: a dictionary where each entry is a dictionary containing the sorted average magnitude of the shap value (ranked_effect) along with a list of feature names corresponding to the re-ordered shap values (names). There are n_outputs + 1 keys, corresponding to n_outputs and the aggregated output (obtained by summing all the arrays in shap_values)
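For instance, a minimal sketch of pulling these fields out of the explanation object programmatically (the field names follow the structure shown above):
```python
shap_values = explanation.data['shap_values'][0]              # samples x features, first model output
aggregated = explanation.data['raw']['importances']['aggregated']
print(dict(zip(aggregated['names'], aggregated['ranked_effect'])))
```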
Please see our examples on how to visualise these outputs using the shap library visualisations [here](../examples/interventional_tree_shap_adult_xgb.ipynb) and [here](../examples/path_dependent_tree_shap_adult_xgb.ipynb).
#### Shapley interaction values
##### Initialisation and fit
Shapley interaction values can only be calculated using the path-dependent feature perturbation algorithm in this release, so no arguments are passed to the `fit` method:
```python
explainer = TreeShap(
model,
model_output='raw',
)
explainer.fit()
```
##### Explanation
To obtain the Shapley interaction values, the `explain` method is called with the option `interactions=True`:
```python
explanation = explainer.explain(X, interactions=True)
```
The explanation contains a list with the shap interaction values for each model output in the `shap_interaction_values` field of the `data` property.
### Interventional feature perturbation algorithm
#### Explaining model output
##### Initialisation and fit
```python
explainer = TreeShap(
model,
model_output='raw',
)
explainer.fit(X_reference)
```
Model output can be set to `model_output='probability'` to explain models which return probabilities. Note that this requires the model to be trained with specific objectives. Please see the footnote in our path-dependent feature perturbation [example](../examples/path_dependent_tree_shap_adult_xgb.ipynb) for an example of how to set the model training objective in order to explain probability outputs.
##### Explanation
To explain instances in `X`, the explainer is called as follows:
```python
explanation = explainer.explain(X)
```
#### Explaining loss functions
##### Initialisation and fit
To explain a loss function, the following configuration and fit steps are necessary:
```python
explainer = TreeShap(
model,
model_output='log_loss',
)
explainer.fit(X_reference)
```
Only square loss regression objectives and cross-entropy classification objectives are supported in this release.
##### Explanation
Note that the labels need to be passed to the `explain` method in order to obtain the explanation:
```python
explanation = explainer.explain(X, y)
```
### Miscellaneous
#### Runtime considerations
##### Adjusting the size of the reference dataset
The algorithm automatically warns the user if a background dataset size of more than `1000` samples is passed. If the runtime of an explanation with the original dataset is too large, then the algorithm can automatically subsample the background dataset during the `fit` step. This can be achieved by specifying the fit step as
```python
explainer.fit(
X_reference,
summarise_background=True,
n_background_samples=300,
)
```
or
```python
explainer.fit(
X_reference,
summarise_background='auto'
)
```
The `auto` option will select `1000` examples, whereas using the boolean argument allows the user to directly control the size of the reference set. If categorical variables are specified, the algorithm uses subsampling of the data. Otherwise, a kmeans clustering algorithm is used to select the background dataset.
As described above, the explanations are performed with respect to the expected output over this dataset, so the shap values will be affected by the dataset selection. We recommend experimenting with various ways to choose the background dataset before deploying explanations.
## Theoretical overview
Recall that, for a model $f$, the Kernel SHAP algorithm [[1]](#References) explains a certain outcome with respect to a chosen reference (or an expected value) by estimating the shap values of each feature $i$ from $\{1, ..., M\}$ as follows (a brute-force illustration in code is given after equation $(1)$ below):
- enumerate all subsets $S$ of the set $F \setminus \{i\}$
- for each $S \subseteq F \setminus \{i\}$, compute the contribution of feature $i$ as $C(i|S) = f(S \cup \{i\}) - f(S)$
- compute the shap value according to
\begin{equation}\tag{1}
\phi_i := \frac{1}{M} \sum \limits_{{S \subseteq F \setminus \{i\}}} \frac{1}{\binom{M - 1}{|S|}} C(i|S).
\end{equation}
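As an illustration of equation $(1)$, a brute-force sketch that enumerates all subsets explicitly (here the value function $f(S)$ is assumed to be available exactly; the toy additive `f_S` below is a made-up example, not part of the algorithm):
```python
from itertools import combinations
from math import comb

def shap_value(i, M, f_S):
    """Exact Shapley value of feature i via equation (1); f_S maps a frozenset S to f(S)."""
    others = [j for j in range(M) if j != i]
    phi = 0.0
    for size in range(M):
        for S in combinations(others, size):
            S = frozenset(S)
            phi += (f_S(S | {i}) - f_S(S)) / comb(M - 1, len(S))
    return phi / M

# Toy additive value function: f(S) sums fixed per-feature effects.
effects = {0: 1.0, 1: -2.0, 2: 0.5}
f_S = lambda S: sum(effects[j] for j in S)
print([shap_value(i, 3, f_S) for i in range(3)])  # recovers [1.0, -2.0, 0.5]
```
Note the exponential cost of the subset enumeration per feature: this is exactly what the tree-specific algorithms described below avoid.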
Since most models do not accept arbitrary patterns of missing values at inference time, $f(S)$ needs to be approximated. The original formulation of the Kernel Shap algorithm [[1]](#References) proposes to compute $f(S)$ as the _observational conditional expectation_
\begin{equation}\tag{2}
f(S) := \mathbb{E}\left[f(\mathbf{x}_{S}, \mathbf{X}_{\bar{S}} | \mathbf{X}_S = \mathbf{x}_S) \right]
\end{equation}
where the expectation is taken over a *background dataset*, $\mathcal{D}$, after conditioning. Computing this expectation involves drawing sufficiently many samples from $\mathbf{X}_{\bar{S}}$ for every sample from $\mathbf{X}_S$, which is expensive. Instead, $(2)$ is approximated by
$$
f(S) := \mathbb{E} \left[f(\mathbf{x}_{S}, \mathbf{X}_{\bar{S}})\right]
$$
where features in a subset $S$ are fixed and features in $\bar{S}$ are sampled from the background dataset. This quantity is referred to as _marginal_ or *interventional conditional expectation*, to emphasise that setting features in $S$ to the values $\mathbf{x}_{S}$ can be viewed as an intervention on the instance to be explained.
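A hedged sketch of estimating this interventional (marginal) expectation with a background dataset (the model `f`, the 1-D numpy array `x` and the 2-D array `background` are placeholders for illustration):
```python
import numpy as np

def interventional_fS(f, x, S, background):
    """Estimate E[f(x_S, X_Sbar)]: fix features in S to x's values and take
    the remaining feature values from each background sample."""
    hybrids = background.copy()        # shape (R, M): one hybrid sample per background row
    idx = list(S)
    hybrids[:, idx] = x[idx]           # intervene on the features in S
    return f(hybrids).mean()
```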
As described in [[2]](#References), if estimating the impact of a feature $i$ on the function value by $\mathbb{E} \left[ f | X_i = x_i \right]$, one should bear in mind that observing $X_i = x_i$ changes the distribution of the features $X_{j \neq i}$ if these variables are correlated. Hence, if the conditional expectation is used to estimate $f(S)$, the Shapley values might not be accurate since they also depend on the remaining variables, an effect which becomes important if there are strong correlations amongst the independent variables. Furthermore, the authors show that estimating $f(S)$ using the conditional expectation violates the *sensitivity principle*, according to which the Shapley value of a redundant variable should be 0. On the other hand, the intervention breaks the dependencies, ensuring that sensitivity holds. One potential drawback of this method is that setting a subset of features to certain values without regard to the values of the features in the complement (i.e., $\bar{S}$) can generate instances that are outside the training data distribution, which will affect the model prediction and hence the contributions.
The following sections detail how these methods work and how, unlike Kernel SHAP, they compute the exact shap values in polynomial time. The algorithm estimating contributions using interventional expectations is presented first; the remaining sections are dedicated to an approximate algorithm for evaluating the interventional expectation that does not require a background dataset, and to Shapley interaction values.
<a id='source_1'></a>
### Interventional feature perturbation
<a id='interventional'></a>
The interventional feature perturbation algorithm provides an efficient way to calculate the expectation $f(S) := \mathbb{E} \left[f(\mathbf{x}_{S}, \mathbf{X}_{\bar{S}})\right]$ for all possible subsets $S$, and to combine these values according to equation $(1)$ in order to obtain the Shapley value. Intuitively, one can proceed as follows (a naive version is sketched in code after the list below):
- choose a background sample $r \in \mathcal{D}$
- for each feature $i$, enumerate all subsets $S \subseteq F \setminus \{i\}$
- for each such subset, $S$, compute $f(S)$ by traversing the tree with a _hybrid sample_ where the features in $\bar{S}$ are replaced by their corresponding values in $r$
- combine results according to equation $(1)$
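A naive sketch of the hybrid-sample evaluation for a single tree and a single background sample (the dict-based tree layout is an assumption made for illustration; real implementations operate directly on the fitted model's internal arrays):
```python
def predict_hybrid(node, x, r, S):
    """Traverse a single tree with a hybrid sample: features in S take their
    values from x, the remaining features take their values from r."""
    while 'value' not in node:                    # descend until a leaf is reached
        j = node['feature']
        v = x[j] if j in S else r[j]              # hybrid sample
        node = node['left'] if v < node['threshold'] else node['right']
    return node['value']

# Toy stump splitting on feature 0 at threshold 0.5 (illustrative only).
tree = {'feature': 0, 'threshold': 0.5,
        'left': {'value': -1.0}, 'right': {'value': 2.0}}
x, r = [0.9, 0.1], [0.2, 0.3]
print(predict_hybrid(tree, x, r, S={0}))   # follows x on feature 0 -> 2.0
print(predict_hybrid(tree, x, r, S=set())) # follows r on feature 0 -> -1.0
```
These $f(S)$ evaluations can then be combined exactly as in equation $(1)$, e.g. with the brute-force routine sketched earlier.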
If $R$ samples from the background distribution are used, then the complexity of this algorithm is $O(RM2^M)$ since we perform $2^M$ enumerations for each of the $M$ features, $R$ times. The key insight into this algorithm is that multiple hybrid samples will end up traversing identical paths and that this can be avoided if the shap values' calculation is reformulated as a summation over the paths in the tree (see [[4]](#References) for a proof):
$$
\phi_i = \sum_{P}\phi_{i}^P
$$
where the summation is over paths $P$ in the tree descending from $i$. The value and sign of the contribution of each path descending through a node depends on whether the split from the node is due to a foreground or a background feature, as explained in the practical example below.
<a id='source_4'></a>
#### Computing contributions with interventional Tree SHAP: a practical example.

Figure 2: Illustration of the feature contribution and expected value estimation process using interventional perturbation Tree SHAP. The positive and the negative contributions of a node are represented in <span style="color:green">green</span> and <span style="color:red">red</span>, respectively.
In the figure above, the paths followed due to the instance to be explained $x$ are coloured in red, the paths followed due to the background sample in blue, and common paths in yellow.
The instance to be explained is perturbed using a reference sample by replacing the values of the features $F1$, $F3$ and $F5$ in $x$ with the corresponding values in $r$. This process gives the algorithm its name, since following the paths indicated by the background sample is akin to intervening on the instance to be explained with features from the background sample. Therefore, one defines the set $F$ in the previous section as $F = \{ j: x_{j} \neq r_{j}\}$ for this case. Note that these are the only features for which one can estimate a contribution given this background sample; the same path is followed for features $F2$ and $F4$ for both the original and the perturbed sample, so these features do not contribute to explaining the difference between the observed outcome ($v_6$) and the outcome that would have been observed if the tree had been traversed according to the reference $(v_{10})$.
Considering the structure of the tree for the given $x$ and $r$ together with equation $(1)$ reveals that the left subtree can be traversed to compute the negative terms in the summation, whereas the right subtree provides the positive terms. This is because the nodes in the left subtree can only be reached if $F1$ takes the value from the background sample, that is, only $F1$ is missing. Because $F2$ and $F4$ do not contribute to explaining $f(x) - f(r)$, the negative contribution of the left subtree will be equal to the negative contribution of node $8$. This node sums two negative components: one when the downstream feature $F5$ is also missing (corresponding to evaluating $f$ at $S = \varnothing$) and one when $F5$ is present (corresponding to evaluating $f$ at $S=\{F5\}$). These negative values are weighted according to the combinatorial factor in equation $(1)$. By a similar reasoning, the nodes in the right subtree are reached only if $F1$ is present and they provide the positive terms for the shap value computation. Note that the combinatorial factor in $(1)$ should be evaluated with $|S| \gets |S| - 1$ for positive contributions, since $|S|$ is increased by $1$ because the feature whose contribution is calculated is present in the right subtree.
A similar reasoning is applied to compute the contributions of the downstream nodes. For example, to estimate the contribution of $F5$, one considers a set $S = \varnothing$ and observes the value of node $10$, and weighs that with the combinatorial factor from equation $(1)$ where $M-1 = 1$ and $|S|=0$ (because there are no features present on the path) and a positive contribution from node $9$ weighted by the same combinatorial factor (because $S = \{F5\}$ so $|S| - 1 = 0$).
To summarise, the efficient algorithm relies on the following key ideas:
- each node in the tree is assigned a positive contribution reflecting membership of the splitting feature in a subset $S$ and a negative contribution to indicate the feature is missing ($i\in \bar{S}$)
- the positive and negative contributions of a node can be computed by summing the positive and negative contributions of the children nodes, in keeping with the fact that the Shapley value can be computed by summing a contribution from each path the feature is on
- to compute the contribution of a feature at a node, one adds a positive contribution from the node reached by splitting on the feature from the instance to be explained and a negative contribution from the node reached by splitting on the feature in the background sample
- features for which the instance to be explained and the reference follow the same path are assigned $0$ contribution.
#### Explaining loss functions
One advantage of the interventional approach is that it allows one to approximately transform the shap values to account for nonlinear transformations of the output, such as the loss function. Recall that given $\phi_1, ..., \phi_M$, the local accuracy property guarantees that, with $\phi_0 = \mathbb{E}[f(x)]$,
\begin{equation}\tag{3}
f(x) = \phi_0 + \sum \limits_{i=1}^M \phi_i.
\end{equation}
Hence, in order to account for the effect of the nonlinear transformation $h$, one has to find the functions $g_0, ..., g_M$ such that
\begin{equation}\tag{4}
h(f(x)) = g_0(\phi_0) + \sum \limits_{i=1}^M g_i(\phi_i)
\end{equation}
For simplicity, let $y=f(x)$. Then using a first-order Taylor series expansion around $\mathbb{E}[y]$ one obtains
\begin{equation}\tag{5}
h(y) \approx h(\mathbb{E}[y]) + \frac{\partial h(y) }{\partial y} \Bigr|_{y=\mathbb{E}[y]}(y - \mathbb{E}[y]).
\end{equation}
Substituting $(3)$ in $(5)$ and comparing coefficients with $(4)$ yields
\begin{equation*}
\begin{split}
g_0 & \approx h(\mathbb{E}[y]) \\
g_i &\approx \phi_i \frac{\partial h(y) }{\partial y} \Bigr|_{y=\mathbb{E}[y]} .
\end{split}
\end{equation*}
Hence, an approximate correction is given by simply scaling the shap values using the gradient of the nonlinear function. Note that in practice one may take the Taylor series expansion at a reference point $r$ from the background dataset and average over the entire background dataset to compute the scaling factor. This introduces an additional source of noise since $h(\mathbb{E}[y]) = \mathbb{E}[h(y)]$ only when $h$ is linear.
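A small numeric sketch of this correction (the numbers, the squared loss used as the nonlinear transformation $h$, and the label `t` are all made up for illustration):
```python
import numpy as np

y_background = np.array([0.2, -0.1, 0.4])   # model outputs over the background dataset
phi = np.array([0.3, -0.15])                # shap values of the raw model output
t = 1.0                                     # observed label
h = lambda y: (y - t) ** 2                  # example nonlinear transformation: squared loss
dh = lambda y: 2.0 * (y - t)                # its gradient dh/dy

g0 = h(y_background.mean())                 # g_0 ~= h(E[y])
g = phi * dh(y_background.mean())           # g_i ~= phi_i * dh/dy evaluated at E[y]
```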
#### Computational complexity
For a single foreground and background sample and a single tree, the algorithm runs in $O(LD)$ time. Thus, using $R$ background samples and a model containing $T$ trees, yields a complexity of $O(TRLD)$.
### Path dependent feature perturbation
<a id='path_dependent'></a>
Another way to approximate equation $(2)$ to compute $f(S)$ given an instance $x$ and a set of missing features $\bar{S}$ is to recursively follow the decision path through the tree and:
- return the node value if a split on a feature $i \in S$ is performed
- take a weighted average of the values returned by children if $i \in \bar{S}$, where the weighing factor is equal to the proportion of training examples flowing down each branch. This proportion is a property of each node, sometimes referred to as _weight_ or _cover_ and measures how important is that node with regard to classifying the training data.
Therefore, in the path-dependent perturbation method, we compute the expectations with respect to the training data distribution by weighting the leaf values according to the proportion of the training examples that flow to that leaf.
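A recursive sketch of this estimate for a single tree (the dict-based node layout, where every node including leaves stores a `cover` field, is an assumption made for illustration):
```python
def path_dependent_fS(node, x, S):
    """Approximate f(S) for instance x by following x's path for features in S
    and taking cover-weighted averages when the split feature is missing."""
    if 'value' in node:                                # leaf
        return node['value']
    j = node['feature']
    left, right = node['left'], node['right']
    if j in S:                                         # feature present: follow x's path
        child = left if x[j] < node['threshold'] else right
        return path_dependent_fS(child, x, S)
    # feature missing: weight each branch by the proportion of training data (cover) flowing down it
    return (left['cover'] * path_dependent_fS(left, x, S) +
            right['cover'] * path_dependent_fS(right, x, S)) / node['cover']
```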
To avoid repeating the above recursion $M2^M$ times, one first notices that for a single decision tree, applying a perturbation would result in the sample ending up in a different leaf. Therefore, following each path from the root to a leaf in the tree is equivalent to perturbing subsets of features of varying cardinalities. Consequently, each leaf will contain a certain proportion of all possible subsets $S \subseteq F$. Therefore, to compute the shap values, the following quantities are computed at each leaf, *for every feature $i$ on the path leading to that leaf*:
- the proportion of subsets $S$ at the leaf that contain $i$ and the proportion of subsets $S$ that do not contain $i$
- for each cardinality, the proportion of the sets of that cardinality contained at the leaf. Tracking each cardinality, as opposed to a single count of subsets falling into a given leaf, is necessary since it allows one to apply the weighting factor in equation (1), which depends on the subset size, $|S|$.
This intuition can be summarised as follows:
\begin{equation}\tag{6}
\phi_i := \sum \limits_{j=1}^L \sum \limits_{P \in {S_j}} \frac {w(|P|, j)}{ M_j {\binom{M_j - 1}{|P|}}} (p_o^{i,j} - p_z^{i, j}) v_j
\end{equation}
where $S_j$ is the set of present feature subsets at leaf $j$, $M_j$ is the length of the path, $w(|P|, j)$ is the proportion of all subsets of cardinality $|P|$ at leaf $j$, and $p_o^{i, j}$ and $p_z^{i, j}$ represent the fractions of subsets that contain or do not contain feature $i$, respectively.
#### Computational complexity
Using the above quantities, one can compute the _contribution_ of each leaf to the Shapley value of every feature. This algorithm has complexity $O(TLD^2)$ for an ensemble of trees, where $L$ is the number of leaves, $T$ the number of trees in the ensemble and $D$ the maximum tree depth. If the tree is balanced, then $D=\log L$ and the complexity of the algorithm is $O(TL\log^2L)$.
#### Expected value for the path-dependent perturbation algorithm
Note that although a background dataset is not provided, the expected value is computed using the node cover information, stored at each node. The computation proceeds recursively, starting at the root. The contribution of a node to the expected value of the tree is a function of the expected values of the children and is computed as follows:
$$
c_j = \frac{c_{r(j)}r_{r(j)} + c_{l(j)}r_{l(j)}}{r_j}
$$
where $j$ denotes the node index, $c_j$ denotes the node expected value, $r_j$ is the cover of the $j$th node and $r(j)$ and $l(j)$ represent the indices of the right and left children, respectively. The expected value used by the tree is simply $c_{root}$. Note that for tree ensembles, the expected value of each ensemble member is weighted according to the tree weight, and the weighted expected values of all trees are summed to obtain a single value.
The cover depends on the objective function and the model chosen. For example, in a gradient boosted tree trained with squared loss objective, $r_j$ is simply the number of training examples flowing through $j$. For an arbitrary objective, this is the sum of the Hessian of the loss function evaluated at each point flowing through $j$, as explained [here](../examples/xgboost_model_fitting_adult.ipynb).
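A minimal sketch of this computation, reusing the same hypothetical per-node layout as above, might look as follows.
```
def tree_expected_value(node, tree):
    """Expected value of a single tree: the cover-weighted average of the
    children's expected values, computed recursively starting at the root."""
    left, right = tree["children_left"][node], tree["children_right"][node]
    if left == -1:  # leaf node
        return tree["value"][node]
    return (tree_expected_value(left, tree) * tree["cover"][left]
            + tree_expected_value(right, tree) * tree["cover"][right]) / tree["cover"][node]

def ensemble_expected_value(trees, tree_weights):
    # weighted sum of the per-tree expected values, as described above
    return sum(w * tree_expected_value(0, t) for t, w in zip(trees, tree_weights))
```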
### Shapley interaction values
While the Shapley values provide a solution to the problem of allocating a function variation to the input features, in practice it might be of interest to understand how the importance of a feature depends on the other features. The Shapley interaction values can solve this problem, by allocating the change in the function amongst the individual features (*main effects*) and all pairs of features (*interaction effects*). Thus, they are defined as
\begin{equation}\tag{7}
\Phi_{i, j}(f, x) = \sum_{S \subseteq {F \setminus \{i, j\}}} \frac{1}{2|S| {\binom{M-1}{|S| - 1}}} \nabla_{ij}(f, x, S), \; i \neq j
\end{equation}
and
\begin{equation}\tag{8}
\nabla_{ij}(f, x, S) = \underbrace{f_{x}(S \cup \{i, j\}) - f_x(S \cup \{j\})}_{j \; present} - \underbrace{[f_x(S \cup \{i\}) - f_x(S)]}_{j \; not \; present}.
\end{equation}
Therefore, the interaction of features $i$ and $j$ can be computed by taking the difference between the shap values of $i$ when $j$ is present and when $j$ is not present. The main effects are defined as
$$
\Phi_{i,i}(f, x) = \phi_i(f, x) - \sum_{j \neq i} \Phi_{i, j}(f, x).
$$
Setting $\Phi_{0, 0} = f_x(\varnothing)$ yields the local accuracy property for Shapley interaction values:
$$f(x) = \sum \limits_{i=0}^M \sum \limits_{j=0}^M \Phi_{i, j}(f, x).$$
The interaction is split equally between feature $i$ and $j$, which is why the division by two appears in equation $(7)$. The total interaction effect is defined as $\Phi_{i, j}(f, x) + \Phi_{j, i}(f,x)$.
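A small numerical sanity check of these properties could look like the sketch below, assuming `interaction_values` is a hypothetical $(M+1) \times (M+1)$ array indexed from $0$ (the bias term) to $M$.
```
import numpy as np

def check_local_accuracy(interaction_values, f_x):
    # all main and interaction effects (including the bias term) sum to f(x)
    assert np.isclose(interaction_values.sum(), f_x)

def total_interaction_effect(interaction_values, i, j):
    # the interaction is split equally between i and j, so sum both halves
    return interaction_values[i, j] + interaction_values[j, i]
```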
#### Computational complexity
According to equation $(8)$, the interaction values can be computed by applying either the interventional or path-dependent feature perturbation algorithm twice: once by fixing the value of feature $j$ to $x_j$ and computing the Shapley value for feature $i$ in this configuration, and once by fixing $x_j$ to a "missing" value and performing the same computation. Thus, the interaction values can be computed in $O(TMLD^2)$ with the path-dependent perturbation algorithm and $O(TMLDR)$ with the interventional feature perturbation algorithm.
### Comparison to other methods
Tree-based models are widely used in areas where model interpretability is of interest because node-level statistics gathered from the training data can be used to provide insights into the behaviour of the model across the training dataset, providing a _global explanation_ technique. As shown in our [example](../examples/path_dependent_tree_shap_adult_xgb.ipynb), considering different statistics gives rise to different importance rankings. As discussed in [[1]](#References) and [[3]](#References), depending on the statistic chosen, feature importances derived from trees are not *consistent*, meaning that a feature known to have a bigger impact on the model output may nonetheless be assigned a lower importance. As such, these feature importances cannot be compared across models. Both the path-dependent and interventional perturbation algorithms address this limitation.
In contrast to feature importances derived from tree statistics, the Tree SHAP algorithms can also provide local explanations, allowing the identification of features that are globally "not important", but can affect specific outcomes significantly, as might be the case in healthcare applications. Additionally, they provide a means to succinctly summarise the effect magnitude and direction (positive or negative) across potentially large samples. Finally, as shown in [[1]](#References) (see [here](https://static-content.springer.com/esm/art%3A10.1038%2Fs42256-019-0138-9/MediaObjects/42256_2019_138_MOESM1_ESM.pdf), p. 26), averaging the instance-level shap values to derive a global importance score for each feature can result in improvements in feature selection tasks.
Another method to derive instance-level explanations for tree-based models has been proposed by Saabas [here](https://github.com/andosa/treeinterpreter). This feature attribution method is similar in spirit to the Shapley value, but it does not account for the effect of variable order, as explained [here](https://static-content.springer.com/esm/art%3A10.1038%2Fs42256-019-0138-9/MediaObjects/42256_2019_138_MOESM1_ESM.pdf) (pp. 10-11), and it does not satisfy consistency ([[3]](#References)).
Finally, both Tree SHAP algorithms exploit model structure to provide exact Shapley value computation, albeit using different estimates for the effect of missing features, achieving explanations in low-order polynomial time. The KernelShap method relies on post-hoc (black-box) function modelling and approximations to estimate the same quantities and, given enough samples, has been shown to converge to the exact values (see experiments [here](https://static-content.springer.com/esm/art%3A10.1038%2Fs42256-019-0138-9/MediaObjects/42256_2019_138_MOESM1_ESM.pdf) and our [example](../examples/interventional_tree_shap_adult_xgb.ipynb)). Our Kernel SHAP [documentation](KernelSHAP.ipynb) provides comparisons of feature attribution methods based on Shapley values with other algorithms such as LIME and [anchors](Anchors.ipynb).
<a id='source_3'></a>
## References
<a id='References'></a>
[[1]](#source_1) Lundberg, S.M. and Lee, S.I., 2017. A unified approach to interpreting model predictions. In Advances in neural information processing systems (pp. 4765-4774).
[[2]](#source_2) Janzing, D., Minorics, L. and Blöbaum, P., 2019. Feature relevance quantification in explainable AI: A causality problem. arXiv preprint arXiv:1910.13413.
[[3]](#source_3) Lundberg, S.M., Erion, G.G. and Lee, S.I., 2018. Consistent individualized feature attribution for tree ensembles. arXiv preprint arXiv:1802.03888.
[[4]](#source_4) Chen, H., Lundberg, S.M. and Lee, S.I., 2018. Understanding Shapley value explanation algorithms for trees. Under review for publication in Distill, draft available [here](https://hughchen.github.io/its_blog/index.html).
## Examples
### Path-dependent Feature Perturbation Tree SHAP
[Explaining tree models with path-dependent feature perturbation Tree SHAP](../examples/path_dependent_tree_shap_adult_xgb.ipynb)
### Interventional Feature Perturbation Tree SHAP
[Explaining tree models with interventional feature perturbation Tree SHAP](../examples/interventional_tree_shap_adult_xgb.ipynb)
# Using PyTorch with TensorRT through ONNX:
TensorRT is a great way to take a trained PyTorch model and optimize it to run more efficiently during inference on an NVIDIA GPU.
One approach to convert a PyTorch model to TensorRT is to export a PyTorch model to ONNX (an open format exchange for deep learning models) and then convert into a TensorRT engine. Essentially, we will follow this path to convert and deploy our model:

Models from PyTorch, TensorFlow, and many other frameworks can be exported to ONNX. This allows models created using different frameworks to flow into common downstream pipelines.
To get started, let's take a well-known computer vision model and follow five key steps to deploy it to the TensorRT Python runtime:
1. __What format should I save my model in?__
2. __What batch size(s) am I running inference at?__
3. __What precision am I running inference at?__
4. __What TensorRT path am I using to convert my model?__
5. __What runtime am I targeting?__
## 1. What format should I save my model in?
We are going to use ResNet50, a widely used CNN architecture first described in <a href=https://arxiv.org/abs/1512.03385>this paper</a>.
Let's start by loading dependencies and downloading the model:
```
import torchvision.models as models
import torch
import torch.onnx
# load the pretrained model
resnet50 = models.resnet50(pretrained=True, progress=False)
```
Next, we will select our batch size and export the model:
```
# set up a dummy input tensor and export the model to ONNX
BATCH_SIZE = 32
dummy_input=torch.randn(BATCH_SIZE, 3, 224, 224)
torch.onnx.export(resnet50, dummy_input, "resnet50_pytorch.onnx", verbose=False)
```
Note that we are picking a BATCH_SIZE of 32 in this example.
Let's use a benchmarking function included in this guide to time this model:
```
from benchmark import benchmark
resnet50.to("cuda").eval()
benchmark(resnet50)
```
Now, let's restart our Jupyter Kernel so PyTorch doesn't collide with TensorRT:
```
import os
os._exit(0) # Shut down all kernels so TRT doesn't fight with PyTorch for GPU memory
```
## 2. What batch size(s) am I running inference at?
We are going to run with a fixed batch size of 32 for this example. Note that above we set BATCH_SIZE to 32 when saving our model to ONNX. We need to create another dummy batch of the same size (this time it will need to be in our target precision) to test out our engine.
First, as before, we will set our BATCH_SIZE to 32. Note that our trtexec command below includes the '--explicitBatch' flag to signal to TensorRT that we will be using a fixed batch size at runtime.
```
BATCH_SIZE = 32
```
Importantly, by default TensorRT will use the input precision you give the runtime as the default precision for the rest of the network. So before we create our new dummy batch, we also need to choose a precision as in the next section:
## 3. What precision am I running inference at?
Remember that lower precisions than FP32 tend to run faster. There are two common reduced precision modes - FP16 and INT8. Graphics cards that are designed to do inference well often have an affinity for one of these two types. This guide was developed on an NVIDIA V100, which favors FP16, so we will use that here by default. INT8 is a more complicated process that requires a calibration step.
```
import numpy as np
USE_FP16 = True
target_dtype = np.float16 if USE_FP16 else np.float32
dummy_input_batch = np.zeros((BATCH_SIZE, 224, 224, 3), dtype = target_dtype)
```
## 4. What TensorRT path am I using to convert my model?
We can use trtexec, a command line tool for working with TensorRT, in order to convert an ONNX model originally from PyTorch to an engine file.
Let's make sure we have TensorRT installed (this comes with trtexec):
```
import tensorrt
```
To convert the model we saved in the previous step, we need to point to the ONNX file, give trtexec a name to save the engine as, and lastly specify that we want to use a fixed batch size instead of a dynamic one.
```
# step out of Python for a moment to convert the ONNX model to a TRT engine using trtexec
if USE_FP16:
!trtexec --onnx=resnet50_pytorch.onnx --saveEngine=resnet_engine_pytorch.trt --explicitBatch --fp16
else:
!trtexec --onnx=resnet50_pytorch.onnx --saveEngine=resnet_engine_pytorch.trt --explicitBatch
```
This will save our model as 'resnet_engine_pytorch.trt'.
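If you would rather stay inside Python, roughly the same conversion can be done with the TensorRT builder API. The sketch below is an illustration only (not a step from this guide), assuming a TensorRT 8.x install; flag names and defaults may differ between versions.
```
import tensorrt as trt

def build_engine(onnx_path, engine_path, use_fp16=True):
    logger = trt.Logger(trt.Logger.WARNING)
    builder = trt.Builder(logger)
    # explicit-batch network, matching the --explicitBatch flag used with trtexec
    network = builder.create_network(
        1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    parser = trt.OnnxParser(network, logger)
    with open(onnx_path, "rb") as f:
        if not parser.parse(f.read()):
            raise RuntimeError(parser.get_error(0))
    config = builder.create_builder_config()
    if use_fp16:
        config.set_flag(trt.BuilderFlag.FP16)
    serialized = builder.build_serialized_network(network, config)  # TensorRT >= 8.0
    with open(engine_path, "wb") as f:
        f.write(serialized)

# build_engine("resnet50_pytorch.onnx", "resnet_engine_pytorch.trt", use_fp16=USE_FP16)
```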
## 5. What TensorRT runtime am I targeting?
Now we have converted our model to a TensorRT engine. Great! That means we are ready to load it into the native Python TensorRT runtime. This runtime strikes a balance between the ease of use of the high level Python APIs used in frameworks and the fast, low level C++ runtimes available in TensorRT.
```
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
f = open("resnet_engine_pytorch.trt", "rb")
runtime = trt.Runtime(trt.Logger(trt.Logger.WARNING))
engine = runtime.deserialize_cuda_engine(f.read())
context = engine.create_execution_context()
```
Now allocate input and output memory, give TRT pointers (bindings) to it:
```
# need to set input and output precisions to FP16 to fully enable it
output = np.empty([BATCH_SIZE, 1000], dtype = target_dtype)
# allocate device memory
d_input = cuda.mem_alloc(1 * dummy_input_batch.nbytes)
d_output = cuda.mem_alloc(1 * output.nbytes)
bindings = [int(d_input), int(d_output)]
stream = cuda.Stream()
```
Next, set up the prediction function.
This involves a copy from CPU RAM to GPU VRAM, executing the model, then copying the results back from GPU VRAM to CPU RAM:
```
def predict(batch): # result gets copied into output
# transfer input data to device
cuda.memcpy_htod_async(d_input, batch, stream)
# execute model
context.execute_async_v2(bindings, stream.handle, None)
# transfer predictions back
cuda.memcpy_dtoh_async(output, d_output, stream)
    # synchronize threads
stream.synchronize()
return output
```
Finally, let's time the function!
Note that we're going to include the extra CPU-GPU copy time in this evaluation, so it won't be directly comparable with our TRTorch model performance, as it also includes this additional overhead.
```
print("Warming up...")
predict(dummy_input_batch)
print("Done warming up!")
%%timeit
pred = predict(dummy_input_batch)
```
However, even with the CPU-GPU copy, this is still faster than our raw PyTorch model!
## Next Steps:
<h4> Profiling </h4>
This is a great next step for further optimizing and debugging models you are working on productionizing.
You can find it here: https://docs.nvidia.com/deeplearning/tensorrt/best-practices/index.html
<h4> TRT Dev Docs </h4>
Main documentation page for the ONNX, layer builder, C++, and legacy APIs
You can find it here: https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html
<h4> TRT OSS GitHub </h4>
Contains OSS TRT components, sample applications, and plugin examples
You can find it here: https://github.com/NVIDIA/TensorRT
#### TRT Supported Layers:
https://docs.nvidia.com/deeplearning/tensorrt/support-matrix/index.html#layers-precision-matrix
#### TRT ONNX Plugin Example:
https://github.com/NVIDIA/TensorRT/tree/master/samples/opensource/samplePlugin
DIFAX Replication
=================
This example replicates the traditional DIFAX images for upper-level
observations.
By: Kevin Goebbert
Observation data comes from Iowa State Archive, accessed through the
Siphon package. Contour data comes from the GFS 0.5 degree analysis.
Classic upper-level data of Geopotential Height and Temperature are
plotted.
```
import urllib.request
from datetime import datetime, timedelta
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import metpy.calc as mpcalc
import numpy as np
import xarray as xr
from metpy.plots import StationPlot
from metpy.units import units
from siphon.simplewebservice.iastate import IAStateUpperAir
```
Plotting High/Low Symbols
-------------------------
A helper function to plot a text symbol (e.g., H, L) for relative
maximum/minimum for a given field (e.g., geopotential height).
```
def plot_maxmin_points(lon, lat, data, extrema, nsize, symbol, color='k',
plotValue=True, transform=None):
"""
This function will find and plot relative maximum and minimum for a 2D grid. The function
can be used to plot an H for maximum values (e.g., High pressure) and an L for minimum
    values (e.g., low pressure). It is best to use filtered data to obtain a synoptic-scale
    max/min value. The symbol text can be set to a string value, and the color of the
    symbol and any plotted value can optionally be set with the parameter color.
Parameters
----------
lon : 2D array
Plotting longitude values
lat : 2D array
Plotting latitude values
data : 2D array
Data that you wish to plot the max/min symbol placement
extrema : str
Either a value of max for Maximum Values or min for Minimum Values
nsize : int
Size of the grid box to filter the max and min values to plot a reasonable number
symbol : str
Text to be placed at location of max/min value
color : str
Name of matplotlib colorname to plot the symbol (and numerical value, if plotted)
    plotValue : bool (True/False)
Whether to plot the numeric value of max/min point
Return
------
The max/min symbol will be plotted on the current axes within the bounding frame
(e.g., clip_on=True)
"""
    from scipy.ndimage import maximum_filter, minimum_filter
if (extrema == 'max'):
data_ext = maximum_filter(data, nsize, mode='nearest')
elif (extrema == 'min'):
data_ext = minimum_filter(data, nsize, mode='nearest')
else:
raise ValueError('Value for hilo must be either max or min')
if lon.ndim == 1:
lon, lat = np.meshgrid(lon, lat)
mxx, mxy = np.where(data_ext == data)
for i in range(len(mxy)):
ax.text(lon[mxx[i], mxy[i]], lat[mxx[i], mxy[i]], symbol, color=color, size=36,
clip_on=True, horizontalalignment='center', verticalalignment='center',
transform=transform)
ax.text(lon[mxx[i], mxy[i]], lat[mxx[i], mxy[i]],
                '\n' + str(int(data[mxx[i], mxy[i]])),
color=color, size=12, clip_on=True, fontweight='bold',
horizontalalignment='center', verticalalignment='top', transform=transform)
ax.plot(lon[mxx[i], mxy[i]], lat[mxx[i], mxy[i]], marker='o', markeredgecolor='black',
markerfacecolor='white', transform=transform)
ax.plot(lon[mxx[i], mxy[i]], lat[mxx[i], mxy[i]],
marker='x', color='black', transform=transform)
```
Station Information
-------------------
A helper function for obtaining radiosonde station information (e.g.,
latitude/longitude) required to plot data obtained from each station.
Original code by github user sgdecker.
```
def station_info(stid):
r"""Provide information about weather stations.
Parameters
----------
stid: str or iterable object containing strs
The ICAO or IATA code(s) for which station information is requested.
Returns
-------
info: dict
Information about the station(s) within a dictionary with these keys:
'state': Two-character ID of the state/province where the station is located,
if applicable
'name': The name of the station
'lat': The latitude of the station [deg]
'lon': The longitude of the station [deg]
'elevation': The elevation of the station [m]
'country': Two-character ID of the country where the station is located
Modified code from Steven Decker, Rutgers University
"""
# Provide a helper function for later usage
def str2latlon(s):
deg = float(s[:3])
mn = float(s[-3:-1])
if s[-1] == 'S' or s[-1] == 'W':
deg = -deg
mn = -mn
return deg + mn / 60.
# Various constants describing the underlying data
url = 'https://www.aviationweather.gov/docs/metar/stations.txt'
# file = 'stations.txt'
state_bnds = slice(0, 2)
name_bnds = slice(3, 19)
icao_bnds = slice(20, 24)
iata_bnds = slice(26, 29)
lat_bnds = slice(39, 45)
lon_bnds = slice(47, 54)
z_bnds = slice(55, 59)
cntry_bnds = slice(81, 83)
# Generalize to any number of IDs
if isinstance(stid, str):
stid = [stid]
# Get the station dataset
infile = urllib.request.urlopen(url)
data = infile.readlines()
# infile = open(file, 'rb')
# data = infile.readlines()
state = []
name = []
lat = []
lon = []
z = []
cntry = []
for s in stid:
s = s.upper()
for line_bytes in data:
line = line_bytes.decode('UTF-8')
icao = line[icao_bnds]
iata = line[iata_bnds]
if len(s) == 3 and s in iata or len(s) == 4 and s in icao:
state.append(line[state_bnds].strip())
name.append(line[name_bnds].strip())
lat.append(str2latlon(line[lat_bnds]))
lon.append(str2latlon(line[lon_bnds]))
z.append(float(line[z_bnds]))
cntry.append(line[cntry_bnds])
break
else:
state.append('NA')
name.append('NA')
lat.append(np.nan)
lon.append(np.nan)
z.append(np.nan)
cntry.append('NA')
infile.close()
return {'state': np.array(state), 'name': np.array(name), 'lat': np.array(lat),
'lon': np.array(lon), 'elevation': np.array(z), 'country': np.array(cntry),
'units': {'lat': 'deg', 'lon': 'deg', 'z': 'm'}}
```
Observation Data
----------------
Set a date and time for upper-air observations (should only be 00 or 12
UTC for the hour).
Request all data from Iowa State using the Siphon package. The result is
a pandas DataFrame containing all of the sounding data from all
available stations.
```
# Set date for desired UPA data
today = datetime.utcnow()
# Go back one day to ensure data availability
date = datetime(today.year, today.month, today.day, 0) - timedelta(days=1)
# Request data using Siphon request for data from Iowa State Archive
data = IAStateUpperAir.request_all_data(date)
```
Subset Observational Data
-------------------------
The request above gives all levels from all radiosonde sites
available through the service. For plotting a constant pressure map we
only need the data from that level. Below the data is subset
and a few parameters set based on the level chosen. Additionally, the
station information is obtained and latitude and longitude data is added
to the DataFrame.
```
level = 500
if (level == 925) | (level == 850) | (level == 700):
cint = 30
def hght_format(v): return format(v, '.0f')[1:]
elif level == 500:
cint = 60
def hght_format(v): return format(v, '.0f')[:3]
elif level == 300:
cint = 120
def hght_format(v): return format(v, '.0f')[:3]
elif level < 300:
cint = 120
def hght_format(v): return format(v, '.0f')[1:4]
# Create subset of all data for a given level
data_subset = data.pressure == level
df = data[data_subset]
# Get station lat/lon from look-up file; add to Dataframe
stn_info = station_info(list(df.station.values))
df.insert(10, 'latitude', stn_info['lat'])
df.insert(11, 'longitude', stn_info['lon'])
```
Gridded Data
------------
Obtain GFS gridded output for contour plotting. Specifically,
geopotential height and temperature data for the given level and subset
for over North America. Data are smoothed for aesthetic reasons.
```
# Get GFS data and subset to North America for Geopotential Height and Temperature
ds = xr.open_dataset('https://thredds.ucar.edu/thredds/dodsC/grib/NCEP/GFS/Global_0p5deg_ana/'
'GFS_Global_0p5deg_ana_{0:%Y%m%d}_{0:%H}00.grib2'.format(
date)).metpy.parse_cf()
# Geopotential height and smooth
hght = ds.Geopotential_height_isobaric.metpy.sel(
vertical=level*units.hPa, time=date, lat=slice(70, 15), lon=slice(360-145, 360-50))
smooth_hght = mpcalc.smooth_n_point(hght, 9, 10)
# Temperature, smooth, and convert to Celsius
tmpk = ds.Temperature_isobaric.metpy.sel(
vertical=level*units.hPa, time=date, lat=slice(70, 15), lon=slice(360-145, 360-50))
smooth_tmpc = (mpcalc.smooth_n_point(tmpk, 9, 10)).to('degC')
```
Create DIFAX Replication
------------------------
Plot the observational data and contours on a Lambert Conformal map and
add features that resemble the historic DIFAX maps.
```
# Set up map coordinate reference system
mapcrs = ccrs.LambertConformal(
central_latitude=45, central_longitude=-100, standard_parallels=(30, 60))
# Set up station locations for plotting observations
point_locs = mapcrs.transform_points(
ccrs.PlateCarree(), df['longitude'].values, df['latitude'].values)
# Start figure and set graphics extent
fig = plt.figure(1, figsize=(17, 15))
ax = plt.subplot(111, projection=mapcrs)
ax.set_extent([-125, -70, 20, 55])
# Add map features for geographic reference
ax.add_feature(cfeature.COASTLINE.with_scale('50m'), edgecolor='grey')
ax.add_feature(cfeature.LAND.with_scale('50m'), facecolor='white')
ax.add_feature(cfeature.STATES.with_scale('50m'), edgecolor='grey')
# Plot plus signs every degree lat/lon
plus_lat = []
plus_lon = []
other_lat = []
other_lon = []
for x in hght.lon.values[::2]:
for y in hght.lat.values[::2]:
if (x % 5 == 0) | (y % 5 == 0):
plus_lon.append(x)
plus_lat.append(y)
else:
other_lon.append(x)
other_lat.append(y)
ax.scatter(other_lon, other_lat, s=5, marker='o',
transform=ccrs.PlateCarree(), color='lightgrey', zorder=-1)
ax.scatter(plus_lon, plus_lat, s=30, marker='+',
transform=ccrs.PlateCarree(), color='lightgrey', zorder=-1)
# Add gridlines for every 5 degree lat/lon
ax.gridlines(linestyle='solid', ylocs=range(15, 71, 5), xlocs=range(-150, -49, 5))
# Start the station plot by specifying the axes to draw on, as well as the
# lon/lat of the stations (with transform). We also set the fontsize to 10 pt.
stationplot = StationPlot(ax, df['longitude'].values, df['latitude'].values, clip_on=True,
transform=ccrs.PlateCarree(), fontsize=10)
# Plot the temperature and dew point to the upper and lower left, respectively, of
# the center point.
stationplot.plot_parameter('NW', df['temperature'], color='black')
stationplot.plot_parameter('SW', df['dewpoint'], color='black')
# A more complex example uses a custom formatter to control how the geopotential height
# values are plotted. This is set in an earlier if-statement to work appropriately for
# different levels.
stationplot.plot_parameter('NE', df['height'], formatter=hght_format)
# Add wind barbs
stationplot.plot_barb(df['u_wind'], df['v_wind'], length=7, pivot='tip')
# Plot Solid Contours of Geopotential Height
cs = ax.contour(hght.lon, hght.lat, smooth_hght,
range(0, 20000, cint), colors='black', transform=ccrs.PlateCarree())
clabels = plt.clabel(cs, fmt='%d', colors='white', inline_spacing=5, use_clabeltext=True)
# Contour labels with black boxes and white text
for t in cs.labelTexts:
t.set_bbox({'facecolor': 'black', 'pad': 4})
t.set_fontweight('heavy')
# Plot Dashed Contours of Temperature
cs2 = ax.contour(hght.lon, hght.lat, smooth_tmpc, range(-60, 51, 5),
colors='black', transform=ccrs.PlateCarree())
clabels = plt.clabel(cs2, fmt='%d', colors='white', inline_spacing=5, use_clabeltext=True)
# Set longer dashes than default
for c in cs2.collections:
c.set_dashes([(0, (5.0, 3.0))])
# Contour labels with black boxes and white text
for t in cs2.labelTexts:
t.set_bbox({'facecolor': 'black', 'pad': 4})
t.set_fontweight('heavy')
# Plot filled circles for Radiosonde Obs
ax.scatter(df['longitude'].values, df['latitude'].values, s=12,
marker='o', color='black', transform=ccrs.PlateCarree())
# Use definition to plot H/L symbols
plot_maxmin_points(hght.lon, hght.lat, smooth_hght.m, 'max', 50,
symbol='H', color='black', transform=ccrs.PlateCarree())
plot_maxmin_points(hght.lon, hght.lat, smooth_hght.m, 'min', 25,
symbol='L', color='black', transform=ccrs.PlateCarree())
# Add titles
plt.title('Upper-air Observations at {}-hPa Analysis Heights/Temperature'.format(level),
loc='left')
plt.title(f'Valid: {date}', loc='right');
```
# Description
This notebook contains the interpretation of a cluster (which features/latent variables in the original data are useful to distinguish traits in the cluster).
See section [LV analysis](#lv_analysis) below
# Modules loading
```
%load_ext autoreload
%autoreload 2
import pickle
import re
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import HTML
from clustering.methods import ClusterInterpreter
from data.recount2 import LVAnalysis
from data.cache import read_data
import conf
```
# Settings
```
PARTITION_K = None
PARTITION_CLUSTER_ID = None
```
# Load MultiPLIER summary
```
multiplier_model_summary = read_data(conf.MULTIPLIER["MODEL_SUMMARY_FILE"])
multiplier_model_summary.shape
multiplier_model_summary.head()
```
# Load data
## Original data
```
INPUT_SUBSET = "z_score_std"
INPUT_STEM = "projection-smultixcan-efo_partial-mashr-zscores"
input_filepath = Path(
conf.RESULTS["DATA_TRANSFORMATIONS_DIR"],
INPUT_SUBSET,
f"{INPUT_SUBSET}-{INPUT_STEM}.pkl",
).resolve()
display(input_filepath)
assert input_filepath.exists(), "Input file does not exist"
input_filepath_stem = input_filepath.stem
display(input_filepath_stem)
data = pd.read_pickle(input_filepath)
data.shape
data.head()
```
## Clustering partitions
```
CONSENSUS_CLUSTERING_DIR = Path(
conf.RESULTS["CLUSTERING_DIR"], "consensus_clustering"
).resolve()
display(CONSENSUS_CLUSTERING_DIR)
input_file = Path(CONSENSUS_CLUSTERING_DIR, "best_partitions_by_k.pkl").resolve()
display(input_file)
best_partitions = pd.read_pickle(input_file)
best_partitions.shape
best_partitions.head()
```
# Functions
```
def show_cluster_stats(data, partition, cluster):
cluster_traits = data[partition == cluster].index
display(f"Cluster '{cluster}' has {len(cluster_traits)} traits")
display(cluster_traits)
```
# LV analysis
<a id="lv_analysis"></a>
## Associated traits
```
display(best_partitions.loc[PARTITION_K])
part = best_partitions.loc[PARTITION_K, "partition"]
show_cluster_stats(data, part, PARTITION_CLUSTER_ID)
```
## Associated latent variables
```
ci = ClusterInterpreter(
threshold=1.0,
max_features=20,
max_features_to_explore=100,
)
ci.fit(data, part, PARTITION_CLUSTER_ID)
ci.features_
# save interpreter instance
output_dir = Path(
conf.RESULTS["CLUSTERING_INTERPRETATION"]["BASE_DIR"],
"cluster_lvs",
f"part{PARTITION_K}",
)
output_dir.mkdir(exist_ok=True, parents=True)
output_file = Path(
output_dir, f"cluster_interpreter-part{PARTITION_K}_k{PARTITION_CLUSTER_ID}.pkl"
)
display(output_file)
ci.features_.to_pickle(output_file)
```
## Top attributes
Here we go through the list of associated latent variables and, for each, we show associated pathways (prior knowledge), top traits, top genes and the top tissues/cell types where those genes are expressed.
```
for lv_idx, lv_info in ci.features_.iterrows():
display(HTML(f"<h2>LV{lv_idx}</h2>"))
lv_name = lv_info["name"]
    lv_obj = LVAnalysis(lv_name, data)
# show lv prior knowledge match (pathways)
lv_pathways = multiplier_model_summary[
multiplier_model_summary["LV index"].isin((lv_name[2:],))
& (
(multiplier_model_summary["FDR"] < 0.05)
| (multiplier_model_summary["AUC"] >= 0.75)
)
]
display(lv_pathways)
lv_data = lv_obj.get_experiments_data()
display("")
display(lv_obj.lv_traits.head(20))
display("")
display(lv_obj.lv_genes.head(10))
lv_attrs = lv_obj.get_attributes_variation_score()
_tmp = pd.Series(lv_attrs.index)
lv_attrs = lv_attrs[
_tmp.str.match(
"(?:cell.+type$)|(?:tissue$)|(?:tissue.+type$)",
case=False,
flags=re.IGNORECASE,
).values
].sort_values(ascending=False)
display(lv_attrs)
for _lva in lv_attrs.index:
display(HTML(f"<h3>{_lva}</h3>"))
display(lv_data[_lva].dropna().reset_index()["project"].unique())
with sns.plotting_context("paper", font_scale=1.0), sns.axes_style("whitegrid"):
fig, ax = plt.subplots(figsize=(14, 8))
ax = lv_obj.plot_attribute(_lva, top_x_values=20)
if ax is None:
plt.close(fig)
continue
display(fig)
plt.close(fig)
```
# Lecture 55: Adversarial Autoencoder for Classification
## Load Packages
```
%matplotlib inline
import os
import math
import torch
import itertools
import torch.nn as nn
import torch.optim as optim
from IPython import display
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torchvision.datasets as dsets
import torchvision.transforms as transforms
print(torch.__version__) # This code has been updated for PyTorch 1.0.0
```
## Load Data
```
# MNIST Dataset
dataset = dsets.MNIST(root='./MNIST', train=True, transform=transforms.ToTensor(), download=True)
testset = dsets.MNIST(root='./MNIST', train=False, transform=transforms.ToTensor(), download=True)
# Data Loader (Input Pipeline)
data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=100, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=testset, batch_size=100, shuffle=False)
# Check availability of GPU
use_gpu = torch.cuda.is_available()
# use_gpu = False # Uncomment in case of GPU memory error
if use_gpu:
print('GPU is available!')
device = "cuda"
else:
print('GPU is not available!')
device = "cpu"
```
## Defining network architecture
```
#Encoder
class Q_net(nn.Module):
def __init__(self,X_dim,N,z_dim):
super(Q_net, self).__init__()
self.lin1 = nn.Linear(X_dim, N)
self.lin2 = nn.Linear(N, N)
self.lin3gauss = nn.Linear(N, z_dim)
def forward(self, x):
x = F.dropout(self.lin1(x), p=0.25, training=self.training)
x = F.relu(x)
x = F.dropout(self.lin2(x), p=0.25, training=self.training)
x = F.relu(x)
x = self.lin3gauss(x)
return x
# Decoder
class P_net(nn.Module):
def __init__(self,X_dim,N,z_dim):
super(P_net, self).__init__()
self.lin1 = nn.Linear(z_dim, N)
self.lin2 = nn.Linear(N, N)
self.lin3 = nn.Linear(N, X_dim)
def forward(self, x):
x = F.dropout(self.lin1(x), p=0.25, training=self.training)
x = F.relu(x)
x = F.dropout(self.lin2(x), p=0.25, training=self.training)
x = self.lin3(x)
return torch.sigmoid(x)
# Discriminator
class D_net_gauss(nn.Module):
def __init__(self,N,z_dim):
super(D_net_gauss, self).__init__()
self.lin1 = nn.Linear(z_dim, N)
self.lin2 = nn.Linear(N, N)
self.lin3 = nn.Linear(N, 1)
def forward(self, x):
x = F.dropout(self.lin1(x), p=0.2, training=self.training)
x = F.relu(x)
x = F.dropout(self.lin2(x), p=0.2, training=self.training)
x = F.relu(x)
return torch.sigmoid(self.lin3(x))
```
## Define optimizer
```
z_red_dims = 100
Q = Q_net(784,1000,z_red_dims).to(device)
P = P_net(784,1000,z_red_dims).to(device)
D_gauss = D_net_gauss(500,z_red_dims).to(device)
# Set learning rates
gen_lr = 0.0001
reg_lr = 0.00005
#encode/decode optimizers
optim_P = optim.Adam(P.parameters(), lr=gen_lr)
optim_Q_enc = optim.Adam(Q.parameters(), lr=gen_lr)
#regularizing optimizers
optim_Q_gen = optim.Adam(Q.parameters(), lr=reg_lr)
optim_D = optim.Adam(D_gauss.parameters(), lr=reg_lr)
```
## Test Data
```
num_test_samples = 100
test_noise = torch.randn(num_test_samples,z_red_dims).to(device)
```
## Training
```
# create figure for plotting
size_figure_grid = int(math.sqrt(num_test_samples))
fig, ax = plt.subplots(size_figure_grid, size_figure_grid, figsize=(6, 6))
for i, j in itertools.product(range(size_figure_grid), range(size_figure_grid)):
ax[i,j].get_xaxis().set_visible(False)
ax[i,j].get_yaxis().set_visible(False)
data_iter = iter(data_loader)
iter_per_epoch = len(data_loader)
total_step = 5  # increase to 5000 for a full training run
# Start training
for step in range(total_step):
# Reset the data_iter
if (step+1) % iter_per_epoch == 0:
data_iter = iter(data_loader)
# Fetch the images and labels and convert them to variables
images, labels = next(data_iter)
images, labels = images.view(images.size(0), -1).to(device), labels.to(device)
#reconstruction loss
P.zero_grad()
Q.zero_grad()
D_gauss.zero_grad()
z_sample = Q(images) #encode to z
X_sample = P(z_sample) #decode to X reconstruction
recon_loss = F.binary_cross_entropy(X_sample,images)
recon_loss.backward()
optim_P.step()
optim_Q_enc.step()
# Discriminator
## true prior is random normal (randn)
## this is constraining the Z-projection to be normal!
Q.eval()
z_real_gauss = torch.randn(images.size()[0], z_red_dims).to(device)
D_real_gauss = D_gauss(z_real_gauss)
z_fake_gauss = Q(images)
D_fake_gauss = D_gauss(z_fake_gauss)
D_loss = -torch.mean(torch.log(D_real_gauss) + torch.log(1 - D_fake_gauss))
D_loss.backward()
optim_D.step()
# Generator
Q.train()
z_fake_gauss = Q(images)
D_fake_gauss = D_gauss(z_fake_gauss)
G_loss = -torch.mean(torch.log(D_fake_gauss))
G_loss.backward()
optim_Q_gen.step()
P.eval()
test_images = P(test_noise)
P.train()
    test_images = test_images.cpu().detach()
for k in range(num_test_samples):
i = k//10
j = k%10
ax[i,j].cla()
ax[i,j].imshow(test_images[k,:].numpy().reshape(28, 28), cmap='Greys')
display.clear_output(wait=True)
display.display(plt.gcf())
```
## Classifier
```
# Classifier built on top of the trained encoder Q
class Classifier(nn.Module):
def __init__(self):
super(Classifier, self).__init__()
self.l1 = Q
self.l2 = nn.Linear(100,10)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
return x
net = Classifier().to(device)
print(net)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=1e-4)
```
## Training
```
iterations = 10
for epoch in range(iterations): # loop over the dataset multiple times
runningLoss = 0.0
for i, data in enumerate(data_loader, 0):
# get the inputs
inputs, labels = data
inputs, labels = inputs.view(inputs.size(0), -1).to(device), labels.to(device)
net.train()
optimizer.zero_grad() # zeroes the gradient buffers of all parameters
outputs = net(inputs) # forward
loss = criterion(outputs, labels) # calculate loss
loss.backward() # backpropagate the loss
optimizer.step()
correct = 0
total = 0
net.eval()
with torch.no_grad():
for data in test_loader:
inputs, labels = data
inputs, labels = inputs.view(inputs.size(0), -1).to(device), labels.to(device)
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels.data).sum()
print('At Iteration : %d / %d ;Test Accuracy : %f'%(epoch + 1,iterations,100 * float(correct) /float(total)))
print('Finished Training')
```
# Introduction to Linear Algebra
This is a tutorial designed to introduce you to the basics of linear algebra.
Linear algebra is a branch of mathematics dedicated to studying the properties of matrices and vectors,
which are used extensively in quantum computing to represent quantum states and operations on them.
This tutorial doesn't come close to covering the full breadth of the topic, but it should be enough to get you comfortable with the main concepts of linear algebra used in quantum computing.
This tutorial assumes familiarity with complex numbers; if you need a review of this topic, we recommend that you complete the [Complex Arithmetic](../ComplexArithmetic/ComplexArithmetic.ipynb) tutorial before tackling this one.
This tutorial covers the following topics:
* Matrices and vectors
* Basic matrix operations
* Operations and properties of complex matrices
* Inner and outer vector products
* Tensor product
* Eigenvalues and eigenvectors
If you need to look up some formulas quickly, you can find them in [this cheatsheet](https://github.com/microsoft/QuantumKatas/blob/main/quickref/qsharp-quick-reference.pdf).
This notebook has several tasks that require you to write Python code to test your understanding of the concepts. If you are not familiar with Python, [here](https://docs.python.org/3/tutorial/index.html) is a good introductory tutorial for it.
> The exercises use Python's built-in representation of complex numbers. Most of the operations (addition, multiplication, etc.) work as you expect them to. Here are a few notes on Python-specific syntax:
>
> * If `z` is a complex number, `z.real` is the real component, and `z.imag` is the coefficient of the imaginary component.
> * To represent an imaginary number, put `j` after a real number: $3.14i$ would be `3.14j`.
> * To represent a complex number, simply add a real number and an imaginary number.
> * The built-in function `abs` computes the modulus of a complex number.
>
> You can find more information in the [official documentation](https://docs.python.org/3/library/cmath.html).
Let's start by importing some useful mathematical functions and constants, and setting up a few things necessary for testing the exercises. **Do not skip this step.**
Click the cell with code below this block of text and press `Ctrl+Enter` (`⌘+Enter` on Mac).
```
# Run this cell using Ctrl+Enter (⌘+Enter on Mac).
from testing import exercise, create_empty_matrix
from typing import List
import math, cmath
Matrix = List[List[complex]]
```
# Part I. Matrices and Basic Operations
## Matrices and Vectors
A **matrix** is set of numbers arranged in a rectangular grid. Here is a $2$ by $2$ matrix:
$$A =
\begin{bmatrix} 1 & 2 \\ 3 & 4 \end{bmatrix}$$
$A_{i,j}$ refers to the element in row $i$ and column $j$ of matrix $A$ (all indices are 0-based). In the above example, $A_{0,1} = 2$.
An $n \times m$ matrix will have $n$ rows and $m$ columns, like so:
$$\begin{bmatrix}
x_{0,0} & x_{0,1} & \dotsb & x_{0,m-1} \\
x_{1,0} & x_{1,1} & \dotsb & x_{1,m-1} \\
\vdots & \vdots & \ddots & \vdots \\
x_{n-1,0} & x_{n-1,1} & \dotsb & x_{n-1,m-1}
\end{bmatrix}$$
A $1 \times 1$ matrix is equivalent to a scalar:
$$\begin{bmatrix} 3 \end{bmatrix} = 3$$
Quantum computing uses complex-valued matrices: the elements of a matrix can be complex numbers. This, for example, is a valid complex-valued matrix:
$$\begin{bmatrix}
1 & i \\
-2i & 3 + 4i
\end{bmatrix}$$
Finally, a **vector** is an $n \times 1$ matrix. Here, for example, is a $3 \times 1$ vector:
$$V = \begin{bmatrix} 1 \\ 2i \\ 3 + 4i \end{bmatrix}$$
Since vectors always have a width of $1$, vector elements are sometimes written using only one index. In the above example, $V_0 = 1$ and $V_1 = 2i$.
## Matrix Addition
The easiest matrix operation is **matrix addition**. Matrix addition works between two matrices of the same size, and adds each number from the first matrix to the number in the same position in the second matrix:
$$\begin{bmatrix}
x_{0,0} & x_{0,1} & \dotsb & x_{0,m-1} \\
x_{1,0} & x_{1,1} & \dotsb & x_{1,m-1} \\
\vdots & \vdots & \ddots & \vdots \\
x_{n-1,0} & x_{n-1,1} & \dotsb & x_{n-1,m-1}
\end{bmatrix}
+
\begin{bmatrix}
y_{0,0} & y_{0,1} & \dotsb & y_{0,m-1} \\
y_{1,0} & y_{1,1} & \dotsb & y_{1,m-1} \\
\vdots & \vdots & \ddots & \vdots \\
y_{n-1,0} & y_{n-1,1} & \dotsb & y_{n-1,m-1}
\end{bmatrix}
=
\begin{bmatrix}
x_{0,0} + y_{0,0} & x_{0,1} + y_{0,1} & \dotsb & x_{0,m-1} + y_{0,m-1} \\
x_{1,0} + y_{1,0} & x_{1,1} + y_{1,1} & \dotsb & x_{1,m-1} + y_{1,m-1} \\
\vdots & \vdots & \ddots & \vdots \\
x_{n-1,0} + y_{n-1,0} & x_{n-1,1} + y_{n-1,1} & \dotsb & x_{n-1,m-1} + y_{n-1,m-1}
\end{bmatrix}$$
Similarly, we can compute $A - B$ by subtracting elements of $B$ from corresponding elements of $A$.
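For example, here is the sum of two small matrices, computed element by element:
$$\begin{bmatrix} 1 & 2 \\ 3 & 4 \end{bmatrix} +
\begin{bmatrix} 5 & 6 \\ 7 & 8 \end{bmatrix} =
\begin{bmatrix} 1 + 5 & 2 + 6 \\ 3 + 7 & 4 + 8 \end{bmatrix} =
\begin{bmatrix} 6 & 8 \\ 10 & 12 \end{bmatrix}$$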
Matrix addition has the following properties:
* Commutativity: $A + B = B + A$
* Associativity: $(A + B) + C = A + (B + C)$
### <span style="color:blue">Exercise 1</span>: Matrix addition.
**Inputs:**
1. An $n \times m$ matrix $A$, represented as a two-dimensional list.
2. An $n \times m$ matrix $B$, represented as a two-dimensional list.
**Output:** Return the sum of the matrices $A + B$ - an $n \times m$ matrix, represented as a two-dimensional list.
> When representing matrices as lists, each sub-list represents a row.
>
> For example, list `[[1, 2], [3, 4]]` represents the following matrix:
>
> $$\begin{bmatrix}
1 & 2 \\
3 & 4
\end{bmatrix}$$
Fill in the missing code and run the cell below to test your work.
<br/>
<details>
<summary><b>Need a hint? Click here</b></summary>
A video explanation can be found <a href="https://www.youtube.com/watch?v=WR9qCSXJlyY">here</a>.
</details>
```
@exercise
def matrix_add(a : Matrix, b : Matrix) -> Matrix:
# You can get the size of a matrix like this:
rows = len(a)
columns = len(a[0])
# You can use the following function to initialize a rows×columns matrix filled with 0s to store your answer
c = create_empty_matrix(rows, columns)
# You can use a for loop to execute its body several times;
# in this loop variable i will take on each value from 0 to n-1, inclusive
for i in range(rows):
# Loops can be nested
for j in range(columns):
# You can access elements of a matrix like this:
x = a[i][j]
y = b[i][j]
# You can modify the elements of a matrix like this:
c[i][j] = x + y
return c
```
*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-1:-Matrix-addition.).*
## Scalar Multiplication
The next matrix operation is **scalar multiplication** - multiplying the entire matrix by a scalar (real or complex number):
$$a \cdot
\begin{bmatrix}
x_{0,0} & x_{0,1} & \dotsb & x_{0,m-1} \\
x_{1,0} & x_{1,1} & \dotsb & x_{1,m-1} \\
\vdots & \vdots & \ddots & \vdots \\
x_{n-1,0} & x_{n-1,1} & \dotsb & x_{n-1,m-1}
\end{bmatrix}
=
\begin{bmatrix}
a \cdot x_{0,0} & a \cdot x_{0,1} & \dotsb & a \cdot x_{0,m-1} \\
a \cdot x_{1,0} & a \cdot x_{1,1} & \dotsb & a \cdot x_{1,m-1} \\
\vdots & \vdots & \ddots & \vdots \\
a \cdot x_{n-1,0} & a \cdot x_{n-1,1} & \dotsb & a \cdot x_{n-1,m-1}
\end{bmatrix}$$
Scalar multiplication has the following properties:
* Associativity: $x \cdot (yA) = (x \cdot y)A$
* Distributivity over matrix addition: $x(A + B) = xA + xB$
* Distributivity over scalar addition: $(x + y)A = xA + yA$
### <span style="color:blue">Exercise 2</span>: Scalar multiplication.
**Inputs:**
1. A scalar $x$.
2. An $n \times m$ matrix $A$.
**Output:** Return the $n \times m$ matrix $x \cdot A$.
<br/>
<details>
<summary><b>Need a hint? Click here</b></summary>
A video explanation can be found <a href="https://www.youtube.com/watch?v=TbaltFbJ3wE">here</a>.
</details>
```
@exercise
def scalar_mult(x : complex, a : Matrix) -> Matrix:
# Fill in the missing code and run the cell to check your work.
rows = len(a)
columns = len(a[0])
c = create_empty_matrix(rows, columns)
# You can use a for loop to execute its body several times;
# in this loop variable i will take on each value from 0 to n-1, inclusive
for i in range(rows):
# Loops can be nested
for j in range(columns):
# You can access elements of a matrix like this:
current_cell = a[i][j]
# You can modify the elements of a matrix like this:
c[i][j] = x * current_cell
return c
```
*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-2:-Scalar-multiplication.).*
## Matrix Multiplication
**Matrix multiplication** is a very important and somewhat unusual operation. The unusual thing about it is that neither its operands nor its output are the same size: an $n \times m$ matrix multiplied by an $m \times k$ matrix results in an $n \times k$ matrix.
That is, for matrix multiplication to be applicable, the number of columns in the first matrix must equal the number of rows in the second matrix.
Here is how matrix product is calculated: if we are calculating $AB = C$, then
$$C_{i,j} = A_{i,0} \cdot B_{0,j} + A_{i,1} \cdot B_{1,j} + \dotsb + A_{i,m-1} \cdot B_{m-1,j} = \sum_{t = 0}^{m-1} A_{i,t} \cdot B_{t,j}$$
Here is a small example:
$$\begin{bmatrix}
\color{blue} 1 & \color{blue} 2 & \color{blue} 3 \\
\color{red} 4 & \color{red} 5 & \color{red} 6
\end{bmatrix}
\begin{bmatrix}
1 \\
2 \\
3
\end{bmatrix}
=
\begin{bmatrix}
(\color{blue} 1 \cdot 1) + (\color{blue} 2 \cdot 2) + (\color{blue} 3 \cdot 3) \\
(\color{red} 4 \cdot 1) + (\color{red} 5 \cdot 2) + (\color{red} 6 \cdot 3)
\end{bmatrix}
=
\begin{bmatrix}
14 \\
32
\end{bmatrix}$$
Matrix multiplication has the following properties:
* Associativity: $A(BC) = (AB)C$
* Distributivity over matrix addition: $A(B + C) = AB + AC$ and $(A + B)C = AC + BC$
* Associativity with scalar multiplication: $xAB = x(AB) = A(xB)$
> Note that matrix multiplication is **not commutative:** $AB$ rarely equals $BA$.
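For example, multiplying the same two matrices in different orders gives different results:
$$\begin{bmatrix} 1 & 1 \\ 0 & 1 \end{bmatrix}
\begin{bmatrix} 1 & 0 \\ 1 & 1 \end{bmatrix} =
\begin{bmatrix} 2 & 1 \\ 1 & 1 \end{bmatrix}, \qquad
\begin{bmatrix} 1 & 0 \\ 1 & 1 \end{bmatrix}
\begin{bmatrix} 1 & 1 \\ 0 & 1 \end{bmatrix} =
\begin{bmatrix} 1 & 1 \\ 1 & 2 \end{bmatrix}$$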
Another very important property of matrix multiplication is that a matrix multiplied by a vector produces another vector.
An **identity matrix** $I_n$ is a special $n \times n$ matrix which has $1$s on the main diagonal, and $0$s everywhere else:
$$I_n =
\begin{bmatrix}
1 & 0 & \dotsb & 0 \\
0 & 1 & \dotsb & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \dotsb & 1
\end{bmatrix}$$
What makes it special is that multiplying any matrix (of compatible size) by $I_n$ returns the original matrix. To put it another way, if $A$ is an $n \times m$ matrix:
$$AI_m = I_nA = A$$
This is why $I_n$ is called an identity matrix - it acts as a **multiplicative identity**. In other words, it is the matrix equivalent of the number $1$.
### <span style="color:blue">Exercise 3</span>: Matrix multiplication.
**Inputs:**
1. An $n \times m$ matrix $A$.
2. An $m \times k$ matrix $B$.
**Output:** Return the $n \times k$ matrix equal to the matrix product $AB$.
<br/>
<details>
<summary><strong>Need a hint? Click here</strong></summary>
To solve this exercise, you will need 3 <code>for</code> loops: one to go over $n$ rows of the output matrix, one to go over $k$ columns, and one to add up $m$ products that form each element of the output:
<pre>
<code>
for i in range(n):
for j in range(k):
sum = 0
for t in range(m):
sum = sum + ...
c[i][j] = sum
</code>
</pre>
A video explanation can be found <a href="https://www.youtube.com/watch?v=OMA2Mwo0aZg">here</a>.
</details>
```
@exercise
def matrix_mult(a : Matrix, b : Matrix) -> Matrix:
n = len(a)
m = len(a[0])
k = len(b[0])
c = create_empty_matrix(n, k)
def calc_sum_this_cell(i, j, m):
sum_cell = 0
for t in range(m):
sum_cell += a[i][t] * b[t][j]
return sum_cell
for i in range(n):
for j in range(k):
c[i][j] = calc_sum_this_cell(i, j, m)
return c
```
*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-3:-Matrix-multiplication.).*
## Inverse Matrices
A square $n \times n$ matrix $A$ is **invertible** if it has an inverse $n \times n$ matrix $A^{-1}$ with the following property:
$$AA^{-1} = A^{-1}A = I_n$$
In other words, $A^{-1}$ acts as the **multiplicative inverse** of $A$.
Another, equivalent definition highlights what makes this an interesting property. For any matrices $B$ and $C$ of compatible sizes:
$$A^{-1}(AB) = A(A^{-1}B) = B \\
(CA)A^{-1} = (CA^{-1})A = C$$
A square matrix has a property called the **determinant**, with the determinant of matrix $A$ being written as $|A|$. A matrix is invertible if and only if its determinant isn't equal to $0$.
For a $2 \times 2$ matrix $A$, the determinant is defined as $|A| = (A_{0,0} \cdot A_{1,1}) - (A_{0,1} \cdot A_{1,0})$.
For larger matrices, the determinant is defined through determinants of sub-matrices. You can learn more from [Wikipedia](https://en.wikipedia.org/wiki/Determinant) or from [Wolfram MathWorld](http://mathworld.wolfram.com/Determinant.html).
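This recursive definition is not needed for the exercises below, but as an illustration, here is a minimal sketch of computing the determinant of an arbitrary square matrix via cofactor expansion along the first row (simple, though far from the most efficient approach):
```
def determinant(a : Matrix) -> complex:
    n = len(a)
    if n == 1:
        return a[0][0]
    det = 0
    for j in range(n):
        # minor: the matrix a with row 0 and column j removed
        minor = [row[:j] + row[j + 1:] for row in a[1:]]
        # alternate signs along the first row (cofactor expansion)
        det += ((-1) ** j) * a[0][j] * determinant(minor)
    return det
```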
### <span style="color:blue">Exercise 4</span>: Matrix Inversion.
**Input:** An invertible $2 \times 2$ matrix $A$.
**Output:** Return the inverse of $A$, a $2 \times 2$ matrix $A^{-1}$.
<br/>
<details>
<summary><strong>Need a hint? Click here</strong></summary>
Try to come up with a general method of doing it by hand first. If you get stuck, you may find <a href="https://en.wikipedia.org/wiki/Invertible_matrix#Inversion_of_2_%C3%97_2_matrices">this Wikipedia article</a> useful. For this exercise, $|A|$ is guaranteed to be non-zero. <br>
A video explanation can be found <a href="https://www.youtube.com/watch?v=01c12NaUQDw">here</a>.
</details>
```
@exercise
def matrix_inverse(m : Matrix) -> Matrix:
#inverse must be same size as original (and should be square, which we could verify)
m_inverse = create_empty_matrix(len(m), len(m[0]))
a = m[0][0]
b = m[0][1]
c = m[1][0]
d = m[1][1]
determinant_m = a * d - b * c
if determinant_m != 0:
m_inverse[0][0] = d / determinant_m
m_inverse[0][1] = -b / determinant_m
m_inverse[1][0] = -c / determinant_m
m_inverse[1][1] = a / determinant_m
return m_inverse
```
*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-4:-Matrix-Inversion.).*
## Transpose
The **transpose** operation, denoted as $A^T$, is essentially a reflection of the matrix across the diagonal: $(A^T)_{i,j} = A_{j,i}$.
Given an $n \times m$ matrix $A$, its transpose is the $m \times n$ matrix $A^T$, such that if:
$$A =
\begin{bmatrix}
x_{0,0} & x_{0,1} & \dotsb & x_{0,m-1} \\
x_{1,0} & x_{1,1} & \dotsb & x_{1,m-1} \\
\vdots & \vdots & \ddots & \vdots \\
x_{n-1,0} & x_{n-1,1} & \dotsb & x_{n-1,m-1}
\end{bmatrix}$$
then:
$$A^T =
\begin{bmatrix}
x_{0,0} & x_{1,0} & \dotsb & x_{n-1,0} \\
x_{0,1} & x_{1,1} & \dotsb & x_{n-1,1} \\
\vdots & \vdots & \ddots & \vdots \\
x_{0,m-1} & x_{1,m-1} & \dotsb & x_{n-1,m-1}
\end{bmatrix}$$
For example:
$$\begin{bmatrix}
1 & 2 \\
3 & 4 \\
5 & 6
\end{bmatrix}^T
=
\begin{bmatrix}
1 & 3 & 5 \\
2 & 4 & 6
\end{bmatrix}$$
A **symmetric** matrix is a square matrix which equals its own transpose: $A = A^T$. To put it another way, it has reflection symmetry (hence the name) across the main diagonal. For example, the following matrix is symmetric:
$$\begin{bmatrix}
1 & 2 & 3 \\
2 & 4 & 5 \\
3 & 5 & 6
\end{bmatrix}$$
The transpose of a matrix product is equal to the product of transposed matrices, taken in reverse order:
$$(AB)^T = B^TA^T$$
### <span style="color:blue">Exercise 5</span>: Transpose.
**Input:** An $n \times m$ matrix $A$.
**Output:** Return an $m \times n$ matrix $A^T$, the transpose of $A$.
<br/>
<details>
<summary><b>Need a hint? Click here</b></summary>
A video explanation can be found <a href="https://www.youtube.com/watch?v=TZrKrNVhbjI">here</a>.
</details>
```
@exercise
def transpose(a : Matrix) -> Matrix:
n = len(a)
m = len(a[0])
#transpose of n x m is m x n
transpose_of_a = create_empty_matrix(m, n)
#for each row, make it a column
for i in range(n):
for j in range(m):
transpose_of_a[j][i] = a[i][j]
return transpose_of_a
```
*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-5:-Transpose.).*
## Conjugate
The next important single-matrix operation is the **matrix conjugate**, denoted as $\overline{A}$. This, as the name might suggest, involves taking the [complex conjugate](../ComplexArithmetic/ComplexArithmetic.ipynb#Complex-Conjugate) of every element of the matrix: if
$$A =
\begin{bmatrix}
x_{0,0} & x_{0,1} & \dotsb & x_{0,m-1} \\
x_{1,0} & x_{1,1} & \dotsb & x_{1,m-1} \\
\vdots & \vdots & \ddots & \vdots \\
x_{n-1,0} & x_{n-1,1} & \dotsb & x_{n-1,m-1}
\end{bmatrix}$$
Then:
$$\overline{A} =
\begin{bmatrix}
\overline{x}_{0,0} & \overline{x}_{0,1} & \dotsb & \overline{x}_{0,m-1} \\
\overline{x}_{1,0} & \overline{x}_{1,1} & \dotsb & \overline{x}_{1,m-1} \\
\vdots & \vdots & \ddots & \vdots \\
\overline{x}_{n-1,0} & \overline{x}_{n-1,1} & \dotsb & \overline{x}_{n-1,m-1}
\end{bmatrix}$$
The conjugate of a matrix product equals the product of the conjugates of the matrices:
$$\overline{AB} = (\overline{A})(\overline{B})$$
### <span style="color:blue">Exercise 6</span>: Conjugate.
**Input:** An $n \times m$ matrix $A$.
**Output:** Return an $n \times m$ matrix $\overline{A}$, the conjugate of $A$.
> As a reminder, you can get the real and imaginary components of complex number `z` using `z.real` and `z.imag`, respectively.
<details>
<summary><b>Need a hint? Click here</b></summary>
To calculate the conjugate of a matrix take the conjugate of each element, check the <a href="../ComplexArithmetic/ComplexArithmetic.ipynb#Exercise-4:-Complex-conjugate.">complex arithmetic tutorial</a> to see how to calculate the conjugate of a complex number.
</details>
```
@exercise
def conjugate(a : Matrix) -> Matrix:
# result is same size
n = len(a)
m = len(a[0])
conjugate_of_a = create_empty_matrix(n, m)
for i in range(n):
for j in range(m):
            conjugate_of_a[i][j] = a[i][j].real - a[i][j].imag * 1j  # 1j is the imaginary unit i in Python
return conjugate_of_a
```
*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-6:-Conjugate.).*
## Adjoint
The final important single-matrix operation is a combination of the above two. The **conjugate transpose**, also called the **adjoint** of matrix $A$, is defined as $A^\dagger = \overline{(A^T)} = (\overline{A})^T$.
A matrix is known as **Hermitian** or **self-adjoint** if it equals its own adjoint: $A = A^\dagger$. For example, the following matrix is Hermitian:
$$\begin{bmatrix}
1 & i \\
-i & 2
\end{bmatrix}$$
The adjoint of a matrix product can be calculated as follows:
$$(AB)^\dagger = B^\dagger A^\dagger$$
### <span style="color:blue">Exercise 7</span>: Adjoint.
**Input:** An $n \times m$ matrix $A$.
**Output:** Return an $m \times n$ matrix $A^\dagger$, the adjoint of $A$.
> Don't forget, you can re-use functions you've written previously.
```
@exercise
def adjoint(a : Matrix) -> Matrix:
#first do transpose, then do conjugate
#size of result will be m x n because of the transpose
n = len(a)
m = len(a[0])
adjoint_of_a = create_empty_matrix(m, n)
#transpose - for each row, make it a column
for i in range(n):
for j in range(m):
adjoint_of_a[j][i] = a[i][j]
#conjugate let a + bi become a - bi
for i in range(m):
for j in range(n):
            adjoint_of_a[i][j] = adjoint_of_a[i][j].real - adjoint_of_a[i][j].imag * 1j
return adjoint_of_a
```
*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-7:-Adjoint.).*
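As a quick numerical cross-check (this uses NumPy, which is assumed to be installed but is not part of the exercise harness), the Hermitian example above can be verified by comparing the matrix with its conjugate transpose:
```
import numpy as np
# The example Hermitian matrix from above equals its own adjoint
A = np.array([[1, 1j],
              [-1j, 2]])
print(np.allclose(A, A.conj().T))  # True, so A is self-adjoint
```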
## Unitary Matrices
**Unitary matrices** are very important for quantum computing. A matrix is unitary when it is invertible, and its inverse is equal to its adjoint: $U^{-1} = U^\dagger$. That is, an $n \times n$ square matrix $U$ is unitary if and only if $UU^\dagger = U^\dagger U = I_n$.
For example, the following matrix is unitary:
$$\begin{bmatrix}
\frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} \\
\frac{i}{\sqrt{2}} & \frac{-i}{\sqrt{2}} \\
\end{bmatrix}$$
### <span style="color:blue">Exercise 8</span>: Unitary Verification.
**Input:** An $n \times n$ matrix $A$.
**Output:** Check if the matrix is unitary and return `True` if it is, or `False` if it isn't.
> Because of inaccuracy when dealing with floating point numbers on a computer (rounding errors), you won't always get the exact result you are expecting from a long series of calculations. To get around this, the `pytest` library provides a function `approx` which can be used to check whether two numbers are "close enough": `a == approx(b)`.
<br/>
<details>
<summary><strong>Need a hint? Click here</strong></summary>
Keep in mind, you have only implemented matrix inverses for $2 \times 2$ matrices, and this exercise may give you larger inputs. There is a way to solve this without taking the inverse.
</details>
```
from pytest import approx
@exercise
def is_matrix_unitary(a : Matrix) -> bool:
#if a is unitary, then a multiplied by its adjoint yields I
#this will automatically handle the zero matrix corner case
#this is for square nxn matrix
n = len(a)
product_matrix = matrix_mult(a, adjoint(a))
#check whether product_matrix is I
is_unitary = True
for i in range(n):
for j in range(n):
            #the diagonal entries must equal 1 and all other entries must equal 0
            if (i == j and product_matrix[i][j] != approx(1)) or (i != j and product_matrix[i][j] != approx(0)):
                is_unitary = False
                break
return is_unitary
```
*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-8:-Unitary-Verification.).*
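The example unitary matrix from the start of this section can be double-checked the same way; the sketch below (again assuming NumPy is available) confirms that $UU^\dagger = I$:
```
import numpy as np
U = np.array([[1, 1],
              [1j, -1j]]) / np.sqrt(2)
# A matrix is unitary iff multiplying it by its adjoint gives the identity
print(np.allclose(U @ U.conj().T, np.eye(2)))  # True
```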
## Next Steps
Congratulations! At this point, you should understand enough linear algebra to be able to get started with the tutorials on [the concept of qubit](../Qubit/Qubit.ipynb) and on [single-qubit quantum gates](../SingleQubitGates/SingleQubitGates.ipynb). The next section covers more advanced matrix operations that help explain the properties of qubits and quantum gates.
# Part II. Advanced Operations
## Inner Product
The **inner product** is yet another important matrix operation that is only applied to vectors. Given two vectors $V$ and $W$ of the same size, their inner product $\langle V , W \rangle$ is defined as a product of matrices $V^\dagger$ and $W$:
$$\langle V , W \rangle = V^\dagger W$$
Let's break this down so it's a bit easier to understand. A $1 \times n$ matrix (the adjoint of an $n \times 1$ vector) multiplied by an $n \times 1$ vector results in a $1 \times 1$ matrix (which is equivalent to a scalar). The result of an inner product is that scalar.
To put it another way, to calculate the inner product of two vectors, take the corresponding elements $V_k$ and $W_k$, multiply the complex conjugate of $V_k$ by $W_k$, and add up those products:
$$\langle V , W \rangle = \sum_{k=0}^{n-1}\overline{V_k}W_k$$
Here is a simple example:
$$\langle
\begin{bmatrix}
-6 \\
9i
\end{bmatrix}
,
\begin{bmatrix}
3 \\
-8
\end{bmatrix}
\rangle =
\begin{bmatrix}
-6 \\
9i
\end{bmatrix}^\dagger
\begin{bmatrix}
3 \\
-8
\end{bmatrix}
=
\begin{bmatrix} -6 & -9i \end{bmatrix}
\begin{bmatrix}
3 \\
-8
\end{bmatrix}
= (-6) \cdot (3) + (-9i) \cdot (-8) = -18 + 72i$$
If you are familiar with the **dot product**, you will notice that it is equivalent to inner product for real-numbered vectors.
> We use our definition for these tutorials because it matches the notation used in quantum computing. You might encounter other sources which define the inner product a little differently: $\langle V , W \rangle = W^\dagger V = V^T\overline{W}$, in contrast to the $V^\dagger W$ that we use. These definitions are almost equivalent, with some differences in the scalar multiplication by a complex number.
An immediate application for the inner product is computing the **vector norm**. The norm of vector $V$ is defined as $||V|| = \sqrt{\langle V , V \rangle}$. This condenses the vector down to a single non-negative real value. If the vector represents coordinates in space, the norm happens to be the length of the vector. A vector is called **normalized** if its norm is equal to $1$.
The inner product has the following properties:
* Distributivity over addition: $\langle V + W , X \rangle = \langle V , X \rangle + \langle W , X \rangle$ and $\langle V , W + X \rangle = \langle V , W \rangle + \langle V , X \rangle$
* Partial associativity with scalar multiplication: $x \cdot \langle V , W \rangle = \langle \overline{x}V , W \rangle = \langle V , xW \rangle$
* Skew symmetry: $\langle V , W \rangle = \overline{\langle W , V \rangle}$
* Multiplying a vector by a unitary matrix **preserves the vector's inner product with itself** (and therefore the vector's norm): $\langle UV , UV \rangle = \langle V , V \rangle$
> Note that just like matrix multiplication, the inner product is **not commutative**: $\langle V , W \rangle$ won't always equal $\langle W , V \rangle$.
### <span style="color:blue">Exercise 9</span>: Inner product.
**Inputs:**
1. An $n \times 1$ vector $V$.
2. An $n \times 1$ vector $W$.
**Output:** Return a complex number - the inner product $\langle V , W \rangle$.
<br/>
<details>
<summary><b>Need a hint? Click here</b></summary>
A video explanation can be found <a href="https://www.youtube.com/watch?v=FCmH4MqbFGs">here</a>.
</details>
```
@exercise
def inner_prod(v : Matrix, w : Matrix) -> complex:
n = len(v)
conjugate_of_v = conjugate(v)
inner_product = 0
for k in range(n):
inner_product += conjugate_of_v[k][0] * w[k][0]
return inner_product
```
*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-9:-Inner-product.).*
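If NumPy is available in your environment, the worked example above can be reproduced with `np.vdot`, which conjugates its first argument exactly like the inner product defined here (this is only a cross-check, not part of the exercise):
```
import numpy as np
# <[-6, 9i], [3, -8]> = (-6)(3) + (-9i)(-8) = -18 + 72i
print(np.vdot([-6, 9j], [3, -8]))  # (-18+72j)
```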
### <span style="color:blue">Exercise 10</span>: Normalized vectors.
**Input:** A non-zero $n \times 1$ vector $V$.
**Output:** Return an $n \times 1$ vector $\frac{V}{||V||}$ - the normalized version of the vector $V$.
<br/>
<details>
<summary><strong>Need a hint? Click here</strong></summary>
You might need the square root function to solve this exercise. As a reminder, <a href=https://docs.python.org/3/library/math.html#math.sqrt>Python's square root function</a> is available in the <code>math</code> library.<br>
A video explanation can be found <a href="https://www.youtube.com/watch?v=7fn03DIW3Ak">here</a>. Note that when this method is used with complex vectors, you should take the modulus of the complex number for the division.
</details>
```
@exercise
def normalize(v : Matrix) -> Matrix:
    # <V, V> is mathematically a non-negative real number, but it is returned as a complex type
    # (possibly with a tiny imaginary part), so take its modulus first; the norm is its square root
    prod = inner_prod(v, v)
    modulus_of_prod = math.sqrt(prod.real**2 + prod.imag**2)
    norm = math.sqrt(modulus_of_prod)
v_normalized = create_empty_matrix(len(v), 1)
for k in range(len(v)):
v_normalized[k][0] = v[k][0] / norm
return v_normalized
```
*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-10:-Normalized-vectors.).*
## Outer Product
The **outer product** of two vectors $V$ and $W$ is defined as $VW^\dagger$. That is, the outer product of an $n \times 1$ vector and an $m \times 1$ vector is an $n \times m$ matrix. If we denote the outer product of $V$ and $W$ as $X$, then $X_{i,j} = V_i \cdot \overline{W_j}$.
Here is a simple example:
outer product of $\begin{bmatrix} -3i \\ 9 \end{bmatrix}$ and $\begin{bmatrix} 9i \\ 2 \\ 7 \end{bmatrix}$ is:
$$\begin{bmatrix} \color{blue} {-3i} \\ \color{blue} 9 \end{bmatrix}
\begin{bmatrix} \color{red} {9i} \\ \color{red} 2 \\ \color{red} 7 \end{bmatrix}^\dagger
=
\begin{bmatrix} \color{blue} {-3i} \\ \color{blue} 9 \end{bmatrix}
\begin{bmatrix} \color{red} {-9i} & \color{red} 2 & \color{red} 7 \end{bmatrix}
=
\begin{bmatrix}
\color{blue} {-3i} \cdot \color{red} {(-9i)} & \color{blue} {-3i} \cdot \color{red} 2 & \color{blue} {-3i} \cdot \color{red} 7 \\
\color{blue} 9 \cdot \color{red} {(-9i)} & \color{blue} 9 \cdot \color{red} 2 & \color{blue} 9 \cdot \color{red} 7
\end{bmatrix}
=
\begin{bmatrix}
-27 & -6i & -21i \\
-81i & 18 & 63
\end{bmatrix}$$
### <span style="color:blue">Exercise 11</span>: Outer product.
**Inputs:**
1. An $n \times 1$ vector $V$.
2. An $m \times 1$ vector $W$.
**Output:** Return an $n \times m$ matrix that represents the outer product of $V$ and $W$.
```
@exercise
def outer_prod(v : Matrix, w : Matrix) -> Matrix:
#outer product equals v times adjoint of w
return matrix_mult(v, adjoint(w))
```
*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-11:-Outer-product.).*
## Tensor Product
The **tensor product** is a different way of multiplying matrices. Rather than multiplying rows by columns, the tensor product multiplies the second matrix by every element of the first matrix.
Given $n \times m$ matrix $A$ and $k \times l$ matrix $B$, their tensor product $A \otimes B$ is an $(n \cdot k) \times (m \cdot l)$ matrix defined as follows:
$$A \otimes B =
\begin{bmatrix}
A_{0,0} \cdot B & A_{0,1} \cdot B & \dotsb & A_{0,m-1} \cdot B \\
A_{1,0} \cdot B & A_{1,1} \cdot B & \dotsb & A_{1,m-1} \cdot B \\
\vdots & \vdots & \ddots & \vdots \\
A_{n-1,0} \cdot B & A_{n-1,1} \cdot B & \dotsb & A_{n-1,m-1} \cdot B
\end{bmatrix}
=
\begin{bmatrix}
A_{0,0} \cdot \color{red} {\begin{bmatrix}B_{0,0} & \dotsb & B_{0,l-1} \\ \vdots & \ddots & \vdots \\ B_{k-1,0} & \dotsb & B_{k-1,l-1} \end{bmatrix}} & \dotsb &
A_{0,m-1} \cdot \color{blue} {\begin{bmatrix}B_{0,0} & \dotsb & B_{0,l-1} \\ \vdots & \ddots & \vdots \\ B_{k-1,0} & \dotsb & B_{k-1,l-1} \end{bmatrix}} \\
\vdots & \ddots & \vdots \\
A_{n-1,0} \cdot \color{blue} {\begin{bmatrix}B_{0,0} & \dotsb & B_{0,l-1} \\ \vdots & \ddots & \vdots \\ B_{k-1,0} & \dotsb & B_{k-1,l-1} \end{bmatrix}} & \dotsb &
A_{n-1,m-1} \cdot \color{red} {\begin{bmatrix}B_{0,0} & \dotsb & B_{0,l-1} \\ \vdots & \ddots & \vdots \\ B_{k-1,0} & \dotsb & B_{k-1,l-1} \end{bmatrix}}
\end{bmatrix}
= \\
=
\begin{bmatrix}
A_{0,0} \cdot \color{red} {B_{0,0}} & \dotsb & A_{0,0} \cdot \color{red} {B_{0,l-1}} & \dotsb & A_{0,m-1} \cdot \color{blue} {B_{0,0}} & \dotsb & A_{0,m-1} \cdot \color{blue} {B_{0,l-1}} \\
\vdots & \ddots & \vdots & \dotsb & \vdots & \ddots & \vdots \\
A_{0,0} \cdot \color{red} {B_{k-1,0}} & \dotsb & A_{0,0} \cdot \color{red} {B_{k-1,l-1}} & \dotsb & A_{0,m-1} \cdot \color{blue} {B_{k-1,0}} & \dotsb & A_{0,m-1} \cdot \color{blue} {B_{k-1,l-1}} \\
\vdots & \vdots & \vdots & \ddots & \vdots & \vdots & \vdots \\
A_{n-1,0} \cdot \color{blue} {B_{0,0}} & \dotsb & A_{n-1,0} \cdot \color{blue} {B_{0,l-1}} & \dotsb & A_{n-1,m-1} \cdot \color{red} {B_{0,0}} & \dotsb & A_{n-1,m-1} \cdot \color{red} {B_{0,l-1}} \\
\vdots & \ddots & \vdots & \dotsb & \vdots & \ddots & \vdots \\
A_{n-1,0} \cdot \color{blue} {B_{k-1,0}} & \dotsb & A_{n-1,0} \cdot \color{blue} {B_{k-1,l-1}} & \dotsb & A_{n-1,m-1} \cdot \color{red} {B_{k-1,0}} & \dotsb & A_{n-1,m-1} \cdot \color{red} {B_{k-1,l-1}}
\end{bmatrix}$$
Here is a simple example:
$$\begin{bmatrix} 1 & 2 \\ 3 & 4 \end{bmatrix} \otimes \begin{bmatrix} 5 & 6 \\ 7 & 8 \end{bmatrix} =
\begin{bmatrix}
1 \cdot \begin{bmatrix} 5 & 6 \\ 7 & 8 \end{bmatrix} & 2 \cdot \begin{bmatrix} 5 & 6 \\ 7 & 8 \end{bmatrix} \\
3 \cdot \begin{bmatrix} 5 & 6 \\ 7 & 8 \end{bmatrix} & 4 \cdot \begin{bmatrix} 5 & 6 \\ 7 & 8 \end{bmatrix}
\end{bmatrix}
=
\begin{bmatrix}
1 \cdot 5 & 1 \cdot 6 & 2 \cdot 5 & 2 \cdot 6 \\
1 \cdot 7 & 1 \cdot 8 & 2 \cdot 7 & 2 \cdot 8 \\
3 \cdot 5 & 3 \cdot 6 & 4 \cdot 5 & 4 \cdot 6 \\
3 \cdot 7 & 3 \cdot 8 & 4 \cdot 7 & 4 \cdot 8
\end{bmatrix}
=
\begin{bmatrix}
5 & 6 & 10 & 12 \\
7 & 8 & 14 & 16 \\
15 & 18 & 20 & 24 \\
21 & 24 & 28 & 32
\end{bmatrix}$$
Notice that the tensor product of two vectors is another vector: if $V$ is an $n \times 1$ vector, and $W$ is an $m \times 1$ vector, $V \otimes W$ is an $(n \cdot m) \times 1$ vector.
The tensor product has the following properties:
* Distributivity over addition: $(A + B) \otimes C = A \otimes C + B \otimes C$, $A \otimes (B + C) = A \otimes B + A \otimes C$
* Associativity with scalar multiplication: $x(A \otimes B) = (xA) \otimes B = A \otimes (xB)$
* Mixed-product property (relation with matrix multiplication): $(A \otimes B) (C \otimes D) = (AC) \otimes (BD)$
### <span style="color:blue">Exercise 12</span>*: Tensor Product.
**Inputs:**
1. An $n \times m$ matrix $A$.
2. A $k \times l$ matrix $B$.
**Output:** Return an $(n \cdot k) \times (m \cdot l)$ matrix $A \otimes B$, the tensor product of $A$ and $B$.
```
@exercise
def tensor_product(a : Matrix, b : Matrix) -> Matrix:
n = len(a)
m = len(a[0])
k = len(b)
l = len(b[0])
result = create_empty_matrix(n*k, m*l)
#for each element in a, which is n x m
for arow in range(n):
for acol in range(m):
acurrent = a[arow][acol]
#copy B elements into result, multiplying by acurrent as we go
for brow in range(k):
for bcol in range(l):
bcurrent = b[brow][bcol]
#trick is indices in result
result[arow*k + brow][acol*l + bcol] = acurrent * bcurrent
return result
```
*Can't come up with a solution? See the explained solution in the* <i><a href="./Workbook_LinearAlgebra.ipynb#Exercise-12*:-Tensor-Product.">Linear Algebra Workbook</a></i>.
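NumPy ships the tensor (Kronecker) product as `np.kron`, which makes a handy cross-check for the worked example above (assuming NumPy is available; the exercise itself doesn't use it):
```
import numpy as np
# Reproduce the worked 2x2 example of the tensor product
print(np.kron([[1, 2], [3, 4]], [[5, 6], [7, 8]]))
# [[ 5  6 10 12]
#  [ 7  8 14 16]
#  [15 18 20 24]
#  [21 24 28 32]]
```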
## Next Steps
At this point, you know enough to complete the tutorials on [the concept of qubit](../Qubit/Qubit.ipynb), [single-qubit gates](../SingleQubitGates/SingleQubitGates.ipynb), [multi-qubit systems](../MultiQubitSystems/MultiQubitSystems.ipynb), and [multi-qubit gates](../MultiQubitGates/MultiQubitGates.ipynb).
The last part of this tutorial is a brief introduction to eigenvalues and eigenvectors, which are used for more advanced topics in quantum computing.
Feel free to move on to the next tutorials, and come back here once you encounter eigenvalues and eigenvectors elsewhere.
# Part III: Eigenvalues and Eigenvectors
Consider the following example of multiplying a matrix by a vector:
$$\begin{bmatrix}
1 & -3 & 3 \\
3 & -5 & 3 \\
6 & -6 & 4
\end{bmatrix}
\begin{bmatrix}
1 \\
1 \\
2
\end{bmatrix}
=
\begin{bmatrix}
4 \\
4 \\
8
\end{bmatrix}$$
Notice that the resulting vector is just the initial vector multiplied by a scalar (in this case 4). This behavior is so noteworthy that it is described using a special set of terms.
Given a nonzero $n \times n$ matrix $A$, a nonzero vector $V$, and a scalar $x$, if $AV = xV$, then $x$ is an **eigenvalue** of $A$, and $V$ is an **eigenvector** of $A$ corresponding to that eigenvalue.
The properties of eigenvalues and eigenvectors are used extensively in quantum computing. You can learn more about eigenvalues, eigenvectors, and their properties at [Wolfram MathWorld](http://mathworld.wolfram.com/Eigenvector.html) or on [Wikipedia](https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors).
### <span style="color:blue">Exercise 13</span>: Finding an eigenvalue.
**Inputs:**
1. An $n \times n$ matrix $A$.
2. An eigenvector $V$ of matrix $A$.
**Output:** Return a real number - the eigenvalue of $A$ that is associated with the given eigenvector.
<br/>
<details>
<summary><strong>Need a hint? Click here</strong></summary>
Multiply the matrix by the vector, then divide the elements of the result by the elements of the original vector. Don't forget though, some elements of the vector may be $0$.
</details>
```
@exercise
def find_eigenvalue(a : Matrix, v : Matrix) -> float:
#eigenvalue = AV / V
#AV will be (nxn) * (n * 1) = n * 1, so can divide each element
n = len(a)
prod_av = matrix_mult(a, v)
result = create_empty_matrix(n, 1)
eigenvalue = 0
for i in range(n):
if (v[i][0] != 0):
result[i][0] = prod_av[i][0] / v[i][0]
#find first non-zero result for eigenvalue
if result[i][0] != 0:
eigenvalue = result[i][0]
                break
return eigenvalue
```
*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-13:-Finding-an-eigenvalue.).*
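The opening example of this part can also be checked numerically: multiplying the matrix by the eigenvector gives back the vector scaled by the eigenvalue 4 (NumPy sketch, assumed available):
```
import numpy as np
A = np.array([[1, -3, 3], [3, -5, 3], [6, -6, 4]])
v = np.array([[1], [1], [2]])
print(A @ v)                      # [[4], [4], [8]], i.e. 4 * v
print(np.allclose(A @ v, 4 * v))  # True, so 4 is an eigenvalue of A
```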
### <span style="color:blue">Exercise 14</span>**: Finding an eigenvector.
**Inputs:**
1. A $2 \times 2$ matrix $A$.
2. An eigenvalue $x$ of matrix $A$.
**Output:** Return any non-zero eigenvector of $A$ that is associated with $x$.
<br/>
<details>
<summary><strong>Need a hint? Click here</strong></summary>
A matrix and an eigenvalue will have multiple eigenvectors (infinitely many, in fact), but you only need to find one.<br/>
Try treating the elements of the vector as variables in a system of two equations. Watch out for division by $0$!
</details>
```
@exercise
def find_eigenvector(a : Matrix, x : float) -> Matrix:
    # solve (A - xI)V = 0 for a 2x2 matrix A and eigenvalue x
    # a non-zero off-diagonal element pins down a non-zero solution
    if a[0][1] != 0:
        return [[a[0][1]], [x - a[0][0]]]
    if a[1][0] != 0:
        return [[x - a[1][1]], [a[1][0]]]
    # A is diagonal: return the basis vector that matches the eigenvalue
    return [[1], [0]] if a[0][0] == x else [[0], [1]]
```
*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-14**:-Finding-an-eigenvector.).*
```
#load watermark
%load_ext watermark
%watermark -a 'Gopala KR' -u -d -v -p watermark,numpy,pandas,matplotlib,nltk,sklearn,tensorflow,theano,mxnet,chainer,seaborn,keras,tflearn,bokeh,gensim
from preamble import *
%matplotlib inline
```
## Algorithm Chains and Pipelines
```
from sklearn.svm import SVC
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
# load and split the data
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(
cancer.data, cancer.target, random_state=0)
# compute minimum and maximum on the training data
scaler = MinMaxScaler().fit(X_train)
# rescale the training data
X_train_scaled = scaler.transform(X_train)
svm = SVC()
# learn an SVM on the scaled training data
svm.fit(X_train_scaled, y_train)
# scale the test data and score the scaled data
X_test_scaled = scaler.transform(X_test)
print("Test score: {:.2f}".format(svm.score(X_test_scaled, y_test)))
```
### Parameter Selection with Preprocessing
```
from sklearn.model_selection import GridSearchCV
# for illustration purposes only, don't use this code!
param_grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100],
'gamma': [0.001, 0.01, 0.1, 1, 10, 100]}
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=5)
grid.fit(X_train_scaled, y_train)
print("Best cross-validation accuracy: {:.2f}".format(grid.best_score_))
print("Best parameters: ", grid.best_params_)
print("Test set accuracy: {:.2f}".format(grid.score(X_test_scaled, y_test)))
mglearn.plots.plot_improper_processing()
```
### Building Pipelines
```
from sklearn.pipeline import Pipeline
pipe = Pipeline([("scaler", MinMaxScaler()), ("svm", SVC())])
pipe.fit(X_train, y_train)
print("Test score: {:.2f}".format(pipe.score(X_test, y_test)))
```
### Using Pipelines in Grid-searches
```
param_grid = {'svm__C': [0.001, 0.01, 0.1, 1, 10, 100],
'svm__gamma': [0.001, 0.01, 0.1, 1, 10, 100]}
grid = GridSearchCV(pipe, param_grid=param_grid, cv=5)
grid.fit(X_train, y_train)
print("Best cross-validation accuracy: {:.2f}".format(grid.best_score_))
print("Test set score: {:.2f}".format(grid.score(X_test, y_test)))
print("Best parameters: {}".format(grid.best_params_))
mglearn.plots.plot_proper_processing()
# X and y are sampled independently, so there is no real relationship between them
rnd = np.random.RandomState(seed=0)
X = rnd.normal(size=(100, 10000))
y = rnd.normal(size=(100,))
from sklearn.feature_selection import SelectPercentile, f_regression
# selecting features on the whole dataset before cross-validation leaks information about y
select = SelectPercentile(score_func=f_regression, percentile=5).fit(X, y)
X_selected = select.transform(X)
print("X_selected.shape: {}".format(X_selected.shape))
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import Ridge
print("Cross-validation accuracy (cv only on ridge): {:.2f}".format(
np.mean(cross_val_score(Ridge(), X_selected, y, cv=5))))
pipe = Pipeline([("select", SelectPercentile(score_func=f_regression,
percentile=5)),
("ridge", Ridge())])
print("Cross-validation accuracy (pipeline): {:.2f}".format(
np.mean(cross_val_score(pipe, X, y, cv=5))))
```
### The General Pipeline Interface
```
def fit(self, X, y):
X_transformed = X
for name, estimator in self.steps[:-1]:
# iterate over all but the final step
# fit and transform the data
X_transformed = estimator.fit_transform(X_transformed, y)
# fit the last step
self.steps[-1][1].fit(X_transformed, y)
return self
def predict(self, X):
X_transformed = X
for step in self.steps[:-1]:
# iterate over all but the final step
# transform the data
X_transformed = step[1].transform(X_transformed)
# predict using the last step
return self.steps[-1][1].predict(X_transformed)
```

### Convenient Pipeline creation with ``make_pipeline``
```
from sklearn.pipeline import make_pipeline
# standard syntax
pipe_long = Pipeline([("scaler", MinMaxScaler()), ("svm", SVC(C=100))])
# abbreviated syntax
pipe_short = make_pipeline(MinMaxScaler(), SVC(C=100))
print("Pipeline steps:\n{}".format(pipe_short.steps))
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
pipe = make_pipeline(StandardScaler(), PCA(n_components=2), StandardScaler())
print("Pipeline steps:\n{}".format(pipe.steps))
```
#### Accessing step attributes
```
# fit the pipeline defined before to the cancer dataset
pipe.fit(cancer.data)
# extract the first two principal components from the "pca" step
components = pipe.named_steps["pca"].components_
print("components.shape: {}".format(components.shape))
```
#### Accessing Attributes in a Pipeline inside GridSearchCV
```
from sklearn.linear_model import LogisticRegression
pipe = make_pipeline(StandardScaler(), LogisticRegression())
param_grid = {'logisticregression__C': [0.01, 0.1, 1, 10, 100]}
X_train, X_test, y_train, y_test = train_test_split(
cancer.data, cancer.target, random_state=4)
grid = GridSearchCV(pipe, param_grid, cv=5)
grid.fit(X_train, y_train)
print("Best estimator:\n{}".format(grid.best_estimator_))
print("Logistic regression step:\n{}".format(
grid.best_estimator_.named_steps["logisticregression"]))
print("Logistic regression coefficients:\n{}".format(
grid.best_estimator_.named_steps["logisticregression"].coef_))
```
### Grid-searching preprocessing steps and model parameters
```
from sklearn.datasets import load_boston
boston = load_boston()
X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target,
random_state=0)
from sklearn.preprocessing import PolynomialFeatures
pipe = make_pipeline(
StandardScaler(),
PolynomialFeatures(),
Ridge())
param_grid = {'polynomialfeatures__degree': [1, 2, 3],
'ridge__alpha': [0.001, 0.01, 0.1, 1, 10, 100]}
grid = GridSearchCV(pipe, param_grid=param_grid, cv=5, n_jobs=-1)
grid.fit(X_train, y_train)
mglearn.tools.heatmap(grid.cv_results_['mean_test_score'].reshape(3, -1),
xlabel="ridge__alpha", ylabel="polynomialfeatures__degree",
xticklabels=param_grid['ridge__alpha'],
yticklabels=param_grid['polynomialfeatures__degree'], vmin=0)
print("Best parameters: {}".format(grid.best_params_))
print("Test-set score: {:.2f}".format(grid.score(X_test, y_test)))
param_grid = {'ridge__alpha': [0.001, 0.01, 0.1, 1, 10, 100]}
pipe = make_pipeline(StandardScaler(), Ridge())
grid = GridSearchCV(pipe, param_grid, cv=5)
grid.fit(X_train, y_train)
print("Score without poly features: {:.2f}".format(grid.score(X_test, y_test)))
pipe = Pipeline([('preprocessing', StandardScaler()), ('classifier', SVC())])
from sklearn.ensemble import RandomForestClassifier
param_grid = [
{'classifier': [SVC()], 'preprocessing': [StandardScaler(), None],
'classifier__gamma': [0.001, 0.01, 0.1, 1, 10, 100],
'classifier__C': [0.001, 0.01, 0.1, 1, 10, 100]},
{'classifier': [RandomForestClassifier(n_estimators=100)],
'preprocessing': [None], 'classifier__max_features': [1, 2, 3]}]
X_train, X_test, y_train, y_test = train_test_split(
cancer.data, cancer.target, random_state=0)
grid = GridSearchCV(pipe, param_grid, cv=5)
grid.fit(X_train, y_train)
print("Best params:\n{}\n".format(grid.best_params_))
print("Best cross-validation score: {:.2f}".format(grid.best_score_))
print("Test-set score: {:.2f}".format(grid.score(X_test, y_test)))
```
### Summary and Outlook
```
# test complete ; Gopal
```
[](https://colab.research.google.com/github/jfcrenshaw/pzflow/blob/main/examples/marginalization.ipynb)
If running in Colab, to switch to GPU, go to the menu and select Runtime -> Change runtime type -> Hardware accelerator -> GPU.
In addition, uncomment and run the following code:
```
# !pip install pzflow
```
-------------------
## Marginalization during posterior calculation
This example notebook demonstrates how to marginalize over missing variables during posterior calculation.
We will use the Flow trained in the [redshift example](https://github.com/jfcrenshaw/pzflow/blob/main/examples/redshift_example.ipynb).
```
import jax.numpy as np
import matplotlib.pyplot as plt
from pzflow.examples import get_example_flow
```
First let's load the pre-trained flow, and use it to generate some samples:
```
flow = get_example_flow()
samples = flow.sample(2, seed=123)
samples
```
Remember that we can calculate posteriors for the data in samples. For example, let's plot redshift posteriors:
```
grid = np.linspace(0.25, 1.45, 100)
pdfs = flow.posterior(samples, column="redshift", grid=grid)
fig, axes = plt.subplots(1, 2, figsize=(5.5, 2), dpi=120, constrained_layout=True)
for i, ax in enumerate(axes.flatten()):
ax.plot(grid, pdfs[i], label="Redshift posterior")
ztrue = samples["redshift"][i]
ax.axvline(ztrue, c="C3", label="True redshift")
ax.set(
xlabel="redshift",
xlim=(ztrue - 0.25, ztrue + 0.25),
yticks=[]
)
axes[0].legend(
bbox_to_anchor=(0.55, 1.05, 1, 0.2),
loc="lower left",
mode="expand",
borderaxespad=0,
ncol=2,
fontsize=8,
)
plt.show()
```
But what if we have missing values? E.g. let's imagine that galaxy 1 wasn't observed in the u band, while galaxy 2 wasn't observed in the u or y bands. We will mark these non-observations with the value 99:
```
# make a new copy of the samples
samples2 = samples.copy()
# make the non-observations
samples2.iloc[0, 1] = 99
samples2.iloc[1, 1] = 99
samples2.iloc[1, -1] = 99
# print the new samples
samples2
```
Now if we want to calculate posteriors, we can't simply call `flow.posterior()` as before because the flow will think that 99 is the actual value for those bands, rather than just a flag for a missing value. What we can do, however, is pass `marg_rules`, which is a dictionary of rules that tells the Flow how to marginalize over missing variables.
`marg_rules` must include:
- "flag": 99, which tells the posterior method that 99 is the flag for missing values
- "u": callable, which returns an array of values for the u band over which to marginalize
- "y": callable, which returns an array of values for the y band over which to marginalize
"u" and "y" both map to callable, because you can use a function of the other values to decide what values of u and y to marginalize over. For example, maybe you expect the value of u to be close to the value of g. In which case you might use:
```
"u": lambda row: np.linspace(row["g"] - 1, row["g"] + 1, 100)
```
The only constraint is that regardless of the values of the other variables, the callable must *always* return an array of the same length.
For this example, we won't make the marginalization rules a function of the other variables, but will instead return a fixed array.
```
marg_rules = {
"flag": 99, # tells the posterior method that 99 means missing value
"u": lambda row: np.linspace(26, 28, 40), # the array of u values to marginalize over
"y": lambda row: np.linspace(24, 26, 40), # the array of y values to marginalize over
}
pdfs2 = flow.posterior(samples2, column="redshift", grid=grid, marg_rules=marg_rules)
fig, axes = plt.subplots(1, 2, figsize=(5.5, 2), dpi=120, constrained_layout=True)
for i, ax in enumerate(axes.flatten()):
ax.plot(grid, pdfs[i], label="Posterior w/ all bands")
ax.plot(grid, pdfs2[i], label="Posterior w/ missing bands marginalized")
ztrue = samples["redshift"][i]
ax.axvline(ztrue, c="C3", label="True redshift")
ax.set(
xlabel="redshift",
xlim=(ztrue - 0.25, ztrue + 0.25),
yticks=[]
)
axes[0].legend(
bbox_to_anchor=(0, 1.05, 2, 0.2),
loc="lower left",
mode="expand",
borderaxespad=0,
ncol=3,
fontsize=7.5,
)
plt.show()
```
You can see that marginalizing over the missing bands (i.e. throwing out information) degrades the posteriors.
Be warned that marginalizing over fine grids quickly becomes computationally expensive, especially when rows in your data frame are missing multiple values.
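As a rough back-of-the-envelope count (assuming the posterior is evaluated on the full product of the marginalization grids and the redshift grid - the exact bookkeeping inside pzflow may differ), the grids used above already illustrate how quickly the cost grows:
```
redshift_grid = 100  # points in `grid`
u_grid = 40          # points returned by marg_rules["u"]
y_grid = 40          # points returned by marg_rules["y"]
# galaxy 1: only the u band is missing
print(redshift_grid * u_grid)            # 4,000 density evaluations
# galaxy 2: both the u and y bands are missing
print(redshift_grid * u_grid * y_grid)   # 160,000 density evaluations
```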
```
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
import os
from PIL import Image
import PIL.ImageOps
from skimage.morphology import watershed
from skimage.feature import peak_local_max
from skimage.filters import threshold_otsu
from skimage.morphology import binary_closing
from skimage.color import rgb2gray
```
Watershed with binarization first
```
arraydirectory= './edge_array/'
photodirectory='./photos/'
image=np.array(Image.open(photodirectory + '1449.jpg'))
image = rgb2gray(image)
thresh = threshold_otsu(image)
img_bin = image > thresh
image_closed=binary_closing(img_bin)
# Now we want to separate the two objects in image
# Generate the markers as local maxima of the distance to the background
distance = ndi.distance_transform_edt(image_closed)
local_maxi = peak_local_max(distance, indices=False)
markers = ndi.label(local_maxi)[0]
labels = watershed(-distance, markers, mask=image_closed)
fig, axes = plt.subplots(ncols=3, figsize=(9, 3), sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax = axes.ravel()
ax[0].imshow(image_closed, cmap=plt.cm.gray, interpolation='nearest')
ax[0].set_title('Overlapping objects')
ax[1].imshow(-distance, cmap=plt.cm.gray, interpolation='nearest')
ax[1].set_title('Distances')
ax[2].imshow(labels, cmap=plt.cm.spectral, interpolation='nearest')
ax[2].set_title('Separated objects')
for a in ax:
a.set_axis_off()
fig.tight_layout()
plt.show()
```
Watershed on image itself
```
arraydirectory= './edge_array/'
photodirectory='./photos/'
image=np.array(Image.open(photodirectory + '1449.jpg'))
# Now we want to separate the two objects in image
# Generate the markers as local maxima of the distance to the background
distance = ndi.distance_transform_edt(image)
local_maxi = peak_local_max(distance, indices=False)
markers = ndi.label(local_maxi)[0]
labels = watershed(-distance, markers, mask=image)
fig, axes = plt.subplots(ncols=3, figsize=(9, 3), sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax = axes.ravel()
ax[0].imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax[0].set_title('Overlapping objects')
ax[1].imshow(-distance, cmap=plt.cm.gray, interpolation='nearest')
ax[1].set_title('Distances')
ax[2].imshow(labels, cmap=plt.cm.spectral, interpolation='nearest')
ax[2].set_title('Separated objects')
for a in ax:
a.set_axis_off()
fig.tight_layout()
plt.show()
```
So we use Watershed on the binary picture.
```
arraydirectory= '../FeatureSampleFoodClassification/watershed_array/'
photodirectory='../SampleFoodClassifier_Norm/'
if not os.path.exists(arraydirectory):
os.makedirs(arraydirectory)
for fn in os.listdir(photodirectory):
if os.path.isfile(photodirectory + fn) and '.jpg' in fn:
img=np.array(Image.open(photodirectory + fn))
img = rgb2gray(img)
thresh = threshold_otsu(img)
img_bin = img > thresh
img_closed=binary_closing(img_bin)
# Now we want to separate the two objects in image
# Generate the markers as local maxima of the distance to the background
distance = ndi.distance_transform_edt(img_closed)
local_maxi = peak_local_max(distance, indices=False)
markers = ndi.label(local_maxi)[0]
ws = watershed(-distance, markers, mask=img_closed)
ws_flat=[item for sublist in ws for item in sublist]
np.save(arraydirectory + fn,ws_flat)
print('done')
```
# Access Computation
This tutorial demonstrates how to compute access.
## Setup
```
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from ostk.mathematics.objects import RealInterval
from ostk.physics.units import Length
from ostk.physics.units import Angle
from ostk.physics.time import Scale
from ostk.physics.time import Instant
from ostk.physics.time import Duration
from ostk.physics.time import Interval
from ostk.physics.time import DateTime
from ostk.physics.time import Time
from ostk.physics.coordinate.spherical import LLA
from ostk.physics.coordinate.spherical import AER
from ostk.physics.coordinate import Position
from ostk.physics.coordinate import Frame
from ostk.physics import Environment
from ostk.physics.environment.objects.celestial_bodies import Earth
from ostk.astrodynamics import Trajectory
from ostk.astrodynamics.trajectory import Orbit
from ostk.astrodynamics.trajectory.orbit.models import Kepler
from ostk.astrodynamics.trajectory.orbit.models.kepler import COE
from ostk.astrodynamics.trajectory.orbit.models import SGP4
from ostk.astrodynamics.trajectory.orbit.models.sgp4 import TLE
from ostk.astrodynamics import Access
from ostk.astrodynamics.access import Generator as AccessGenerator
```
---
## Access
An access represents an object-to-object visibility period.
In this example, let's compute accesses between a fixed position on the ground and a satellite in LEO.
## Environment
Let's setup an environment (which describes where planets are, etc...):
```
environment = Environment.default() ;
```
### Origin
Let's define a fixed ground position, using its geographic coordinates:
```
latitude = Angle.degrees(50.0)
longitude = Angle.degrees(20.0)
altitude = Length.meters(30.0)
from_lla = LLA(latitude, longitude, altitude)
from_position = Position.meters(from_lla.to_cartesian(Earth.equatorial_radius, Earth.flattening), Frame.ITRF())
```
And derive a trajectory, fixed at that position:
```
from_trajectory = Trajectory.position(from_position)
```
### Target
Let's consider a satellite in **Low-Earth Orbit**.
```
earth = environment.access_celestial_object_with_name("Earth")
```
We can define its orbit with **Classical Orbital Elements**:
```
a = Earth.equatorial_radius + Length.kilometers(500.0)
e = 0.000
i = Angle.degrees(97.8893)
raan = Angle.degrees(100.372)
aop = Angle.degrees(0.0)
nu = Angle.degrees(0.0201851)
coe = COE(a, e, i, raan, aop, nu)
```
... and by using a **Keplerian** orbital model:
```
epoch = Instant.date_time(DateTime(2018, 1, 1, 0, 0, 0), Scale.UTC)
keplerian_model = Kepler(coe, epoch, earth, Kepler.PerturbationType.J2)
```
Or with a **Two-Line Element** (TLE) set:
```
tle = TLE(
"ISS (ZARYA)",
"1 25544U 98067A 18268.86272795 .00002184 00000-0 40781-4 0 9990",
"2 25544 51.6405 237.0010 0003980 205.4375 242.3358 15.53733046134172"
)
```
... along with its associated **SGP4** orbital model:
```
sgp4_model = SGP4(tle)
```
Below, we select which orbital model to use:
```
orbital_model = keplerian_model
# orbital_model = sgp4_model
```
We then obtain the satellite orbit (which is a **Trajectory** object):
```
satellite_orbit = Orbit(orbital_model, earth)
```
Alternatively, the **Orbit** class can provide some useful shortcuts (for usual orbit types):
```
epoch = Instant.date_time(DateTime(2018, 1, 1, 0, 0, 0), Scale.UTC)
satellite_orbit = Orbit.sun_synchronous(epoch, Length.kilometers(500.0), Time(12, 0, 0), earth)
```
### Access
Now that the origin and the target trajectories are well defined, we can compute the **Access**.
Let's first define an **analysis interval**:
```
start_instant = Instant.date_time(DateTime.parse("2018-01-01 00:00:00"), Scale.UTC) ;
end_instant = Instant.date_time(DateTime.parse("2018-01-10 00:00:00"), Scale.UTC) ;
interval = Interval.closed(start_instant, end_instant) ;
```
Then, using an **Access Generator**, we can compute the accesses within the intervals of interest:
```
azimuth_range = RealInterval.closed(0.0, 360.0) # [deg]
elevation_range = RealInterval.closed(20.0, 90.0) # [deg]
range_range = RealInterval.closed(0.0, 10000e3) # [m]
# Access generator with Azimuth-Range-Elevation constraints
access_generator = AccessGenerator.aer_ranges(azimuth_range, elevation_range, range_range, environment)
accesses = access_generator.compute_accesses(interval, from_trajectory, satellite_orbit)
```
And format the output using a dataframe:
```
accesses_df = pd.DataFrame([[str(access.get_type()), repr(access.get_acquisition_of_signal()), repr(access.get_time_of_closest_approach()), repr(access.get_loss_of_signal()), float(access.get_duration().in_seconds())] for access in accesses], columns=['Type', 'AOS', 'TCA', 'LOS', 'Duration'])
```
### Output
Print accesses:
```
accesses_df
```
Let's calculate the geographic coordinate of the satellite, during access:
```
def compute_lla (state):
lla = LLA.cartesian(state.get_position().in_frame(Frame.ITRF(), state.get_instant()).get_coordinates(), Earth.equatorial_radius, Earth.flattening)
return [float(lla.get_latitude().in_degrees()), float(lla.get_longitude().in_degrees()), float(lla.get_altitude().in_meters())]
def compute_aer (instant, from_lla, to_position):
nedFrame = earth.get_frame_at(from_lla, Earth.FrameType.NED)
    fromPosition_NED = from_position.in_frame(nedFrame, instant)
    toPosition_NED = to_position.in_frame(nedFrame, instant)
    aer = AER.from_position_to_position(fromPosition_NED, toPosition_NED, True)
return [float(aer.get_azimuth().in_degrees()), float(aer.get_elevation().in_degrees()), float(aer.get_range().in_meters())]
def compute_time_lla_aer_state (state):
instant = state.get_instant()
lla = compute_lla(state)
aer = compute_aer(instant, from_lla, state.get_position().in_frame(Frame.ITRF(), state.get_instant()))
return [instant, lla[0], lla[1], lla[2], aer[0], aer[1], aer[2]]
def compute_trajectory_geometry (aTrajectory, anInterval):
return [compute_lla(state) for state in aTrajectory.get_states_at(anInterval.generate_grid(Duration.minutes(1.0)))]
def compute_access_geometry (access):
return [compute_time_lla_aer_state(state) for state in satellite_orbit.get_states_at(access.get_interval().generate_grid(Duration.seconds(1.0)))]
satellite_orbit_geometry_df = pd.DataFrame(compute_trajectory_geometry(satellite_orbit, interval), columns=['Latitude', 'Longitude', 'Altitude'])
satellite_orbit_geometry_df.head()
access_geometry_dfs = [pd.DataFrame(compute_access_geometry(access), columns=['Time', 'Latitude', 'Longitude', 'Altitude', 'Azimuth', 'Elevation', 'Range']) for access in accesses] ;
def get_max_elevation (df):
return df.loc[df['Elevation'].idxmax()]['Elevation']
```
And plot the geometries onto a map:
```
data = []
# Target geometry
data.append(
dict(
type = 'scattergeo',
lon = [float(longitude.in_degrees())],
lat = [float(latitude.in_degrees())],
mode = 'markers',
marker = dict(
size = 10,
color = 'orange'
)
)
)
# Orbit geometry
data.append(
dict(
type = 'scattergeo',
lon = satellite_orbit_geometry_df['Longitude'],
lat = satellite_orbit_geometry_df['Latitude'],
mode = 'lines',
line = dict(
width = 1,
color = 'rgba(0, 0, 0, 0.1)',
)
)
)
# Access geometry
for access_geometry_df in access_geometry_dfs:
data.append(
dict(
type = 'scattergeo',
lon = access_geometry_df['Longitude'],
lat = access_geometry_df['Latitude'],
mode = 'lines',
line = dict(
width = 1,
color = 'red',
)
)
)
layout = dict(
title = None,
showlegend = False,
height = 1000,
geo = dict(
showland = True,
landcolor = 'rgb(243, 243, 243)',
countrycolor = 'rgb(204, 204, 204)',
),
)
figure = go.Figure(data = data, layout = layout)
figure.show()
```
---
```
import tensorflow as tf
from tensorflow.keras import models
import numpy as np
import matplotlib.pyplot as plt
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
        #callback that stops training once accuracy exceeds 99%
if(logs.get('accuracy')>0.99):
print("\nim maxed out baby, too goated!")
self.model.stop_training = True
path = "mnist.npz"
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data(path=path)
callbacks = myCallback()
x_train = x_train / 255.0
x_train = x_train.reshape(60000, 28, 28, 1)
x_test = x_test.reshape(10000, 28, 28, 1)
x_test = x_test / 255.0
model = tf.keras.models.Sequential([
#convolution part
# creates a convolution layer with 64 filters with 3 by 3 dimensions
# sets activation function to relu, with drops all negative values
# sets input shape to 28 by 28 array, same as before, 1 denotes that the image is gray-scale, only 1 color channel
tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(28, 28, 1)),
# create a max pooling layer with a 2 by 2 pooling filter
# means that the largest pixel value with be chosen out of every 4 pixels
tf.keras.layers.MaxPooling2D(2, 2),
# insert another set of convolutions and pooling so that the network can learn another set of convolutions
# then pooling layer is added so that the images can get smaller again
# this reduces number of dense layers needed
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
#deep neural network part
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.summary() #generates summary of parameters so we can see images journey throughout the network
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
#the model is much slower to train now
#this is because 64 filters are convolved across each image, so the computation is much heavier
#but the accuracy is much better now
#train on the training set; the test set is kept for the visualizations below
model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])
print(y_test[:100])
f, axarr = plt.subplots(3,4)
FIRST_IMAGE=0    # index of the first test image to visualize
SECOND_IMAGE=11  # index of the second test image to visualize
THIRD_IMAGE=26   # index of the third test image to visualize
CONVOLUTION_NUMBER = 1
layer_outputs = [layer.output for layer in model.layers]
activation_model = tf.keras.models.Model(inputs = model.input, outputs = layer_outputs)
#looking at effect that the convolution has on our model
for x in range(4):
f1 = activation_model.predict(x_test[FIRST_IMAGE].reshape(1, 28, 28, 1))[x]
axarr[0,x].imshow(f1[0, : , :, CONVOLUTION_NUMBER], cmap='inferno')
axarr[0,x].grid(False)
f2 = activation_model.predict(x_test[SECOND_IMAGE].reshape(1, 28, 28, 1))[x]
axarr[1,x].imshow(f2[0, : , :, CONVOLUTION_NUMBER], cmap='inferno')
axarr[1,x].grid(False)
f3 = activation_model.predict(x_test[THIRD_IMAGE].reshape(1, 28, 28, 1))[x]
axarr[2,x].imshow(f3[0, : , :, CONVOLUTION_NUMBER], cmap='inferno')
axarr[2,x].grid(False)
```
## Importing Libraries & getting Data
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
data = pd.read_csv("dataset/winequalityN.csv")
data.head()
data.info()
data.describe()
data.columns
columns = ['type', 'fixed acidity', 'volatile acidity', 'citric acid','residual sugar', 'chlorides', 'free sulfur dioxide','total sulfur dioxide', 'density', 'pH', 'sulphates', 'alcohol','quality']
data['type']
sns.countplot(data=data ,x="quality")
```
## Handling Missing Values
```
sns.heatmap(data.isnull(), yticklabels=False, cmap="viridis", cbar=False)
data.isnull().values.sum()
# replacing missing values with mean
data = data.fillna(data.mean())
sns.heatmap(data.isnull(), yticklabels=False, cmap="viridis", cbar=False)
# 'type' is the categorical target variable, so pop it out of the feature columns to use as labels
labels = data.pop('type')
cat_columns = ['fixed acidity', 'volatile acidity', 'citric acid','residual sugar', 'chlorides', 'free sulfur dioxide','total sulfur dioxide', 'density', 'pH', 'sulphates', 'alcohol','quality']
data.head()
```
## Scaling & Encoding
```
from sklearn.preprocessing import MinMaxScaler , LabelEncoder
def scale_data(data):
scaler = MinMaxScaler(feature_range=(0,1))
X = np.array(data)
X = scaler.fit_transform(X)
return X , scaler
def encode_data(labels):
y = np.array(labels)
le = LabelEncoder()
y = le.fit_transform(y)
return y , le
# another way to encode
# labels.type = labels.type.apply(lambda x: 0 if x == "red" else 1)
X , scaler = scale_data(data)
print(X)
print(scaler.inverse_transform(X))
y , le = encode_data(labels)
print(y)
print(le.inverse_transform(y))
```
## EDA
```
plt.figure(figsize=(10,10))
sns.heatmap(data.corr() , annot=True)
plt.show()
```
### For Handling Outliers
```
def univariate(var):
sns.boxplot(data=data , y=var)
plt.show()
cat_columns
univariate('fixed acidity')
univariate('volatile acidity')
univariate('citric acid')
univariate('pH')
univariate('sulphates')
univariate('alcohol')
univariate('total sulfur dioxide')
univariate('chlorides')
univariate('residual sugar')
```
### Density and pH
```
sns.displot(data ,x="density" ,color='r',col="quality")
sns.displot(data, x="pH", color='g', col="quality")
```
## Bivariate Analysis
```
data['quality'].describe()
```
### Numerical variables vs Target variable
```
for i in cat_columns:
fig , ax = plt.subplots(1,3,figsize=(20,5))
plt.subplots_adjust(hspace=1)
sns.barplot(data=data , y=i ,x="quality" , ax=ax[0])
sns.lineplot(data=data, y=i, x="quality", ax=ax[1])
sns.violinplot(data=data, y=i, x="quality", ax=ax[2])
```
## Model building with Random Forest classifier
```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=42)
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier()
rfc.fit(X_train , y_train)
y_predicted = rfc.predict(X_test)
y_predicted[:15] , y_test[:15]
```
## Evaluation
```
from sklearn.metrics import accuracy_score ,confusion_matrix
print("Accuracy :" , (accuracy_score(y_predicted , y_test)))
sns.heatmap(confusion_matrix(y_predicted ,y_test),annot=True ,cmap='Purples' ,fmt='.4g')
```
# Interpreting Neural Network Weights
Neural nets (especially deep neural nets) are some of the most powerful machine learning algorithms available. However, it can be difficult to understand (intuitively) how they work.
In the first part of this notebook, I highlight the connection between neural networks and template matching--a simple technique for classification that is popular in computer vision and signal processing. I find this observation is helpful for intution about how neural nets classify images--I hope you find this useful too!
In the second part of the notebook, I point out that for convolutional neural nets it can be helpful to think of weights as sliding filters (e.g. edge-detecting filters) which, in the early network layers, detect low-level image features.
## Template Matching
[Template matching](https://en.wikipedia.org/wiki/Template_matching) is used in computer vision to compare images. It does this by treating images as vectors and computing their dot product: very similar images give a large dot product, and very dissimilar images give a small (or negative) dot product. Why?
Mathematically, if you represent images as vectors, you can compute the difference between two images $I_1$ and $I_2$ like
$$|I_1 - I_2 |^2 = |I_1|^2 + |I_2|^2 - 2 \, I_1 \cdot I_2$$
Note that the dot product $I_1 \cdot I_2$ between two images is largest when the difference $|I_1 - I_2|$ between images is smallest, and vice versa.
For example, here's a template for each digit:
```
import matplotlib.pyplot as plt
import cv2
templates = []
for i in range(10):
img = cv2.imread("templates/{}.png".format(str(i)), cv2.IMREAD_GRAYSCALE)
if img is None:
raise Exception("Error: Failed to load image {}".format(i))
templates.append(img)
plt.subplot(2, 5, 1 + i)
plt.imshow(img, cmap=plt.get_cmap('gray'))
plt.show()
```
We can illustrate template matching by computing the dot products between digit 1 and every other digit. To make the results more robust, we compute the normalized dot product
$$ \frac{I_1 \cdot I_2}{|I_1| |I_2|}$$
(It's important to normalize the dot product--otherwise brighter images will give stronger matches than darker ones, and that would not make sense.)
```
img = templates[1]
for i in range(10):
template = templates[i]
print(" Normalized dot product between 1 and {} = {}".format(i,
cv2.matchTemplate(img, template, cv2.TM_CCORR_NORMED )[0][0] ))
```
We can see that 1 is more strongly correlated to 1 than any other digit. That's the principle behind template matching--it can measure image similarity.
Unfortunately, template matching is not robust to changes in image shapes, sizes, rotations, or partial occlusion. However, neural nets can be very robust to such image defects--that's why they are more powerful.
## Viewing Neural Network Weights as Templates
In a neuron inside a neural net, inputs $x$ (a vector) are combined with weights $W$ (another vector) to generate an output. Pictorially
<img src="figs/neuron.png" style="width: 250px;">
The output of the neuron is computed by
$$ f( W \cdot x + b) $$
where $f$ is called the *activation function* and $b$ is called the *bias term*. Most important for this discussion, the dot product $W \cdot x$ resembles template matching.
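For instance, here is the formula evaluated directly for a toy 3-input neuron (a hypothetical example with made-up numbers, using a sigmoid activation):
```
import numpy as np
W = np.array([0.2, -0.5, 0.1])   # weights
x = np.array([1.0, 2.0, 3.0])    # inputs
b = 0.4                          # bias term
sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
output = sigmoid(np.dot(W, x) + b)   # f(W . x + b)
print(output)                        # approximately 0.475
```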
As we will show, in very simple neural nets (sometimes called *linear classifiers*) we can interpret the weights $W$ as templates--the neural net learns how to perform template matching!
We want to make a linear classifier to recognize digits 0-9. We implement a softmax architecture (shown below) with 10 outputs. For example, if digit 7 is recognized, neuron 7 will have an output close to 1 and the remaining neurons will have outputs close to 0. (FYI, this means we will have to one-hot encode the labels before training.)
The input (the image) is $x$, which we draw as a flattened (1d) vector. There are 10 weight vectors $W_0$ - $W_9$ (one for each neuron).
<img src="figs/nnet.png" style="width: 400px;">
We write the $i^{\mathrm th}$ output as
$$ \mathrm{output}_i = f( W_{i} \cdot x + b_i)$$
As we said, we expect each weight vector $W_i$ learned during training will be a template for digit $i$.
Let's train the neural net on the MNIST data set (a set of 70,000 images of hand-written digits 0-9.) We'll use Keras to implement and train the neural net.
```
#Developed with Keras 2.0.2 and the tensorflow backend
#Load the MNIST data set of handwritten digits 0-9:
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
(x_train, y_train), (x_test, y_test) = mnist.load_data()
#Invert the image colors, convert to float, and normalize values:
x_train = (255 - x_train).astype('float')/255.0
x_test = (255 - x_test).astype('float')/255.0
# plot first 5 images
for i in range(5):
plt.subplot(1, 5, 1+i)
plt.imshow(x_train[i], cmap=plt.get_cmap('gray'))
plt.show()
#Let's flatten the images to 1-d vectors for processing
image_shape = (28, 28)
num_pixels = 28*28
x_train = x_train.reshape(x_train.shape[0], num_pixels)
x_test = x_test.reshape(x_test.shape[0], num_pixels)
#Now let's one-hot encode the target variable before training
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
```
We can see that after 1-hot encoding, each digit label becomes a 10-d vector. For example, for the digit 1, there is a 1 in position 1 and 0's in all other positions of the 10-d vector:
```
y_train[3]
```
Now let us create the neural net and train it
```
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils
from keras.utils import plot_model
# fix random seed for reproducibility
seed = 123
np.random.seed(seed)
# Define the model:
def linear_classifier_model():
"""single layer, 10 output classes"""
# create model
model = Sequential()
# one layer
model.add(Dense(num_classes, kernel_initializer='normal',
activation='softmax', input_shape=(num_pixels,)))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
#Instantiate model
model = linear_classifier_model()
#Train model
model.fit(x_train, y_train, validation_data=(x_test, y_test),
epochs=10, batch_size=200, verbose=2)
# Final evaluation of the model
scores = model.evaluate(x_test, y_test, verbose=0)
print("Baseline Error: %.2f%%" % (100-scores[1]*100))
```
Keras lets us easily visualize the model to check that it has the correct architecture
```
plot_model(model, to_file='figs/model.png', show_shapes=True)
```
<img src="figs/model.png">
Finally, let us grab the weights from layer dense_1, unflatten the shape, and graph them:
```
layer_dense_1 = model.get_layer('dense_1')
weights1 = layer_dense_1.get_weights()[0]
bias1 = layer_dense_1.get_weights()[1]
#Reshape the flattened weights into 28x28 images, one per output digit
weights1 = weights1.reshape(28, 28, 10)
#Plot the weights for the first 4 digits
for i in range(4):
plt.subplot(1, 4, 1 + i)
plt.imshow(weights1[:, :, i], cmap=plt.get_cmap('gray'))
plt.show()
```
We can see that the learned weights indeed resemble the digits 0, 1, 2, 3, ... just as we would expect from template matching.
For further details, take a look at [http://cs231n.github.io/linear-classify/](http://cs231n.github.io/linear-classify/)
## Filters in Convolutional Neural Nets
In convolutional neural networks, it is common to use small (3x3 or 5x5) sliding convolutional layers instead of large, fully-connected layers. In that case, it may be more helpful to think of the weights as sliding filters to detect low-level features such as edges, textures, and blobs. Indeed, the learned weights often resemble standard image processing filters. Let us try to see this.
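To make the "sliding filter" picture concrete, the snippet below convolves a tiny image containing a vertical edge with a hand-written 3x3 gradient kernel (an illustrative example using SciPy, not part of the trained network):
```
import numpy as np
from scipy.signal import convolve2d
# A 5x5 image: dark on the left, bright on the right (a vertical edge)
image = np.array([[0, 0, 1, 1, 1]] * 5, dtype=float)
# A 3x3 filter that responds to horizontal changes in intensity
edge_filter = np.array([[1, 0, -1],
                        [1, 0, -1],
                        [1, 0, -1]], dtype=float)
response = convolve2d(image, edge_filter, mode='valid')
print(response)  # large magnitudes mark the columns where the edge sits
```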
First, let us reshape (unflatten) the data so the images are again rectangular:
```
from keras import backend as K
K.set_image_data_format('channels_last') #specify image format
img_shape = (28, 28, 1)
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
```
Now let us define the neural net:
```
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Convolution2D
from keras.layers.convolutional import MaxPooling2D
def covnet_model():
"""Convolutional neural net"""
model = Sequential()
model.add(Convolution2D(32, kernel_size = 7, strides=1, padding='valid',
input_shape=(28, 28, 1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
#flatten 3d tensors into 1d vector.
model.add(Flatten())
#Add 128 neuron feed forward hidden layer
model.add(Dense(128, activation='relu'))
#Add output, 10 neurons for 10 target classes, activation is softmax so outputs are
#probabilities
model.add(Dense(num_classes, activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
model_covnet = covnet_model()
```
Let us plot the network architecture
```
plot_model(model_covnet, to_file='figs/model2.png', show_shapes=True)
```
<img src="figs/model2.png" style="width: 350px;">
Now let us train the network
```
model_covnet.fit(x_train, y_train, validation_data=(x_test, y_test),
epochs=10, batch_size=200, verbose=2)
# Final evaluation of the model
scores = model_covnet.evaluate(x_test, y_test, verbose=0)
print("Baseline Error: %.2f%%" % (100-scores[1]*100))
```
Let us now plot 16 convolutional weights (16 filters) learned in the first convolutional layer:
```
layer_conv2d_1 = model_covnet.get_layer('conv2d_1')
weights2 = layer_conv2d_1.get_weights()[0]
bias2 = layer_conv2d_1.get_weights()[1]
#Reshape the filter weights into 7x7 kernels, one per filter
weights2 = weights2.reshape(7, 7, 32)
#Now plot the weights for the first 16 filters
plt.figure(figsize=(7,5))
for i in range(16):
plt.subplot(4, 4, 1 + i)
plt.imshow(weights2[:, :, i], cmap=plt.get_cmap('gray'))
plt.show()
```
For comparison, let's plot Sobel filters which are used in computer vision to detect horizontal and vertical edges. We see that the neural net weights look similar to Sobel filters.
```
sobelx = np.array([[1, 2, 0, -2, -1],
[4, 8, 0, -8, -4],
[6, 12, 0, -12, -6],
[4, 8, 0, -8, -4],
[1, 2, 0, -2, -1]]).astype('float32')/12.0
sobely = sobelx.transpose()
plt.figure(figsize=(2, 2))
plt.subplot(1, 2, 1)
plt.imshow(sobelx, cmap=plt.get_cmap('gray'))
plt.subplot(1, 2, 2)
plt.imshow(sobely, cmap=plt.get_cmap('gray'))
plt.show()
```
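As an optional extra step (this uses SciPy, which is not otherwise needed in this notebook, and assumes `x_train`, `sobelx`, and `sobely` from the cells above are still in memory), we can convolve one training digit with the two filters to see the edge responses they produce:
```
from scipy.signal import convolve2d

digit = x_train[0, :, :, 0].astype('float32')
# convolve2d flips the kernel (true convolution); for a visual comparison this only changes the sign
resp_x = convolve2d(digit, sobelx, mode='same')
resp_y = convolve2d(digit, sobely, mode='same')

plt.figure(figsize=(6, 2))
for i, (im, title) in enumerate([(digit, 'digit'),
                                 (resp_x, 'sobelx response'),
                                 (resp_y, 'sobely response')]):
    plt.subplot(1, 3, 1 + i)
    plt.imshow(im, cmap=plt.get_cmap('gray'))
    plt.title(title)
    plt.axis('off')
plt.show()
```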
# Lazy Mode and Logging
So far, we have seen Ibis in interactive mode. Interactive mode (also known as eager mode) makes Ibis return the
results of an operation immediately.
In most cases, instead of using interactive mode, it makes more sense to use the default lazy mode.
In lazy mode, Ibis doesn't execute the operations immediately; instead, it builds an expression that can be executed at a later time.
Let's see this in practice, starting with the same example as in previous tutorials - the geography database.
```
!curl -LsS -o $TEMPDIR/geography.db 'https://storage.googleapis.com/ibis-tutorial-data/geography.db'
import os
import tempfile
import ibis
connection = ibis.sqlite.connect(os.path.join(tempfile.gettempdir(), 'geography.db'))
countries = connection.table('countries')
```
In previous tutorials, we set interactive mode to `True`, and we obtained the result
of every operation.
```
ibis.options.interactive = True
countries['name', 'continent', 'population'].limit(3)
```
But now let's see what happens if we leave the `interactive` option at its default of `False`
and operate in lazy mode.
```
ibis.options.interactive = False
countries['name', 'continent', 'population'].limit(3)
```
What we find is the graph of the expressions that would return the desired result instead of the result itself.
Let's analyze the expressions in the graph:
- We query the `countries` table (all rows and all columns)
- We select the `name`, `continent` and `population` columns
- We limit the results to only the first `3` rows
Now consider that the data is in a database, possibly in a different host than the one executing Ibis.
Also consider that the results returned to the user need to be moved to the memory of the host executing Ibis.
When using interactive (or eager) mode, if we perform one operation at a time, this is what happens:
- We move all the rows and columns from the backend (database, big data system, etc.) into memory
- Once in memory, we discard all the columns except `name`, `continent` and `population`
- After that, we discard all the rows except the first `3`
This is not very efficient. If the table has millions of rows and is backed by a
big data system like Spark or Impala, it may not even be possible (there may not be enough memory to load all the data).
The solution is to use lazy mode. In lazy mode, instead of obtaining the results after each operation,
we build an expression (a graph) of all the operations that need to be done. Once all the operations
are recorded, the graph is sent to the backend, which performs them efficiently, moving only
the required data into memory.
You can think of this as writing a shopping list and asking someone to go to the supermarket and buy
everything on it once the list is complete, as opposed to having someone bring every product in
the supermarket to your home and then returning everything you don't want.
Let's continue with our example, save the expression in a variable `countries_expression`, and check its type.
```
countries_expression = countries['name', 'continent', 'population'].limit(3)
type(countries_expression)
```
The type is an Ibis `TableExpr`, since the result is a table (broadly speaking, you can think of it as a dataframe).
Now we have our query instructions (our expression, fetching only 3 columns and 3 rows) in the variable `countries_expression`.
At this point, nothing has been requested from the database: we have defined what we want to extract, but we haven't
asked the database for it yet. We can keep building on the expression if we are not done. Or, once we
are, we can simply request the result using the method `.execute()`.
```
countries_expression.execute()
```
We can build other types of expressions, for example one that, instead of returning a table,
returns a column.
```
population_in_millions = (countries['population'] / 1_000_000).name('population_in_millions')
population_in_millions
```
If we check its type, we can see that it is a `FloatingColumn` expression.
```
type(population_in_millions)
```
We can use the previous expression as a column in a table expression.
```
countries['name', 'continent', population_in_millions].limit(3)
```
Since we are in lazy mode (not interactive), those expressions don't request any data from the database
unless explicitly requested with `.execute()`.
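As a quick aside, Ibis expressions also provide a `compile()` method, so (with the SQLite backend we connected to above) you can inspect the SQL an expression would generate without executing it:
```
countries['name', 'continent', population_in_millions].limit(3).compile()
```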
## Logging queries
For SQL backends (and for others when it makes sense), the query sent to the database can be logged.
This can be done by setting the `verbose` option to `True`.
```
ibis.options.verbose = True
countries['name', 'continent', population_in_millions].limit(3).execute()
```
By default, the query is logged to the terminal, but we can process it with a custom function instead.
This allows us to save executed queries to a file, store them in a database, send them to a web service, etc.
For example, to save queries to a file, we can write a custom function that, given a query, appends it to a
log file.
```
import datetime
import tempfile
from pathlib import Path
def log_query_to_file(query: str) -> None:
    """
    Append each executed query to `tutorial_queries.log` in the
    system temporary directory.
    Each line is one query, prefixed with a timestamp of when it
    was executed. Line breaks in the query are represented with
    the string '\n'.
    """
    dirname = Path(tempfile.gettempdir())
    fname = dirname / 'tutorial_queries.log'
    query_in_a_single_line = query.replace('\n', r'\n')
    timestamp = datetime.datetime.now().isoformat()
    with fname.open(mode='a') as f:
        f.write(f'{timestamp} {query_in_a_single_line}\n')
```
Then we can set the `verbose_log` option to the custom function, execute one query,
wait one second, and execute another query.
```
import time
ibis.options.verbose_log = log_query_to_file
countries.execute()
time.sleep(1.)
countries['name', 'continent', population_in_millions].limit(3).execute()
```
This has created a `tutorial_queries.log` file in the system temporary directory, where the executed queries have been logged (each line prefixed with its timestamp).
```
!cat -n $TEMPDIR/tutorial_queries.log
```
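Once you are done experimenting, you may want to switch the logging off again. As far as I know the defaults are `False` for `verbose` and `None` for `verbose_log`, so this should restore the original behaviour:
```
ibis.options.verbose = False
ibis.options.verbose_log = None  # assumed default: log to the terminal when verbose is on
```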
# Plotting
In this notebook, I'll develop a function to plot subjects and their labels.
```
from astropy.coordinates import SkyCoord
import astropy.io.fits
import astropy.wcs
import h5py
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import numpy
import skimage.exposure
import sklearn.neighbors
import sklearn.pipeline
import sklearn.preprocessing
CROWDASTRO_H5_PATH = 'data/crowdastro.h5'
PATCH_DIAMETER = 200
FITS_CONVENTION = 1
ARCMIN = 1 / 60
IMAGE_SIZE = 200 * 200
NORRIS_DAT_PATH = 'data/norris_2006_atlas_classifications_ra_dec_only.dat'
TRAINING_H5_PATH = 'data/training.h5'
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5:
N_ASTRO = 5 if f_h5.attrs['ir_survey'] == 'wise' else 6
%matplotlib inline
```
## Displaying radio images
Radio images look pretty terrible, so let's run a filter over them to make them a little easier to see. I'll use skimage and try a few different ones.
Let's get an example and look at the basic output.
```
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5:
image = f_h5['/atlas/cdfs/numeric'][250, 2 : 2 + PATCH_DIAMETER ** 2].reshape((PATCH_DIAMETER, PATCH_DIAMETER))
plt.imshow(image, cmap='gray')
plt.show()
```
It's hard to make out any features. Now, let's run some filters on it.
```
fig = plt.figure(figsize=(18, 27))
def subplot_imshow_hist(i, fig, im, title):
ax = fig.add_subplot(6, 3, i)
ax.imshow(im, cmap='gray')
ax.set_title(title)
ax.axis('off')
ax = fig.add_subplot(6, 3, i + 3)
ax.hist(im.ravel(), bins=256, histtype='step', color='black')
ax.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
subplot_imshow_hist(1, fig, image, 'Default')
subplot_imshow_hist(2, fig, skimage.exposure.equalize_adapthist(image, clip_limit=0.01), 'Adaptive equalisation')
subplot_imshow_hist(3, fig, skimage.exposure.equalize_hist(image), 'Histogram equalisation')
subplot_imshow_hist(7, fig, skimage.exposure.rescale_intensity(image, in_range=tuple(numpy.percentile(image, (0.75, 99.25)))),
'Constant stretching 0.75 - 99.25')
subplot_imshow_hist(8, fig, skimage.exposure.rescale_intensity(image, in_range=tuple(numpy.percentile(image, (1, 99)))),
'Constant stretching 1 - 99')
subplot_imshow_hist(9, fig, skimage.exposure.rescale_intensity(image, in_range=tuple(numpy.percentile(image, (2, 98)))),
'Constant stretching 2 - 98')
subplot_imshow_hist(13, fig, numpy.sqrt(image - image.min()), 'Square root')
subplot_imshow_hist(14, fig, numpy.log(image - image.min() + 1e-5), 'Logarithm + 1e-5')
subplot_imshow_hist(15, fig, numpy.log(image + 1), 'Logarithm + 1')
```
Square root looks good, so let's blitz that over some random images and see how it looks.
```
fig = plt.figure(figsize=(18, 25))
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5:
indices = numpy.arange(f_h5['/atlas/cdfs/numeric'].shape[0])
numpy.random.seed(10000)
numpy.random.shuffle(indices)
for j, i in enumerate(indices[:3]):
image = f_h5['/atlas/cdfs/numeric'][i, 2 : 2 + PATCH_DIAMETER ** 2].reshape((PATCH_DIAMETER, PATCH_DIAMETER))
subplot_imshow_hist(j + 1, fig, numpy.sqrt(image - image.min()), str(i))
```
## Plotting IR objects
This is an extremely unpleasant operation: We have to find the pixel coordinates of each IR location, which are all specified in RA/DEC.
```
from crowdastro.config import config
with astropy.io.fits.open(config['data_sources']['atlas_image'],
ignore_blank=True) as atlas_image:
wcs = astropy.wcs.WCS(atlas_image[0].header).dropaxis(3).dropaxis(2)
def ra_dec_to_pixels(subject_coords, coords):
offset, = wcs.all_world2pix([subject_coords], FITS_CONVENTION)
# The coords are of the middle of the subject.
coords = wcs.all_world2pix(coords, FITS_CONVENTION)
coords -= offset
coords[:, 0] /= config['surveys']['atlas']['mosaic_scale_x'] * 424 / 200
coords[:, 1] /= config['surveys']['atlas']['mosaic_scale_y'] * 424 / 200
coords += [40, 40]
return coords
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5:
i = 296
image = f_h5['/atlas/cdfs/numeric'][i, 2 : 2 + PATCH_DIAMETER ** 2].reshape(
(PATCH_DIAMETER, PATCH_DIAMETER))[60:140, 60:140]
radio_coords = f_h5['/atlas/cdfs/numeric'][i, :2]
nearby = f_h5['/atlas/cdfs/numeric'][i, 2 + PATCH_DIAMETER ** 2:] < ARCMIN
ir_coords = f_h5['/swire/cdfs/numeric'][nearby, :2]
ir_coords = ra_dec_to_pixels(radio_coords, ir_coords)
plt.imshow(numpy.sqrt(image - image.min()), cmap='gray')
plt.scatter(ir_coords[:, 0], ir_coords[:, 1])
```
## Displaying classifications
The simplest thing we can do is to just highlight the host galaxies, so let's load up the Norris et al. classifications and have a look.
```
# Load labels.
with h5py.File(TRAINING_H5_PATH, 'r') as training_h5:
crowdsourced_labels = training_h5['labels'].value
with h5py.File(CROWDASTRO_H5_PATH, 'r') as crowdastro_h5:
ir_names = crowdastro_h5['/swire/cdfs/string'].value
ir_positions = crowdastro_h5['/swire/cdfs/numeric'].value[:, :2]
ir_tree = sklearn.neighbors.KDTree(ir_positions)
with open(NORRIS_DAT_PATH, 'r') as norris_dat:
norris_coords = [r.strip().split('|') for r in norris_dat]
norris_labels = numpy.zeros((len(ir_positions)))
for ra, dec in norris_coords:
# Find a neighbour.
skycoord = SkyCoord(ra=ra, dec=dec, unit=('hourangle', 'deg'))
ra = skycoord.ra.degree
dec = skycoord.dec.degree
((dist,),), ((ir,),) = ir_tree.query([(ra, dec)])
if dist < 0.1:
norris_labels[ir] = 1
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5:
i = 250
image = f_h5['/atlas/cdfs/numeric'][i, 2 : 2 + PATCH_DIAMETER ** 2].reshape(
(PATCH_DIAMETER, PATCH_DIAMETER))[60:140, 60:140]
radio_coords = f_h5['/atlas/cdfs/numeric'][i, :2]
nearby = f_h5['/atlas/cdfs/numeric'][i, 2 + PATCH_DIAMETER ** 2:] < ARCMIN
ir_coords = f_h5['/swire/cdfs/numeric'][nearby, :2]
ir_coords = ra_dec_to_pixels(radio_coords, ir_coords)
plt.imshow(numpy.sqrt(image - image.min()), cmap='gray')
plt.scatter(ir_coords[:, 0], ir_coords[:, 1])
labels = norris_labels[nearby].astype(bool)
nearby_hosts = ir_coords[labels]
plt.scatter(nearby_hosts[:, 0], nearby_hosts[:, 1], c='red')
```
What about displaying classifications from my classifier?
```
from crowdastro.classifier import RGZClassifier
from sklearn.ensemble import RandomForestClassifier
with h5py.File(TRAINING_H5_PATH, 'r') as f_h5:
classifier = RGZClassifier(f_h5['features'].value, N_ASTRO)
classifier.train(numpy.arange(f_h5['features'].shape[0]), norris_labels)
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5:
i = 250
image = f_h5['/atlas/cdfs/numeric'][i, 2 : 2 + PATCH_DIAMETER ** 2].reshape(
(PATCH_DIAMETER, PATCH_DIAMETER))[60:140, 60:140]
radio_coords = f_h5['/atlas/cdfs/numeric'][i, :2]
nearby = f_h5['/atlas/cdfs/numeric'][i, 2 + PATCH_DIAMETER ** 2:] < ARCMIN
ir_coords = f_h5['/swire/cdfs/numeric'][nearby, :2]
ir_coords = ra_dec_to_pixels(radio_coords, ir_coords)
vec = f_h5['/atlas/cdfs/numeric'][i, :]
probs = classifier.predict_probabilities(vec)[nearby]
nearby_norris = ir_coords[norris_labels[nearby].astype('bool')]
nearby_rgz = ir_coords[crowdsourced_labels[nearby].astype('bool')]
plt.figure(figsize=(15, 15))
base_size = 200
plt.imshow(numpy.sqrt(image - image.min()), cmap='gray')
plt.scatter(ir_coords[:, 0], ir_coords[:, 1], s=probs * base_size, c=probs, marker='o', cmap='cool')
plt.scatter(nearby_norris[:, 0], nearby_norris[:, 1], s=base_size, c='green', marker='*')
plt.axis('off')
# plt.scatter(nearby_rgz[:, 0], nearby_rgz[:, 1], s=50, c='cyan', marker='x', alpha=0.5)
plt.xlim((0, 80))
plt.ylim((0, 80))
```
## Plotting a committee
If we have multiple classifiers, how should we output their predictions?
```
with h5py.File(TRAINING_H5_PATH, 'r') as f_h5:
classifiers = [RGZClassifier(f_h5['features'], N_ASTRO) for _ in range(10)]
for classifier in classifiers:
subset = numpy.arange(f_h5['features'].shape[0])
numpy.random.shuffle(subset)
subset = subset[:len(subset) // 50]
subset = sorted(subset)
classifier.train(list(subset), norris_labels[subset])
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5:
i = 250
image = f_h5['/atlas/cdfs/numeric'][i, 2 : 2 + PATCH_DIAMETER ** 2].reshape(
(PATCH_DIAMETER, PATCH_DIAMETER))[60:140, 60:140]
radio_coords = f_h5['/atlas/cdfs/numeric'][i, :2]
nearby = f_h5['/atlas/cdfs/numeric'][i, 2 + PATCH_DIAMETER ** 2:] < ARCMIN
ir_coords = f_h5['/swire/cdfs/numeric'][nearby, :2]
ir_coords = ra_dec_to_pixels(radio_coords, ir_coords)
vec = f_h5['/atlas/cdfs/numeric'][i, :]
probs = [classifier.predict_probabilities(vec)[nearby] for classifier in classifiers]
# Set all but the top n predictions to zero.
n = 1
for probs_ in probs:
top_n = sorted(probs_, reverse=True)[:n]
for j, prob in enumerate(probs_):
if prob not in top_n:
probs_[j] = 0
plt.figure(figsize=(10, 10))
base_size = 200
plt.imshow(numpy.sqrt(image - image.min()), cmap='gray')
colours = cm.rainbow(numpy.linspace(0, 1, 10))
for colour, probs_ in zip(colours, probs):
plt.scatter(ir_coords[:, 0] + numpy.random.normal(size=ir_coords.shape[0], scale=0.5),
ir_coords[:, 1] + numpy.random.normal(size=ir_coords.shape[0], scale=0.5),
s=probs_ * base_size, marker='x', c=colour, alpha=1)
plt.axis('off')
plt.xlim((0, 80))
plt.ylim((0, 80))
```
These classifiers have really low diversity because of the way I divided up the data, but this should work fine.
```
def plot_points_on_background(points, background, noise=False, base_size=200):
plt.imshow(background, cmap='gray')
colours = cm.rainbow(numpy.linspace(0, 1, len(points)))
for colour, (x, y) in zip(colours, points):
if noise:
x += numpy.random.normal(scale=0.5)
y += numpy.random.normal(scale=0.5)
plt.scatter(x, y, marker='o', c=colour, s=base_size)
plt.axis('off')
plt.xlim((0, background.shape[0]))
plt.ylim((0, background.shape[1]))
def plot_classifications(atlas_vector, ir_matrix, labels, base_size=200):
image = atlas_vector[2 : 2 + PATCH_DIAMETER ** 2].reshape((PATCH_DIAMETER, PATCH_DIAMETER)
)[60:140, 60:140]
radio_coords = atlas_vector[:2]
nearby = atlas_vector[2 + PATCH_DIAMETER ** 2:] < ARCMIN
labels = labels[nearby]
ir_coords = ir_matrix[nearby, :2][labels.astype(bool)]
ir_coords = ra_dec_to_pixels(radio_coords, ir_coords)
plot_points_on_background(ir_coords, image, base_size=base_size)
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5:
i = 250
atlas_vector = f_h5['/atlas/cdfs/numeric'][i, :]
ir_coords = f_h5['/swire/cdfs/numeric']
plot_classifications(atlas_vector, ir_coords, norris_labels)
```
## Bringing it all together
We want to plot classifications, RGZ labels, and Norris labels in the same row.
```
def plot_classifications_row(atlas_vector, ir_matrix, classifier_labels, rgz_labels, norris_labels, base_size=200):
plt.subplot(1, 3, 1)
plt.title('Classifier')
plot_classifications(atlas_vector, ir_matrix, classifier_labels, base_size=base_size)
plt.subplot(1, 3, 2)
plt.title('RGZ')
plot_classifications(atlas_vector, ir_matrix, rgz_labels, base_size=base_size)
plt.subplot(1, 3, 3)
plt.title('Norris')
plot_classifications(atlas_vector, ir_matrix, norris_labels, base_size=base_size)
with h5py.File(TRAINING_H5_PATH, 'r') as f_h5:
classifier = RGZClassifier(f_h5['features'].value, N_ASTRO)
classifier.train(numpy.arange(f_h5['features'].shape[0]), norris_labels)
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5:
i = 250
vec = f_h5['/atlas/cdfs/numeric'][i, :]
mat = f_h5['/swire/cdfs/numeric']
probs = classifier.predict_probabilities(vec)
labels = numpy.zeros(probs.shape)
labels[probs.argmax()] = 1
plt.figure(figsize=(20, 10))
plot_classifications_row(vec, mat, labels, crowdsourced_labels, norris_labels, base_size=200)
```
```
import numpy as np
import matplotlib.pyplot as plt
from xentropy import dihedrals
from astropy import units as au
```
# single Gaussian distro
## create artificial data
```
data= np.random.randn(100000)*30
```
## perform kde
```
dih_ent = dihedrals.dihedralEntropy(data=data, verbose=True, input_unit="degree")
dih_ent.calculate()
```
## plot normalized histogram and kde
```
f,axs = plt.subplots(ncols=2,figsize=(12,6))
axs[0].hist(data,180,density=True, label="histogram")
xs, ys = dih_ent.pdf_x_deg,dih_ent.pdf_deg
axs[0].plot(xs,ys, lw=5,alpha=.7, label="XEntropy KDE\nS = {:.3f} J/(mol*K)".format(dih_ent.entropy))
axs[0].set(xlabel="artif. dihedrals / degree", ylabel="prob. density / degree$^{-1}$")
axs[1].hist(data/180*np.pi,180,density=True, label="histogram")
xs, ys = dih_ent.pdf_x,dih_ent.pdf
axs[1].plot(xs,ys, lw=5,alpha=.7, label="XEntropy KDE\nS = {:.3f} J/(mol*K)".format(dih_ent.entropy))
axs[1].set(xlabel="artif. dihedrals / radian", ylabel="prob. density / radian$^{-1}$")
for ax in axs:
ax.legend(loc="upper right")
```
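As an optional sanity check (not part of the XEntropy API), we can compare the KDE-based value with the analytic differential entropy of a Gaussian, S = (R/2) ln(2 pi e sigma^2), using sigma = 30 degrees converted to radians; this assumes the reported entropy corresponds to the radian-based pdf:
```
# Analytic entropy of a Gaussian with sigma = 30 degrees (assumption: the reported
# entropy is S = -R * integral p ln p dtheta with theta in radians).
R = 8.314462618                 # gas constant in J/(mol*K)
sigma = np.deg2rad(30.0)        # 30 degrees in radians
S_analytic = 0.5 * R * np.log(2 * np.pi * np.e * sigma**2)
print("analytic S = {:.3f} J/(mol*K)".format(S_analytic))
```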
# Gaussians of variable width
## create artificial data
```
data= [np.random.randn(100000)*20,
np.random.randn(100000)*30,
np.random.randn(100000)*40,
np.random.randn(100000)*50]
```
## perform kde
```
dih_ent = dihedrals.dihedralEntropy(data=data,verbose=True,input_unit="degree")
dih_ent.calculate()
```
## plot normalized histogram and kde
```
f,axs = plt.subplots(2,2,figsize=(12,12),sharex=True,sharey=True)
for ax,dat,xs,ys,S in zip(axs.flatten(),data,dih_ent.pdf_x_deg,dih_ent.pdf_deg, dih_ent.entropy):
ax.hist(dat,180,density=True, label="histogram")
ax.plot(xs,ys, lw=5,alpha=.7, label="XEntropy KDE\nS = {:.3f} J/(mol*K)".format(S))
ax.set(xlabel="artificial dihedrals", ylabel="probability density")
ax.legend()
f.tight_layout()
```
# binodal distributions
## create artificial data
```
def binodal_data(n_samples=1001,w1=10,w2=10):
n1 = n_samples//2
n2 = n_samples-n1
p1 = np.random.randn(n1)*w1-90
p2 = np.random.randn(n2)*w2+90
return np.concatenate([p1,p2])
data= [binodal_data(100000,5,25),
binodal_data(100000,15,25),
binodal_data(100000,25,25),
binodal_data(100000,35,25)]
```
## perform kde
```
dih_ent = dihedrals.dihedralEntropy(data=data, verbose=False, input_unit="degree")
dih_ent.calculate()
```
## plot normalized histogram and kde
```
f,axs = plt.subplots(2,2,figsize=(12,12),sharex=True,sharey=True)
for ax,dat,xs,ys,S in zip(axs.flatten(), data,dih_ent.pdf_x_deg, dih_ent.pdf_deg, dih_ent.entropy):
ax.hist(dat,180,density=True, label="histogram")
ax.plot(xs,ys, lw=5,alpha=.7, label="XEntropy KDE\nS = {:.3f} J/(mol*K)".format(S))
ax.set(xlabel="artificial dihedrals", ylabel="probability density")
ax.legend()
f.tight_layout()
```
# shifted binodal distributions
## create artificial data
```
def binodal_data(n_samples=1001,w1=10,w2=10):
    n1 = n_samples//2
    n2 = n_samples-n1
    # two Gaussian modes centred at 0 and (before wrapping) 180 degrees
    p1 = np.random.randn(n1)*w1
    p2 = np.random.randn(n2)*w2+180
    # wrap the angles back into the periodic interval [-180, 180)
    return np.divmod(np.concatenate([p1,p2]),360)[1]-180
data= [binodal_data(100000,5,25),
binodal_data(100000,15,25),
binodal_data(100000,25,25),
binodal_data(100000,35,25)]
```
## perform kde
```
dih_ent = dihedrals.dihedralEntropy(data=data, verbose=False, input_unit="degree")
dih_ent.calculate()
```
## plot normalized histogram and kde
```
f,axs = plt.subplots(2,2,figsize=(12,12),sharex=True,sharey=True)
for ax,dat,xs,ys,S in zip(axs.flatten(),data,dih_ent.pdf_x_deg,dih_ent.pdf_deg, dih_ent.entropy):
ax.hist(dat,180,density=True, label="histogram")
ax.plot(xs,ys, lw=5,alpha=.7, label="XEntropy KDE\nS = {:.3f} J/(mol*K)".format(S))
ax.set(xlabel="artificial dihedrals", ylabel="probability density")
ax.legend()
f.tight_layout()
```
# trinodal distributions (butane-like)
## create artificial data
```
def trinodal_data(n_samples=1001,w1=20,w2=20,w3=20):
n1 = int(n_samples*2/5)
n2 = int((n_samples-n1)/2)
n3 = n_samples-n1-n2
p1 = np.random.randn(n1)*w1
p2 = np.random.randn(n2)*w2-120
p3 = np.random.randn(n3)*w3+120
return np.concatenate([p1,p2,p3])
data= trinodal_data(100000)
```
## perform kde
```
dih_ent = dihedrals.dihedralEntropy(data=data, verbose=False, input_unit="degree")
dih_ent.calculate()
```
## plot normalized histogram and kde
```
f,axs = plt.subplots()
xs, ys = dih_ent.pdf_x_deg,dih_ent.pdf_deg
axs.hist(data,180,density=True, label="histogram")
axs.plot(xs,ys, lw=5,alpha=.7, label="XEntropy KDE\nS = {:.3f} J/(mol*K)".format(dih_ent.entropy))
axs.set(xlabel="artificial dihedrals", ylabel="probability density")
axs.legend()
```