path | concatenated_notebook
---|---
notebooks/data_for_presentation_preparation.ipynb | ###Markdown
Data preparation based on pretrained models. Each section should work individually, as long as all files used in it are already available. Tweets: Step 1 - drop unnecessary columns
###Code
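# Imports assumed by the cells in this notebook (the original import cell is not included in this copy):
import pickle as pkl

import numpy as np
import pandas as pd
import fasttext
import plotly.express as px
from tqdm import tqdm
from wordcloud import WordCloud
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
from sklearn.preprocessing import Normalizer, StandardScaler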
tweets = pd.read_pickle('../datasets/tweets.pkl.gz')
filtered = tweets[['username', 'id', 'link', 'tweet']]
filtered.to_pickle('../datasets/for_presentation/tweets_raw.pkl.gz')
###Output
_____no_output_____
###Markdown
Step 2 - join with users/parties/coalitions
###Code
filtered = pd.read_pickle('../datasets/for_presentation/tweets_raw.pkl.gz')
users = pd.read_csv('../datasets/accounts_processed.csv', index_col=0)
users = users[['username', 'party', 'coalition', 'pozycja']]
users = users.rename(columns={'pozycja': 'role'})
users['username'] = users['username'].apply(str.lower)
tweets_users = filtered.merge(users, on='username')
tweets_users.to_pickle('../datasets/for_presentation/tweets_with_party_coalition.pkl.gz')
###Output
_____no_output_____
###Markdown
Stage 3 - calculate sentiment
###Code
sentiment_model = fasttext.load_model('../trained_models/sentiment_model.bin')
clean_tweets = pd.read_pickle('../datasets/tweets_cleaned_emojied2text.pkl.gz')
tweets_users = pd.read_pickle('../datasets/for_presentation/tweets_with_party_coalition.pkl.gz')
clean_tweets['tweet'] = clean_tweets['tweet'].apply(str.lower)
clean_tweets = clean_tweets[['id', 'tweet']]
just_tweets = clean_tweets['tweet'].tolist()
%%time
predictions = sentiment_model.predict(just_tweets)[0]
predictions = [label for sublist in predictions for label in sublist]
clean_tweets['sentiment'] = predictions
clean_tweets = clean_tweets[['id', 'sentiment']]
tweets_users_sentiment = tweets_users.merge(clean_tweets, on='id', how='right')
tweets_users_sentiment.replace(to_replace={
'__label__positive': 'positive',
'__label__negative': 'negative',
'__label__ambiguous': 'ambiguous',
'__label__neutral': 'neutral'
}, inplace=True)
tweets_users_sentiment['sentiment'].value_counts()
tweets_users_sentiment.to_pickle('../datasets/for_presentation/tweets_with_party_coalition_sentiment.pkl.gz')
###Output
_____no_output_____
###Markdown
Stage 4 - calculate topics
###Code
tweets_users_sentiment = pd.read_pickle('../datasets/for_presentation/tweets_with_party_coalition_sentiment.pkl.gz')
clean_tweets = pd.read_pickle('../datasets/tweets_cleaned_lemma_stopwords.pkl.gz')
with open('../trained_models/vectorizer_10.pkl.gz', 'rb') as vec_file:
vectorizer: CountVectorizer = pkl.load(vec_file)
with open('../trained_models/lda_10.pkl.gz', 'rb') as lda_file:
lda: LatentDirichletAllocation = pkl.load(lda_file)
tweets_texts = clean_tweets.tweet.tolist()
counts = vectorizer.transform(tweets_texts)
probas = lda.transform(counts)
labels = np.argmax(probas, axis=1)
prob_values = np.max(probas, axis=1)
clean_tweets['topic'] = labels
clean_tweets['topic_proba'] = prob_values
clean_tweets = clean_tweets[['id', 'topic', 'topic_proba']]
tweets_users_sentiment_topic = tweets_users_sentiment.merge(clean_tweets, on='id')
tweets_users_sentiment_topic.to_pickle('../datasets/for_presentation/tweets_with_party_coalition_sentiment_topic.pkl.gz')
###Output
_____no_output_____
###Markdown
Topics: Words per topic
###Code
with open('../trained_models/vectorizer_10.pkl.gz', 'rb') as vec_file:
vectorizer: CountVectorizer = pkl.load(vec_file)
with open('../trained_models/lda_10.pkl.gz', 'rb') as lda_file:
lda: LatentDirichletAllocation = pkl.load(lda_file)
words_in_topics = {}
for topic_num, topic in enumerate(lda.components_):
frequencies = [
{
'text': name,
'value': freq
}
for name, freq in zip(vectorizer.get_feature_names(), topic)
]
frequencies.sort(key=lambda x: x['value'], reverse=True)
words_in_topics[topic_num] = frequencies
with open('../datasets/for_presentation/words_per_topic.pkl.gz', 'wb') as f:
pkl.dump(words_in_topics, f)
###Output
_____no_output_____
###Markdown
Extra - visualisation of topics
###Code
for i in range(len(lda.components_)):
topic = lda.components_[i]
frequencies = {name: freq for name, freq in zip(vectorizer.get_feature_names(), topic)}
wordcloud = WordCloud(
width=1920, height=1080, background_color="white"
).generate_from_frequencies(frequencies=frequencies)
fig = px.imshow(wordcloud, title=f"Topic {i}")
fig.show()
###Output
_____no_output_____
###Markdown
Topics per user/party/coalition
###Code
clean_tweets = pd.read_pickle('../datasets/tweets_cleaned_lemma_stopwords.pkl.gz')
with open('../trained_models/vectorizer_10.pkl.gz', 'rb') as vec_file:
vectorizer: CountVectorizer = pkl.load(vec_file)
with open('../trained_models/lda_10.pkl.gz', 'rb') as lda_file:
lda: LatentDirichletAllocation = pkl.load(lda_file)
topics_count = len(lda.components_)
tweets_texts = clean_tweets.tweet.tolist()
counts = vectorizer.transform(tweets_texts)
probas = lda.transform(counts)
tweets_users_sentiment_topic = pd.read_pickle('../datasets/for_presentation/tweets_with_party_coalition_sentiment_topic.pkl.gz')
a = clean_tweets.merge(tweets_users_sentiment_topic, on='id')
a.rename(columns={'username_x': 'username'}, inplace=True)
a = a.reset_index()
def get_topic_distribution_for_column(column_value, column_name):
indices = np.array(a[a[column_name]==column_value].index.tolist())
topics = probas[indices]
values = np.sum(topics, axis=0)
distribution = values / np.sum(values)
return distribution
topics_distributions = {
'per_user': {},
'per_party': {},
'per_coalition': {}
}
unique_usernames = a.username.unique()
unique_parties = a.party.unique()
unique_coalitions = a.coalition.unique()
for username in tqdm(unique_usernames):
topics_distributions['per_user'][username] = [
{
'topic': t,
'part': p
}
for t, p
in zip(range(topics_count), get_topic_distribution_for_column(
column_name='username',
column_value=username))
]
for party in tqdm(unique_parties):
topics_distributions['per_party'][party] = [
{
'topic': t,
'part': p
}
for t, p
in zip(range(topics_count), get_topic_distribution_for_column(
column_name='party',
column_value=party))
]
for coalition in tqdm(unique_coalitions):
topics_distributions['per_coalition'][coalition] = [
{
'topic': t,
'part': p
}
for t, p
in zip(range(topics_count), get_topic_distribution_for_column(
column_name='coalition',
column_value=coalition))
]
with open('../datasets/for_presentation/topics_distributions.pkl.gz', 'wb') as f:
pkl.dump(topics_distributions, f)
###Output
_____no_output_____
###Markdown
Words: Words per user/party/coalition
###Code
clean_tweets = pd.read_pickle('../datasets/tweets_cleaned_lemma_stopwords.pkl.gz')
tweets_users_sentiment_topic = pd.read_pickle('../datasets/for_presentation/tweets_with_party_coalition_sentiment_topic.pkl.gz')
a = clean_tweets.merge(tweets_users_sentiment_topic, on='id', suffixes=('', '_y'))
a.rename(columns={'username_x': 'username'}, inplace=True)
a.reset_index(inplace=True)
del clean_tweets
del tweets_users_sentiment_topic
with open('../trained_models/vectorizer_10.pkl.gz', 'rb') as vec_file:
vectorizer: CountVectorizer = pkl.load(vec_file)
counts = vectorizer.transform(a.tweet.tolist())
def get_word_counts_for_column(column_name, column_value):
indices = np.array(a[a[column_name]==column_value].index.tolist())
words = counts[indices]
summed = np.sum(words, axis=0)
return np.array(summed).squeeze().tolist()
words_counts = {
'per_user': {},
'per_party': {},
'per_coalition': {}
}
unique_usernames = a.username.unique()
unique_parties = a.party.unique()
unique_coalitions = a.coalition.unique()
for username in tqdm(unique_usernames):
tmp = [
{
'text': name,
'value': freq
}
for name, freq
in zip(
vectorizer.get_feature_names(),
get_word_counts_for_column(
column_name='username',
column_value=username
)
)
]
tmp.sort(key=lambda x: x['value'], reverse=True)
words_counts['per_user'][username] = tmp
for party in tqdm(unique_parties):
tmp = [
{
'text': name,
'value': freq
}
for name, freq
in zip(
vectorizer.get_feature_names(),
get_word_counts_for_column(
column_name='party',
column_value=party
)
)
]
tmp.sort(key=lambda x: x['value'], reverse=True)
words_counts['per_party'][party] = tmp
for coalition in tqdm(unique_coalitions):
tmp = [
{
'text': name,
'value': freq
}
for name, freq
in zip(
vectorizer.get_feature_names(),
get_word_counts_for_column(
column_name='coalition',
column_value=coalition
)
)
]
tmp.sort(key=lambda x: x['value'], reverse=True)
words_counts['per_coalition'][coalition] = tmp
with open('../datasets/for_presentation/words_counts.pkl.gz', 'wb') as f:
pkl.dump(words_counts, f)
###Output
_____no_output_____
###Markdown
Sentiment: Sentiment per user/party/coalition/topic
###Code
a = pd.read_pickle('../datasets/for_presentation/tweets_with_party_coalition_sentiment_topic.pkl.gz')
sent_values = ['negative', 'neutral', 'positive', 'ambiguous']
def get_sentiment_distribution_by_column(column_name, column_value):
sent_counts = a[a[column_name] == column_value].sentiment.value_counts()
tweets_count = sent_counts.sum()
result = []
for sent in sent_values:
if sent in sent_counts.index:
result.append((sent, sent_counts[sent] / tweets_count))
else:
result.append((sent, 0))
return result
sentiment_distributions = {
'per_user': {},
'per_party': {},
'per_coalition': {},
'per_topic': {}
}
unique_usernames = a.username.unique()
unique_parties = a.party.unique()
unique_coalitions = a.coalition.unique()
unique_topics = a.topic.unique()
for username in tqdm(unique_usernames):
sentiment_distributions['per_user'][username] = get_sentiment_distribution_by_column(
column_name='username',
column_value=username
)
for topic in tqdm(unique_topics):
sentiment_distributions['per_topic'][topic] = get_sentiment_distribution_by_column(
column_name='topic',
column_value=topic
)
for party in tqdm(unique_parties):
sentiment_distributions['per_party'][party] = get_sentiment_distribution_by_column(
column_name='party',
column_value=party
)
for coalition in tqdm(unique_coalitions):
sentiment_distributions['per_coalition'][coalition] = get_sentiment_distribution_by_column(
column_name='coalition',
column_value=coalition
)
with open('../datasets/for_presentation/sentiment_distributions.pkl.gz', 'wb') as f:
pkl.dump(sentiment_distributions, f)
###Output
_____no_output_____
###Markdown
Coalitions and parties: Extract info about each party and coalition for quicker access
###Code
accounts = pd.read_csv('../datasets/accounts_processed.csv')
parties = accounts.groupby('party').max()
parties.reset_index(inplace=True)
parties = parties[['party', 'coalition']]
parties.to_csv('../datasets/for_presentation/parties.csv')
###Output
_____no_output_____
###Markdown
Graph positions: t-SNE
###Code
tweets = pd.read_pickle('../datasets/for_presentation/tweets_with_party_coalition_sentiment_topic.pkl.gz')
usernames = tweets.username.unique()
embedding_data = pd.read_csv('../datasets/embeddings.csv')
embedding_data['username'] = embedding_data['username'].str.lower()
embedding_data = embedding_data[embedding_data['username'].isin(usernames)]
# use the built-in float here (np.float was removed in recent numpy versions)
embeddings = np.array([np.array([float(i) for i in x.replace("]", "").replace("[", "").split()]) for x in embedding_data['embedding'].tolist()])
embeddings.shape
%%time
tsne3d = TSNE(n_components=3).fit_transform(embeddings)
%%time
tsne2d = TSNE(n_components=2).fit_transform(embeddings)
embeddings_normalized = Normalizer().fit_transform(embeddings)
embeddings_standardized = StandardScaler().fit_transform(embeddings)
tsne3d_standardized = TSNE(n_components=3).fit_transform(embeddings_standardized)
tsne3d_normalized = TSNE(n_components=3).fit_transform(embeddings_normalized)
tsne2d_standardized = TSNE(n_components=2).fit_transform(embeddings_standardized)
tsne2d_normalized = TSNE(n_components=2).fit_transform(embeddings_normalized)
graph_positions = pd.DataFrame(tsne3d, columns=['3D_x', '3D_y', '3D_z'])
graph_positions['2D_x'] = tsne2d[:, 0]
graph_positions['2D_y'] = tsne2d[:, 1]
graph_positions['username'] = usernames
graph_positions.to_csv('../datasets/for_presentation/graph_tsne.csv', index=False)
# def calc_clustering_and_graph(self, embedding: np.ndarray):  # leftover stub that was never implemented
###Output
_____no_output_____
###Markdown
Clusters: KMeans
###Code
tweets = pd.read_pickle('../datasets/for_presentation/tweets_with_party_coalition_sentiment_topic.pkl.gz')
usernames = tweets.username.unique()
embedding_data = pd.read_csv('../datasets/embeddings.csv')
embedding_data['username'] = embedding_data['username'].str.lower()
embedding_data = embedding_data[embedding_data['username'].isin(usernames)]
embeddings = np.array([np.array([float(i) for i in x.replace("]", "").replace("[", "").split()]) for x in embedding_data['embedding'].tolist()])
embeddings.shape
clusters = KMeans(n_clusters=6).fit(embeddings)
with open('../trained_models/kmeans.pkl.gz', 'wb') as f:
pkl.dump(clusters, f)
df = pd.DataFrame(usernames, columns=['username'])
df['kmeans_cluster'] = clusters.labels_
df.to_csv('../datasets/for_presentation/clusters.csv', index=False)
###Output
_____no_output_____ |
notebooks/Data- Understanding-V1.ipynb | ###Markdown
Data Understanding
Data sources:
- RKI, via web scraping: https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Fallzahlen.html
- Johns Hopkins (GitHub): https://github.com/CSSEGISandData/COVID-19.git
- REST API services to retrieve data: https://npgeo-corona-npgeo-de.hub.arcgis.com/
Johns Hopkins GitHub CSV data
###Code
import pandas as pd
import numpy as np
from datetime import datetime
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
mpl.rcParams['figure.figsize']=(16,9)
pd.set_option('display.max_rows',500)
data_path='../data/raw/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
pd_raw=pd.read_csv(data_path)
pd_raw.head()
time_idx=pd_raw.columns[4:]
df_plot=pd.DataFrame({
'date':time_idx})
df_plot.head()
pd_raw['Country/Region']
pd_raw[pd_raw['Country/Region']=='Germany'].iloc[:,4::].sum(axis=0)
###Output
_____no_output_____
###Markdown
Explorative Data Analysis. The focus is often a visual representation of the data.
###Code
df_plot=pd.read_csv('../data/processed/COVID_small_flat_table.csv',sep=';')
df_plot.head()
plt.figure();
ax=df_plot.iloc[15:,:].set_index('date').plot()
plt.ylim(10,30000)
ax.set_yscale('log')
###Output
_____no_output_____
###Markdown
Web scraping
###Code
import requests
from bs4 import BeautifulSoup
page = requests.get("https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Fallzahlen.html")
soup = BeautifulSoup(page.content,'html.parser')
soup.get_text()
html_table=soup.find('table')
all_rows=html_table.find_all('tr')
final_data_list=[]
for pos,rows in enumerate(all_rows):
col_list=[each_col.get_text(strip=True) for each_col in rows.find_all('td')]
final_data_list.append(col_list)
pd_daily_status=pd.DataFrame(final_data_list).dropna().rename(columns={0:'state',
1:'cases',
2:'changes',
3:'cases_per_100k',
4:'fatal',
5:'comment'})
pd_daily_status.head()
###Output
_____no_output_____
###Markdown
REST API calls
###Code
data=requests.get('https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/Coronaf%C3%A4lle_in_den_Bundesl%C3%A4ndern/FeatureServer/0/query?where=1%3D1&outFields=*&outSR=4326&f=json')
import json
json_object=json.loads(data.content)
type(json_object)
full_list=[]
for pos,each_dict in enumerate(json_object['features'][:]):
full_list.append(each_dict['attributes'])
pd_full_list=pd.DataFrame(full_list)
pd_full_list.head()
pd_full_list.to_csv('../data/raw/NPGEO/GER_state_data.csv',sep=';')
pd_full_list.shape[0]
###Output
_____no_output_____ |
Examples/Pywedge_Regression_Example.ipynb | ###Markdown
[](https://colab.research.google.com/drive/1_SamuVGCKXLtvr81JIZETpZHNvyUax43?usp=sharing)
###Code
!pip install pywedge --quiet
import pywedge as pw
import pandas as pd
train = pd.read_csv("https://raw.githubusercontent.com/taknev83/datasets/master/Train_house_price_prediction_regression.csv" )
test = pd.read_csv('https://raw.githubusercontent.com/taknev83/datasets/master/Test__house_price_prediction_regression.csv')
###Output
_____no_output_____
###Markdown
Instantiate Pywedge_Charts

The Pywedge_Charts class takes the following inputs:
* Dataframe
* c = any redundant column to be removed (like ID column etc., at present supports a single column removal, subsequent version will provision multiple column removal requirements)
* y = target column name as a string

Returns: Charts widget
###Code
mc = pw.Pywedge_Charts(train, c=None, y = 'TARGET(PRICE_IN_LACS)')
###Output
_____no_output_____
###Markdown
Call make_charts method from the instantiated class
###Code
charts = mc.make_charts()
###Output
_____no_output_____
###Markdown
Instantiate baseline_model class

Args:
* train = train dataframe
* test = test dataframe
* c = any redundant column to be removed (like ID column etc., at present supports a single column removal, subsequent version will provision multiple column removal requirements)
* y = target column name as a string
* type = Classification(Default) / Regression
###Code
blm = pw.baseline_model(train, test, c='ADDRESS', y='TARGET(PRICE_IN_LACS)', type="Regression")
###Output
_____no_output_____
###Markdown
Call the Regression_summary method for Regression type of tasks

Returns:
* Interactive pre-processing steps
* User input for test size in train test split
* Top 10 feature importance using Adaboost regressor
* Baseline models in 10 different algorithms with metrics
* Predict selected baseline models on standout test dataset
###Code
blm.Regression_summary()
###Output
_____no_output_____
###Markdown
Instantiate Pywedge_HP class for interactive hyperparameter tuning

Args:
* train = train dataframe
* test = test dataframe
* c = any redundant column to be removed (like ID column etc., at present supports a single column removal, subsequent version will provision multiple column removal requirements)
* y = target column name as a string
* tracking = True/False(Default) to enable mlflow hyperparameter tracking
###Code
pph = pw.Pywedge_HP(train, test, c='ADDRESS', y='TARGET(PRICE_IN_LACS)')
###Output
_____no_output_____
###Markdown
Call HP_Tune_Regression for regression hyperparameter tuning tasks

Returns:
* Interactive widget for inputting various hyperparameters
* Output tab with tuned model results
* Predictions on standout test data using tuned model
###Code
pph.HP_Tune_Regression()
###Output
_____no_output_____
###Markdown
The piece of code below maximises the cell display width in the Jupyter Notebook
###Code
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
###Output
_____no_output_____ |
AI-data-Projects/MNIST_GAN/MNIST_Style_GAN_v3.ipynb | ###Markdown
MNIST learns your handwriting

This is a small project on using a GAN to generate numbers that look like someone else's handwriting without being trained on all numbers written by this person. For example, say we had someone write the number 273 and we now want to write 481 in their own handwriting.

The main inspiration for this project is a paper I read recently called STAR GAN v2. In this paper they try to recognize different styles and features in images and transfer those into a different image. For example, they are able to take images of different animals like dogs or tigers and make them look like a cat. Furthermore, at the time of writing this it is a state-of-the-art method for this kind of style translation task.

Some of the results can be seen at the end of this notebook. Unfortunately it seems not that many features were captured and mostly it was only the thickness of the numbers that was preserved. A reason this happens might be that the size of the images is small, being 28x28. However, some ways to allow for more variation might be by extending the number of layers being used, by having higher dimensional spaces for the latent and style spaces, or by giving a higher weight to the style diversification loss (look at the loss functions section to see more about this).

The main purpose of this notebook is to make a small showcase of the architecture used in a simple design so that the ideas are simple to follow. This notebook will also contain some explanations and comments on the architecture of the neural network so that it might be easier to follow.

Note: another small thing I did in this project is to 'translate' STAR GAN code from pytorch to tensorflow. Redoing all of the work was useful to understand everything done in their code, and having an option in tensorflow might be useful for some people.

For a small tutorial on how to write a simple GAN architecture: https://machinelearningmastery.com/how-to-develop-a-generative-adversarial-network-for-an-mnist-handwritten-digits-from-scratch-in-keras/

Link to STAR GAN v2: https://app.wandb.ai/stacey/stargan/reports/Cute-Animals-and-Post-Modern-Style-Transfer%3A-StarGAN-v2-for-Multi-Domain-Image-Synthesis---VmlldzoxNzcwODQ

Further reading on style domain techniques for image generation:

Link to STAR GAN paper: https://arxiv.org/pdf/1912.01865.pdf

Link to Multimodal Unsupervised Image-to-Image Translation: https://arxiv.org/pdf/1804.04732.pdf

Link to Improving Style-Content Disentanglement Paper: https://arxiv.org/pdf/2007.04964.pdf

Initializing
###Code
import tensorflow as tf
from tensorflow_addons.layers import InstanceNormalization
import numpy as np
import tensorflow.keras.layers as layers
import time
from tensorflow.keras.datasets.mnist import load_data
import sys
import os
import datetime
###Output
_____no_output_____
###Markdown
Layers

There are a few layers that were custom made. Most importantly, it is useful to make these custom layers for the layers that try to incorporate style, since the inputs themselves are custom: you are inputting an image together with a vector representing the style.

ResBlk is short for Residual Block, where it is predicting the residual (the difference between the original and the prediction).
###Code
class ResBlk(tf.keras.Model):
def __init__(self, dim_in, dim_out, actv=layers.LeakyReLU(),
normalize=False, downsample=False):
super(ResBlk, self).__init__()
self.actv = actv
self.normalize = normalize
self.downsample = downsample
self.learned_sc = dim_in != dim_out
self._build_weights(dim_in, dim_out)
def _build_weights(self, dim_in, dim_out):
self.conv1 = layers.Conv2D(dim_in, 3, padding='same')
self.conv2 = layers.Conv2D(dim_out, 3, padding='same')
if self.normalize:
self.norm1 = InstanceNormalization()
self.norm2 = InstanceNormalization()
if self.learned_sc:
self.conv1x1 = layers.Conv2D(dim_out, 1)
def _shortcut(self, x):
if self.learned_sc:
x = self.conv1x1(x)
if self.downsample:
x = layers.AveragePooling2D(pool_size=(2,2), padding='same')(x)
return x
def _residual(self, x):
if len(tf.shape(x))>4:
x=tf.reshape(x,tf.shape(x)[1:])
if self.normalize:
x = self.norm1(x)
x = self.actv(x)
x = self.conv1(x)
if self.downsample:
x = layers.AveragePooling2D(pool_size=(2,2), padding='same')(x)
if self.normalize:
x = self.norm2(x)
x = self.actv(x)
x = self.conv2(x)
return x
def call(self, x):
x = self._shortcut(x) + self._residual(x)
return x / 2**(1/2) # unit variance
###Output
_____no_output_____
###Markdown
AdaIN stands for Adaptive Instance Normalization. It is a type of normalization that allows us to 'mix' two inputs. In this case we mix the style vector with our input x, which is the image (or an intermediate representation of it while it is being reconstructed).
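In formulas, matching the AdaIn layer below (gamma and beta are produced by a dense layer applied to the style vector s, and IN denotes instance normalization):

$$\mathrm{AdaIN}(x, s) = \big(1 + \gamma(s)\big)\,\mathrm{IN}(x) + \beta(s)$$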
###Code
class AdaIn(tf.keras.Model):
def __init__(self, style_dim, num_features):
super(AdaIn,self).__init__()
self.norm = InstanceNormalization()
self.lin = layers.Dense(num_features*2)
def call(self, x, s):
h=self.lin(s)
h=tf.reshape(h, [tf.shape(h)[0], 1, 1, tf.shape(h)[1]])
gamma,beta=tf.split(h, 2, axis=3)
return (1+gamma)*self.norm(x)+beta
class AdainResBlk(tf.keras.Model):
def __init__(self, dim_in, dim_out, style_dim=16,
actv=layers.LeakyReLU(), upsample=False):
super(AdainResBlk, self).__init__()
self.actv = actv
self.upsample = upsample
self.learned_sc = dim_in != dim_out
self._build_weights(dim_in, dim_out, style_dim)
def _build_weights(self, dim_in, dim_out, style_dim=16):
self.conv1 = layers.Conv2D(dim_out, 3, padding='same')
self.conv2 = layers.Conv2D(dim_out, 3, padding='same')
self.norm1 = AdaIn(style_dim, dim_in)
self.norm2 = AdaIn(style_dim, dim_out)
if self.learned_sc:
self.conv1x1 = layers.Conv2D(dim_out, 1)
def _shortcut(self, x):
if self.upsample:
x = layers.UpSampling2D(size=(2,2), interpolation='nearest')(x)
if self.learned_sc:
x = self.conv1x1(x)
return x
def _residual(self, x, s):
x = self.norm1(x, s)
x = self.actv(x)
if self.upsample:
x = layers.UpSampling2D(size=(2,2), interpolation='nearest')(x)
x = self.conv1(x)
x = self.norm2(x, s)
x = self.actv(x)
x = self.conv2(x)
return x
def call(self, x, s):
x = self._shortcut(x) + self._residual(x,s)
return x / 2**(1/2) # unit variance
###Output
_____no_output_____
###Markdown
Generator Class

In the generator we have two steps: one for encoding the image into lower level information and one for decoding back to the image. In this particular architecture the decoding uses the style to build back the image, as it is an important part of the process. The encoding does not do this, since we have the style encoder as a separate architecture that deals with generating a style vector for a particular image.
###Code
class Generator(tf.keras.Model):
def __init__(self, img_size=28, style_dim=16, dim_in=8, max_conv_dim=128, repeat_num=1):
super(Generator, self).__init__()
self.img_size=img_size
self.from_bw=layers.Conv2D(dim_in, 3, padding='same', input_shape=(1,img_size,img_size,1))
self.encode=[]
self.decode=[]
self.to_bw=tf.keras.Sequential([InstanceNormalization(), layers.LeakyReLU(), layers.Conv2D(1, 1, padding='same')])
for _ in range(repeat_num):
dim_out = min(dim_in*2, max_conv_dim)
self.encode.append(ResBlk(dim_in, dim_out, normalize=True, downsample=True))
self.decode.insert(0, AdainResBlk(dim_out, dim_in, style_dim, upsample=True))
dim_in = dim_out
# bottleneck blocks
for _ in range(2):
self.encode.append(ResBlk(dim_out, dim_out, normalize=True))
self.decode.insert(0, AdainResBlk(dim_out, dim_out, style_dim))
def call(self, x, s):
x = self.from_bw(x)
cache = {}
for block in self.encode:
x = block(x)
for block in self.decode:
x = block(x, s)
return self.to_bw(x)
###Output
_____no_output_____
###Markdown
Mapping Network

The Mapping Network and the Style Encoder are the parts of this architecture that make a difference in allowing style to be analyzed and put into our images. The mapping network takes as input a latent code (which represents an image as a vector in a high dimensional space) and a domain; in this case the domain is the digit we are representing. The style encoder takes as inputs an image and a domain.
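In symbols, following the STARGAN v2 notation (where F denotes the mapping network and E the style encoder defined further below):

$$s = F_{y}(z), \qquad \tilde{s} = E_{y}(x)$$

where z is a latent code, x an image and y the domain (the digit).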
###Code
class MappingNetwork(tf.keras.Model):
def __init__(self, latent_dim=8, style_dim=16, num_domains=10):
super(MappingNetwork,self).__init__()
map_layers = [layers.Dense(128)]
map_layers += [layers.ReLU()]
for _ in range(2):
map_layers += [layers.Dense(128)]
map_layers += [layers.ReLU()]
self.shared = tf.keras.Sequential(layers=map_layers)
self.unshared = []
for _ in range(num_domains):
self.unshared += [tf.keras.Sequential(layers=[layers.Dense(128),
layers.ReLU(),
layers.Dense(128),
layers.ReLU(),
layers.Dense(128),
layers.ReLU(),
layers.Dense(style_dim)])]
def call(self, z, y):
h = self.shared(z)
out = []
for layer in self.unshared:
out += [layer(h)]
out = tf.stack(out, axis=1) # (batch, num_domains, style_dim)
s = tf.gather(out, y, axis=1)
s = tf.gather_nd(s, [[i,i] for i in range(len(s))]) # (batch, style_dim)
return s
###Output
_____no_output_____
###Markdown
Style Encoder

An important thing to notice about the style encoder is that it takes an image as input and outputs a style vector. Looking at the dimensions of these, we notice we need to flatten out the image through the layers. This can usually be done in two ways: by flattening a 2 dimensional input to a 1 dimensional output with a flatten layer, or, as it was done here, by using enough pooling layers to downsample the 2 dimensional input until it is one dimensional. With 'same' padding each downsampling halves the spatial size (rounding up), so the 28x28 input goes through 14, 7, 4, 2 and finally 1, which is why repeat_num=5.
###Code
class StyleEncoder(tf.keras.Model):
def __init__(self, img_size=28, style_dim=16, dim_in=8, num_domains=10, max_conv_dim=128, repeat_num=5):
super(StyleEncoder,self).__init__()
blocks = [layers.Conv2D(dim_in, 3, padding='same')]
for _ in range(repeat_num): #repetition 1 sends to (b,14,14,d) 2 to (b,7,7,d) 3 to (b,4,4,d) 4 to (b,2,2,d) 5 to (b,1,1,d)
dim_out = min(dim_in*2, max_conv_dim)
blocks += [ResBlk(dim_in, dim_out, downsample=True)]
dim_in = dim_out
blocks += [layers.LeakyReLU()]
blocks += [layers.Conv2D(dim_out, 4, padding='same')]
blocks += [layers.LeakyReLU()]
self.shared = tf.keras.Sequential(layers=blocks)
self.unshared = []
for _ in range(num_domains):
self.unshared += [layers.Dense(style_dim)]
def call(self, x, y):
h = self.shared(x)
h = tf.reshape(h,[tf.shape(h)[0], tf.shape(h)[3]])
out = []
for layer in self.unshared:
out += [layer(h)]
out = tf.stack(out, axis=1) # (batch, num_domains, style_dim)
s = tf.gather(out, y, axis=1) # (batch, style_dim)
s = tf.gather_nd(s, [[i,i] for i in range(len(s))])
return s
###Output
_____no_output_____
###Markdown
Discriminator Class

Similarly to the style encoder, the input of the discriminator is an image and we need to downsample it until it is one dimensional.
###Code
class Discriminator(tf.keras.Model):
def __init__(self, img_size=28, dim_in=8, num_domains=10, max_conv_dim=128, repeat_num=5):
super(Discriminator, self).__init__()
blocks = [layers.Conv2D(dim_in, 3, padding='same')]
for _ in range(repeat_num): #repetition 1 sends to (b,14,14,d) 2 to (b,7,7,d) 3 to (b,4,4,d) 4 to (b,2,2,d) 5 to (b,1,1,d)
dim_out = min(dim_in*2, max_conv_dim)
blocks += [ResBlk(dim_in, dim_out, downsample=True)]
dim_in = dim_out
blocks += [layers.LeakyReLU()]
blocks += [layers.Conv2D(dim_out, 4, padding='same')]
blocks += [layers.LeakyReLU()]
blocks += [layers.Conv2D(num_domains, 1, padding='same')]
self.main = tf.keras.Sequential(layers=blocks)
def call(self, x, y):
out = self.main(x)
out = tf.reshape(out, (tf.shape(out)[0], tf.shape(out)[3])) # (batch, num_domains)
out = tf.gather(out, y, axis=1) # (batch)
#out = tf.reshape(out, [tf.shape(x)[0]])
return out
###Output
_____no_output_____
###Markdown
Loss Functions

The loss functions used are an important part of this model, as they describe our goal when training and how to perform gradient descent. The discriminator loss function is the regular adversarial loss L_adv used in a GAN architecture, but furthermore we have three loss functions added. For these loss functions, if you want to see the mathematical formulas I recommend looking at the STAR GAN v2 paper. However, I will explain what each loss tries to measure and give a quick description of how it does so.

L_sty is a style reconstruction loss. This tries to capture how well the style was captured in our output. It is computed as the expected value of the distance between the target style vector and the style vector that our style encoder predicts for the generated image.

L_ds is a style diversification loss. It tries to ensure that the images produced are different, to promote a variety of generated images. It is computed as the expected value of the distance (l_1 norm) between the images generated when using two different styles and the same source.

L_cyc is a characteristic preserving loss. The cyc comes from cyclic, as we measure the distance between the original image and the image we get by feeding the generated image back through the generator together with the style our style encoder extracts from the original. (Notice we feed an already generated image back into the generator, so the generator is used two times.)

In the end the total loss function is expressed as L_adv + lambda_sty * L_sty - lambda_ds * L_ds + lambda_cyc * L_cyc (the diversity term is subtracted because we want to maximize it).
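Written out, the three extra terms match the compute_g_loss implementation below (G is the generator, E the style encoder, s_org the style of the original image and s_1, s_2 two different target styles):

$$\mathcal{L}_{sty} = \mathbb{E}\big[\lVert s_{trg} - E(G(x, s_{trg}))\rVert_1\big], \qquad \mathcal{L}_{ds} = \mathbb{E}\big[\lVert G(x, s_1) - G(x, s_2)\rVert_1\big], \qquad \mathcal{L}_{cyc} = \mathbb{E}\big[\lVert x - G(G(x, s_{trg}), s_{org})\rVert_1\big]$$

and the generator objective combines them as $\mathcal{L}_{adv} + \lambda_{sty}\mathcal{L}_{sty} - \lambda_{ds}\mathcal{L}_{ds} + \lambda_{cyc}\mathcal{L}_{cyc}$.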
###Code
def moving_average(model, model_test, beta=0.999):
    for i in range(len(model.weights)):
        # use assign so the exponential moving average actually updates the ema model's variables
        model_test.weights[i].assign((1-beta)*model.weights[i] + beta*model_test.weights[i])
def adv_loss(logits, target):
assert target in [1, 0]
targets = tf.fill(tf.shape(logits), target)
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)(targets, logits)
return loss
def r1_reg(d_out, x_in, g):
# zero-centered gradient penalty for real images
batch_size = tf.shape(x_in)[0]
grad_dout=g.gradient(d_out, x_in)
#grad_dout = tf.gradients(ys=d_out, xs=x_in)
grad_dout2 = tf.square(grad_dout)
grad_dout2 = tf.reshape(grad_dout2,[batch_size, tf.shape(grad_dout2)[1]*tf.shape(grad_dout2)[2]])
reg = 0.5 * tf.math.reduce_mean(tf.math.reduce_sum(grad_dout2, axis=1))
return reg
def compute_d_loss(nets, args, x_real, y_org, y_trg, z_trg=None, x_ref=None):
assert (z_trg is None) != (x_ref is None)
# with real images
with tf.GradientTape() as g:
g.watch(x_real)
out = nets['discriminator'](x_real, y_org)
loss_real = adv_loss(out, 1)
loss_reg = r1_reg(out, x_real, g)
# with fake images
if z_trg is not None:
s_trg = nets['mapping_network'](z_trg, y_trg)
else: # x_ref is not None
s_trg = nets['style_encoder'](x_ref, y_trg)
x_fake = nets['generator'](x_real, s_trg)
out = nets['discriminator'](x_fake, y_trg)
loss_fake = adv_loss(out, 0)
loss = loss_real + loss_fake + args['lambda_reg'] * loss_reg
return loss, {'real': loss_real, 'fake':loss_fake, 'reg':loss_reg}
def compute_g_loss(nets, args, x_real, y_org, y_trg, z_trgs=None, x_refs=None):
assert (z_trgs is None) != (x_refs is None)
if z_trgs is not None:
z_trg, z_trg2 = z_trgs
if x_refs is not None:
x_ref, x_ref2 = x_refs
# adversarial loss
if z_trgs is not None:
s_trg = nets['mapping_network'](z_trg, y_trg)
else:
s_trg = nets['style_encoder'](x_ref, y_trg)
x_fake = nets['generator'](x_real, s_trg)
out = nets['discriminator'](x_fake, y_trg)
loss_adv = adv_loss(out, 1)
# style reconstruction loss
s_pred = nets['style_encoder'](x_fake, y_trg)
loss_sty = tf.math.reduce_mean(tf.abs(s_pred - s_trg))
# diversity sensitive loss
if z_trgs is not None:
s_trg2 = nets['mapping_network'](z_trg2, y_trg)
else:
s_trg2 = nets['style_encoder'](x_ref2, y_trg)
x_fake2 = nets['generator'](x_real, s_trg2)
loss_ds = tf.math.reduce_mean(tf.abs(x_fake - x_fake2))
# cycle-consistency loss
s_org = nets['style_encoder'](x_real, y_org)
x_rec = nets['generator'](x_fake, s_org)
loss_cyc = tf.math.reduce_mean(tf.abs(x_rec - x_real))
loss = loss_adv + args['lambda_sty'] * loss_sty \
- args['lambda_ds'] * loss_ds + args['lambda_cyc'] * loss_cyc
return loss, {'adv':loss_adv, 'sty':loss_sty, 'ds':loss_ds, 'cyc':loss_cyc}
###Output
_____no_output_____
###Markdown
The Model

Here we introduce the class Solver, which is the most important class as it represents our whole model. It will initialize all of our neural networks as well as train the network.
###Code
class Solver(tf.keras.Model):
def __init__(self, args):
super(Solver, self).__init__()
self.args = args
self.step=0
self.nets, self.nets_ema = self.build_model(self.args)
# below setattrs are to make networks be children of Solver, e.g., for self.to(self.device)
for name in self.nets.keys():
setattr(self, name, self.nets[name])
for name in self.nets_ema.keys():
setattr(self, name + '_ema', self.nets_ema[name])
if args['mode'] == 'train':
self.optims = {}
for net in self.nets.keys():
self.optims[net] = tf.keras.optimizers.Adam(learning_rate= args['f_lr'] if net == 'mapping_network' else args['lr'],
beta_1=args['beta1'], beta_2=args['beta2'],
epsilon=args['weight_decay'])
self.ckptios = [tf.train.Checkpoint(model=net) for net in self.nets.values()]
self.ckptios += [tf.train.Checkpoint(model=net_ema) for net_ema in self.nets_ema.values()]
self.ckptios += [tf.train.Checkpoint(optimizer=optim) for optim in self.optims.values()]
else:
self.ckptios = [tf.train.Checkpoint(model=net_ema) for net_ema in self.nets_ema.values()]
#for name in self.nets.keys():
# Do not initialize the FAN parameters
# print('Initializing %s...' % name)
#self.nets[name].apply(initializer=tf.keras.initializers.HeNormal)
def build_model(self, args):
generator = Generator(args['img_size'], args['style_dim'])
mapping_network = MappingNetwork(args['latent_dim'], args['style_dim'], args['num_domains'])
style_encoder = StyleEncoder(args['img_size'], args['style_dim'], args['num_domains'])
discriminator = Discriminator(args['img_size'], args['num_domains'])
generator_ema = Generator(args['img_size'], args['style_dim'])
mapping_network_ema = MappingNetwork(args['latent_dim'], args['style_dim'], args['num_domains'])
style_encoder_ema = StyleEncoder(args['img_size'], args['style_dim'], args['num_domains'])
nets = {'generator':generator, 'mapping_network':mapping_network,
'style_encoder':style_encoder, 'discriminator':discriminator}
nets_ema = {'generator':generator_ema, 'mapping_network':mapping_network_ema,
'style_encoder':style_encoder_ema}
nets['discriminator'](inputs['x_src'][0:2],inputs['y_src'][0:2])
s_trg = nets['mapping_network'](inputs['z_trg'][0:2],inputs['y_src'][0:2])
nets['generator'](inputs['x_src'][0:2],s_trg)
nets['style_encoder'](inputs['x_src'][0:2], inputs['y_src'][0:2])
s_trg = nets_ema['mapping_network'](inputs['z_trg'][0:2],inputs['y_src'][0:2])
nets_ema['generator'](inputs['x_src'][0:2],s_trg)
nets_ema['style_encoder'](inputs['x_src'][0:2], inputs['y_src'][0:2])
return nets, nets_ema
    def save(self):
        for net in self.nets.keys():
            self.nets[net].save_weights('MNIST_GAN_3/saved_model/'+net+'step'+str(self.step)+'.h5')
        for net in self.nets_ema.keys():
            # save the exponential moving average copies under the _ema suffix
            self.nets_ema[net].save_weights('MNIST_GAN_3/saved_model/'+net+'step'+str(self.step)+'_ema.h5')
        #for ckptio in self.ckptios:
        #    ckptio.save(step)
    def load(self, step):
        self.step = step
        for net in self.nets.keys():
            self.nets[net].load_weights('MNIST_GAN_3/saved_model/'+net+'step'+str(step)+'.h5')
        for net in self.nets_ema.keys():
            self.nets_ema[net].load_weights('MNIST_GAN_3/saved_model/'+net+'step'+str(step)+'_ema.h5')
        #for ckptio in self.ckptios:
        #    ckptio.load(step)
# def _reset_grad(self):
# for optim in self.optims.values():
# optim.zero_grad()
def train(self, inputs, validations, gen, batch_size=128):
"""
        inputs is a dictionary that contains source images, reference images, domain labels and latent codes used to train the network
        validations is a list that contains validation images
"""
args = self.args
nets = self.nets
nets_ema = self.nets_ema
optims = self.optims
# inputs_val=validations[0]
# resume training if necessary
if args['resume_iter'] > 0:
self.load(args['resume_iter'])
# remember the initial value of ds weight
initial_lambda_ds = args['lambda_ds']
print('Start training...')
start_time = time.time()
for i in range(args['resume_iter'], args['total_iters']):
ind=(self.step*batch_size)%len(inputs['x_src'])
self.step+=1
# fetch images and labels
x_real, y_org = gen.flow(inputs['x_src'][ind:ind+batch_size], inputs['y_src'][ind:ind+batch_size], batch_size=batch_size, shuffle=False)[0]
x_real=tf.cast(x_real, tf.float32)
x_ref, y_trg = gen.flow(inputs['x_ref'][ind:ind+batch_size], inputs['y_ref'][ind:ind+batch_size], batch_size=batch_size, shuffle=False)[0]
x_ref=tf.cast(x_ref, tf.float32)
x_ref2 = gen.flow(inputs['x_ref2'][ind:ind+batch_size], batch_size=batch_size, shuffle=False)[0]
x_ref2=tf.cast(x_ref2, tf.float32)
z_trg, z_trg2 = inputs['z_trg'][ind:ind+batch_size], inputs['z_trg2'][ind:ind+batch_size]
#print(1.5)
# train the discriminator
with tf.GradientTape() as g:
g.watch(nets['discriminator'].weights)
d_loss, d_losses_latent = compute_d_loss(
nets, args, x_real, y_org, y_trg, z_trg=z_trg)
#self._reset_grad()
#d_loss.backward()
grad=g.gradient(d_loss, nets['discriminator'].weights)
#optims['discriminator'].get_gradients(d_loss, nets['discriminator'].weights)
optims['discriminator'].apply_gradients(zip(grad, nets['discriminator'].weights))
#print(2)
with tf.GradientTape() as g:
g.watch(nets['discriminator'].weights)
d_loss, d_losses_ref = compute_d_loss(
nets, args, x_real, y_org, y_trg, x_ref=x_ref)
#self._reset_grad()
#d_loss.backward()
grad=g.gradient(d_loss, nets['discriminator'].weights)
optims['discriminator'].apply_gradients(zip(grad, nets['discriminator'].weights))
#print(3)
# train the generator
with tf.GradientTape(persistent=True) as g:
g.watch(nets['generator'].weights)
g.watch(nets['mapping_network'].weights)
g.watch(nets['style_encoder'].weights)
g_loss, g_losses_latent = compute_g_loss(
nets, args, x_real, y_org, y_trg, z_trgs=[z_trg, z_trg2])
#self._reset_grad()
#g_loss.backward()
grad=g.gradient(g_loss, nets['generator'].weights)
optims['generator'].apply_gradients(zip(grad, nets['generator'].weights))
grad=g.gradient(g_loss, nets['mapping_network'].weights)
optims['mapping_network'].apply_gradients(zip(grad, nets['mapping_network'].weights))
grad=g.gradient(g_loss, nets['style_encoder'].weights)
optims['style_encoder'].apply_gradients(zip(grad, nets['style_encoder'].weights))
del g
#print(4)
with tf.GradientTape(persistent=True) as g:
g.watch(nets['generator'].weights)
g_loss, g_losses_ref = compute_g_loss(
nets, args, x_real, y_org, y_trg, x_refs=[x_ref, x_ref2])
#self._reset_grad()
#g_loss.backward()
grad=g.gradient(g_loss, nets['generator'].weights)
optims['generator'].apply_gradients(zip(grad, nets['generator'].weights))
del g
#print(5)
# compute moving average of network parameters
moving_average(nets['generator'], nets_ema['generator'], beta=0.999)
moving_average(nets['mapping_network'], nets_ema['mapping_network'], beta=0.999)
moving_average(nets['style_encoder'], nets_ema['style_encoder'], beta=0.999)
#print(6)
# decay weight for diversity sensitive loss
if args['lambda_ds'] > 0:
args['lambda_ds'] -= (initial_lambda_ds / args['ds_iter'])
# print out log info
if (i+1) % args['print_every'] == 0:
elapsed = time.time() - start_time
elapsed = str(datetime.timedelta(seconds=elapsed))[:-7]
log = "Elapsed time [%s], Iteration [%i/%i], " % (elapsed, i+1, args['total_iters'])
all_losses = {}
for loss, prefix in [(d_losses_latent,'D/latent_'), (d_losses_ref,'D/ref_'),
(g_losses_latent,'G/latent_'), (g_losses_ref,'G/ref_')]:
for key, value in loss.items():
all_losses[prefix + key] = value
all_losses['G/lambda_ds'] = args['lambda_ds']
for key, value in all_losses.items():
if key!= 'G/lambda_ds':
print(log+key, value.numpy())
else:
print(log+key, value)
# generate images for debugging
#if (i+1) % args['sample_every'] == 0:
# os.makedirs(args['sample_dir'], exist_ok=True)
# debug_image(nets_ema, args, inputs=inputs_val, step=i+1)
# save model checkpoints
if (i+1) % args['save_every'] == 0:
                self.save()
# self._save_checkpoint(step=i+1)
    def sample(self, src, ref):
        """
        src: source images that we want to modify
        ref: pair of (reference images, domains)
        generates images that change the source images into the style of the reference images
        (translate_using_reference is assumed to come from the STAR GAN v2 utilities; it is not defined in this notebook)
        """
        args = self.args
        nets_ema = self.nets_ema
        os.makedirs(args['result_dir'], exist_ok=True)
        self.load(args['resume_iter'])
        fname = os.path.join(args['result_dir'], 'reference.jpg')
        print('Working on {}...'.format(fname))
        translate_using_reference(nets_ema, args, src, ref[0], ref[1], fname)
###Output
_____no_output_____
###Markdown
Data Loading and Preprocessing
###Code
(trainX, trainy), (valX, valy) = load_data()
trainX=tf.reshape(trainX, (60000,28,28,1))
valX=tf.reshape(valX, (10000,28,28,1))
x_src=tf.cast(trainX, tf.float32)
y_src=tf.cast(trainy, tf.int32)
x_ref=tf.random.shuffle(x_src, seed=0)
x_ref2=tf.random.shuffle(x_src, seed=1)
y_ref=tf.random.shuffle(y_src, seed=0)
z_trg=tf.random.normal((60000,8))
z_trg2=tf.random.normal((60000,8))
inputs={"x_src":x_src, "y_src":y_src, "x_ref":x_ref, "x_ref2":x_ref2, "y_ref":y_ref, "z_trg":z_trg, "z_trg2":z_trg2}
###Output
_____no_output_____
###Markdown
Data Augmentation

We can do some things to augment our data set, like transforming our images by slightly rotating, shifting and zooming them (see the ImageDataGenerator settings below).
###Code
img_gen=tf.keras.preprocessing.image.ImageDataGenerator(rotation_range=15 , width_shift_range=2, height_shift_range=2, zoom_range=0.1)
img_gen.fit(x_src)
###Output
_____no_output_____
###Markdown
Parameters
###Code
args={'img_size':28,
'style_dim':16,
'latent_dim':8,
'num_domains':10,
'lambda_reg':1,
'lambda_ds':1,
'lambda_sty':1,
'lambda_cyc':1,
'hidden_dim':128,
'resume_iter':0,
'ds_iter':2000,
'total_iters':2000,
'batch_size':128,
'val_batch_size':32,
'lr':1e-4,
'f_lr':1e-6,
'beta1':0,
'beta2':0.99,
'weight_decay':1e-4,
'num_outs_per_domain':4,
'mode': 'train', #train,sample,eval
'seed':0,
'train_img_dir':'GAN/data/train',
'val_img_dir': 'GAN/data/val',
'sample_dir':'GAN/res/samples',
'checkpoint_dir':'GAN/res/checkpoints',
'eval_dir':'GAN/res/eval',
'result_dir':'GAN/res/results',
'src_dir':'GAN/data/src',
'ref_dir':'GAN/data/ref',
'print_every': 500,
'sample_every':20000,
'save_every':500,
'eval_every':1000 }
###Output
_____no_output_____
###Markdown
Load Model
###Code
solv=Solver(args)
solv.build_model(args)
solv.load(500)
###Output
_____no_output_____
###Markdown
Training
###Code
with tf.device('/device:GPU:0'):
solv.train(inputs, inputs, img_gen)
###Output
Start training...
###Markdown
Results

In this first cell we show an image where the rows represent a source image and the columns the style they are trying to mimic. We can see that the generated image still highly resembles the source image but has obtained some characteristics depending on the style of our reference. In most cases this style is about the thickness of the lines, but it does vary slightly in other ways.
###Code
import matplotlib.pyplot as pyplot
for i in range(4):
pyplot.subplot(5,5,2+i)
pyplot.axis('off')
pyplot.imshow(np.reshape(inputs['x_ref'][i],[28,28]), cmap='gray_r')
for i in range(4):
pyplot.subplot(5, 5, 5*(i+1) + 1)
pyplot.axis('off')
pyplot.imshow(np.reshape(inputs['x_src'][i], [28,28]), cmap='gray_r')
for j in range(4):
pyplot.subplot(5, 5, 5*(i+1) + j +2)
pyplot.axis('off')
pyplot.imshow(np.reshape(solv.nets['generator'](inputs['x_src'][i:i+1],solv.nets['style_encoder'](inputs['x_ref'][j:j+1],inputs['y_ref'][j:j+1])).numpy(), [28,28]), cmap='gray_r')
pyplot.show()
#left is source and top is the target trying to mimic its font
###Output
_____no_output_____
###Markdown
Below we generate random styles and look at the outputs they produce. We notice that the images are quite likely to be distorted in this case, whereas when using the style of an already existing image the output usually has good quality.
###Code
for i in range(5):
pyplot.subplot(5,5,1+i)
pyplot.axis('off')
pyplot.imshow(np.reshape(solv.nets['generator'](inputs['x_src'][0:1],tf.random.normal((1,16))).numpy(), [28,28]), cmap='gray_r')
###Output
_____no_output_____
###Markdown
Here we can see the process of how the image transforms into the target. In these small images there is not too much that is changing but we can still appreciate the process.
###Code
s1=solv.nets['style_encoder'](inputs['x_src'][0:1],inputs['y_src'][0:1])
s2=solv.nets['style_encoder'](inputs['x_ref'][0:1],inputs['y_ref'][0:1])
for i in range(5):
pyplot.subplot(5,5,1+i)
pyplot.axis('off')
s=(1-i/5)*s1+i/5*s2
pyplot.imshow(np.reshape(solv.nets['generator'](inputs['x_src'][0:1],s).numpy(), [28,28]), cmap='gray_r')
###Output
_____no_output_____ |
8. Machine Learning: Part 2/Presentation follow along.ipynb | ###Markdown
Presentation follow along
###Code
from sklearn.datasets import load_digits
data = load_digits(as_frame=True).frame
data
import pandas as pd
df = pd.DataFrame(data.data, columns=data.feature_names)
df['target'] = data.target
df
data.loc[0].values[:-1]
# use .loc[] to get the first row
# then use .values to get this row as a pure array
# then take [:-1], meaning everything up to the last value
# becuase the last value is the label
row = data.loc[0].values[:-1]
# then reshape into a grid
grid = row.reshape(8, 8)
print(grid)
def plot_number(row_number):
row = data.loc[row_number].values[:-1]
grid = row.reshape(8, 8)
plt.figure()
plt.imshow(grid, cmap='gray')
plt.title(str(data.loc[row_number].values[-1]))
plot_number(120)
import matplotlib.pyplot as plt
%matplotlib notebook
plt.figure()
plt.imshow(grid, cmap='gray')
def show_digit(row_number):
row = data.loc[row_number].values[:-1]
label = data.loc[row_number].values[-1]
grid = row.reshape(8, 8)
plt.figure()
plt.imshow(grid, cmap='gray')
plt.title('Label = ' + str(int(label)))
show_digit(34)
# import the svm algorithm from scikit-learn
from sklearn.svm import SVC
# import train test split
from sklearn.model_selection import train_test_split
# perform train test split
train_data, test_data = train_test_split(data, test_size=0.2, random_state=37)
# create a new classifier. C=1 here sets the "wigglyness"
classifier = SVC(C=1)
# fit the classifier on the training data
classifier.fit(train_data.iloc[:, :-1], train_data.iloc[:, -1])
# make a prediction on the test data
prediction = classifier.predict(test_data.iloc[:, :-1])
prediction
test_data.iloc[:, -1].values
fails = test_data[prediction != test_data.iloc[:, -1]]
fails
plt.figure()
plt.imshow(fails.iloc[0, :-1].values.reshape(8, 8), cmap='gray')
plt.title('True label: 4')
plt.figure()
plt.imshow(fails.iloc[1, :-1].values.reshape(8, 8), cmap='gray')
plt.title('True label: 5')
predicted = prediction[prediction != test_data.iloc[:, -1]]
print(predicted)
# import the KFold function
from sklearn.model_selection import KFold
# make a k-fold iterator with 5 splits, shuffling the data
kf = KFold(n_splits=5, shuffle=True)
acs = []
for train_index, test_index in kf.split(data):
X_train = data.iloc[train_index, :-1]
X_test = data.iloc[test_index, :-1]
Y_train = data.iloc[train_index, -1]
Y_test = data.iloc[test_index, -1]
classifier = SVC(C=1)
classifier.fit(X_train, Y_train)
prediction = classifier.predict(X_test)
ac = sum(prediction == Y_test) / len(prediction)
acs.append(ac)
print(100 * sum(acs) / 5)
# import the KFold function
from sklearn.model_selection import KFold
# make a k-fold iterator with 5 splits, shuffling the data
kf = KFold(n_splits=5, shuffle=True)
# perform 5 loops, getting random rows
for train_index, test_index in kf.split(data):
X_train = data.iloc[train_index, :-1]
X_test = data.iloc[test_index, :-1]
y_train = data.iloc[train_index, -1]
y_test = data.iloc[test_index, -1]
classifier = SVC(C=1)
classifier.fit(X_train, y_train)
prediction = classifier.predict(X_test)
print(sum(prediction == y_test) / len(prediction))
sum([0.99166, 0.99444, 0.98050, 0.98328, 0.98607]) / 5
###Output
_____no_output_____ |
DL-LearningWithLiMu/13-Dropout.ipynb | ###Markdown
Dropout

Motivation: a good model needs to be robust to perturbations of its input data.
- Using data with noise is equivalent to Tikhonov regularization
- Dropout: inject noise between the layers

The noise is injected without bias (the expectation of each activation stays unchanged), which helps prevent overfitting. Dropout is usually applied to the outputs of hidden fully-connected layers.
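In formulas, the unbiased noise injection implemented by dropout_layer below keeps the expectation of each activation unchanged:

$$x_i' = \begin{cases} 0 & \text{with probability } p \\ \dfrac{x_i}{1-p} & \text{otherwise} \end{cases} \qquad \Rightarrow \qquad \mathbb{E}[x_i'] = x_i$$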
###Code
import torch
from torch import nn
from d2l import torch as d2l
def dropout_layer(X, dropout):
assert 0 <= dropout <= 1
if dropout == 1:
return torch.zeros_like(X)
if dropout == 0:
return X
mask = (torch.rand(X.shape) > dropout).float()
return mask * X / (1.0 - dropout)
# Test the dropout_layer function
X = torch.arange(16, dtype=torch.float32).reshape((2, 8))
print(X)
print(dropout_layer(X, 0.))
print(dropout_layer(X, 0.5))
print(dropout_layer(X, 1.))
num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256
dropout1, dropout2 = 0.2, 0.5
class Net(nn.Module):
def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,
is_training=True):
super(Net, self).__init__()
self.num_inputs = num_inputs
self.training = is_training
self.lin1 = nn.Linear(num_inputs, num_hiddens1)
self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)
self.lin3 = nn.Linear(num_hiddens2, num_outputs)
self.relu = nn.ReLU()
def forward(self, X):
H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))
if self.training == True:
H1 = dropout_layer(H1, dropout1)
H2 = self.relu(self.lin2(H1))
if self.training == True:
H2 = dropout_layer(H2, dropout2)
out = self.lin3(H2)
return out
net = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)
num_epochs, lr, batch_size = 10, 0.5, 256
loss = nn.CrossEntropyLoss(reduction='none')
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
trainer = torch.optim.SGD(net.parameters(), lr=lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(),
nn.Dropout(dropout1), nn.Linear(256, 256), nn.ReLU(),
nn.Dropout(dropout2), nn.Linear(256, 10))
def init_weights(m):
if type(m) == nn.Linear:
nn.init.normal_(m.weight, std=0.01)
net.apply(init_weights);
trainer = torch.optim.SGD(net.parameters(), lr=lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
###Output
_____no_output_____ |
q&a/notebooks/.ipynb_checkpoints/demo_0-checkpoint.ipynb | ###Markdown
Getting started with using Doc2Vec (gensim model)

Imports

These are the necessary libs that we are going to use in this session.
###Code
import nltk
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import re
from gensim import utils
from gensim.models.doc2vec import LabeledSentence
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
###Output
_____no_output_____
###Markdown
Downloading the necessary packages for data preprocessing
###Code
# stopwords: words like 'the', 'at', 'a', 'an' are unnecessary
# punkt: used for tokenization into sentences
# http://www.nltk.org/
nltk.download("punkt")
nltk.download("stopwords")
###Output
[nltk_data] Downloading package punkt to /home/buckaroo/nltk_data...
[nltk_data] Package punkt is already up-to-date!
[nltk_data] Downloading package stopwords to
[nltk_data] /home/buckaroo/nltk_data...
[nltk_data] Package stopwords is already up-to-date!
###Markdown
Reading CSV

We are importing the CSV containing the Question-Answer mapping. Let's visualize it and check it out.
###Code
df = pd.read_csv("../data/zeamed-web/zeamed-faq.csv")
df.head(5)
###Output
_____no_output_____
###Markdown
Pre-processing

This utility is used for preprocessing the sentences/words, that is, removing the unnecessary things that do not contribute any **meaning or value**.
###Code
def review_to_wordlist(review, remove_stopwords=True):
# Clean the text, with the option to remove stopwords.
# Convert words to lower case and split them
words = review.lower().split()
# Optionally remove stop words (true by default)
if remove_stopwords:
stops = set(stopwords.words("english"))
words = [w for w in words if not w in stops]
review_text = " ".join(words)
# Clean the text
review_text = re.sub(r"[^A-Za-z0-9(),!.?\'\`]", " ", review_text)
review_text = re.sub(r"\'s", " 's ", review_text)
review_text = re.sub(r"\'ve", " 've ", review_text)
review_text = re.sub(r"n\'t", " 't ", review_text)
review_text = re.sub(r"\'re", " 're ", review_text)
review_text = re.sub(r"\'d", " 'd ", review_text)
review_text = re.sub(r"\'ll", " 'll ", review_text)
review_text = re.sub(r",", " ", review_text)
review_text = re.sub(r"\.", " ", review_text)
review_text = re.sub(r"!", " ", review_text)
review_text = re.sub(r"\(", " ( ", review_text)
review_text = re.sub(r"\)", " ) ", review_text)
review_text = re.sub(r"\?", " ", review_text)
review_text = re.sub(r"\s{2,}", " ", review_text)
words = review_text.split()
# Shorten words to their stems
stemmer = SnowballStemmer('english')
stemmed_words = [stemmer.stem(word) for word in words]
review_text = " ".join(stemmed_words)
# Return a list of words
return(review_text)
###Output
_____no_output_____
###Markdown
This utility cleans the sentences in the array and appends them to the `question_list` array, which we will use later for training the `Doc2Vec` model.
###Code
def process_questions(question_list, questions, question_list_name):
# function to transform questions and display progress
for question in questions:
question_list.append(review_to_wordlist(question,False))
if len(question_list) % 5 == 0:
progress = len(question_list)/len(df) * 100
print("{} is {}% complete.".format(question_list_name, round(progress, 1)))
questions = []
process_questions(questions,df.Question,'Questions')
questions
###Output
Questions is 22.7% complete.
Questions is 45.5% complete.
Questions is 68.2% complete.
Questions is 90.9% complete.
###Markdown
`query` is our input; we pre-process it and store it in `question`, which we will use for testing the `Doc2Vec` model.
###Code
query="who are you ?"
question = review_to_wordlist(query,False)
question
###Output
_____no_output_____
###Markdown
`labeled_questions` will be an array containing the sentences in the form of `LabeledSentence` objects, which we'll feed into the `Doc2Vec` model.
###Code
labeled_questions = []
for i,sentence in enumerate(questions):
#print(i,sentence)
labeled_questions.append( LabeledSentence( sentence.split() , [(i+1)] ) )
labeled_questions
###Output
/home/buckaroo/.local/lib/python3.7/site-packages/ipykernel_launcher.py:5: DeprecationWarning: Call to deprecated `LabeledSentence` (Class will be removed in 4.0.0, use TaggedDocument instead).
"""
###Markdown
Converting the `string` into a `LabeledSentence` object for testing purposes
###Code
query_label = LabeledSentence(question,[0])
###Output
/home/buckaroo/.local/lib/python3.7/site-packages/ipykernel_launcher.py:1: DeprecationWarning: Call to deprecated `LabeledSentence` (Class will be removed in 4.0.0, use TaggedDocument instead).
"""Entry point for launching an IPython kernel.
###Markdown
BuildingWe are building the model with the array `labeled_questions` (our list of questions) and its vocabulary.
###Code
# Build the model
model = Doc2Vec(labeled_questions ,vector_size=50, window=2, min_count=1, workers=4)
model.build_vocab(labeled_questions ,update=True)
###Output
_____no_output_____
###Markdown
Training Training the model for `epochs=20`
###Code
# Train the model
# 20 epochs performs a bit better, but timed out when uploading
model.train(labeled_questions, total_examples=model.corpus_count, epochs=20 )
###Output
_____no_output_____
###Markdown
TestingWe are testing the input `question` against all of the questions that we have gathered.
###Code
for i , ques in enumerate(questions):
score = model.wv.n_similarity(ques.lower().split() ,question.lower().split() )
print(i,ques,'=>',round(score,2))
###Output
0 doe the zeam provid 24 7 custom support => 0.88
1 is it a free servic => 0.83
2 whi is zeam provid a free servic => 0.85
3 where do i find zeam doctor profil on this app => 0.94
4 will the patient info be secur => 0.93
5 who are zeam s user => 0.97
6 what is zeam => 0.94
7 if we book an appoint through you ( zeam com ) do i stand a better chanc of get an appoint => 0.93
8 are there ani addit featur i have access to with this app => 0.94
9 how do i instal the zeam app for doctor => 0.97
10 is it free to use => 0.87
11 i book an appoint through you with a doctor will i have to be there on time what if i arriv late => 0.98
12 is it a free servic whi is zeam provid a free servic => 0.84
13 do i have to show the zeam appoint id when visit a clinic hospit for my appoint => 0.96
14 some doctor clinic hospit in my area are not list on zeam com what do i do about it => 0.94
15 is zeam safe to approach => 0.93
16 what if a patient has ani issu schedul an appoint onlin => 0.92
17 how can patient search for appoint => 0.91
18 if a patient has an appoint book at a particular time can the patient book anoth appoint at the same time via zeam => 0.87
19 is onlin schedul avail in multipl languag => 0.88
20 what should patient do if a specif reason for visit isn t list => 0.91
21 what internet browser provid patient with the best onlin schedul experi => 0.89
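###Markdown
With the similarity scores above, a natural follow-up (a small sketch, not part of the original notebook) is to pick the highest-scoring stored question and return its mapped answer. The `Answer` column name below is an assumption about the CSV schema:
###Code
# Sketch: find the stored question most similar to the user query and look up its answer.
import numpy as np
scores = [model.wv.n_similarity(q.lower().split(), question.lower().split()) for q in questions]
best_idx = int(np.argmax(scores))
print("Best match:", df.Question[best_idx])
print("Answer:", df.Answer[best_idx])  # 'Answer' column name is assumed, not confirmed by the CSV shown
###Output
_____no_output_____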
|
nbs/caching.ipynb | ###Markdown
Caching
###Code
#default_exp caching
#export
import pathlib
import pickle
import functools
from typing import Union
#export
def simplecache(path: Union[str, pathlib.Path]):
"""Pickle function's returned value. Function returns pickled value if it exists.
If `path` is str, may use "{}" placeholders to be filled from function arguments.
Placeholders must be consistent with function call arguments ({} for args, {...} for kwargs).
"""
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
p = path
if isinstance(p, str):
p = pathlib.Path(p.format(*args, **kwargs))
if p.exists():
print(f'Reading {func.__name__}() cached result from "{p}".')
return pickle.load(p.open('rb'))
else:
res = func(*args, **kwargs)
print(f'Writing {func.__name__}() result to cache at "{p}".')
pickle.dump(res, p.open('wb'), protocol=5)
return res
return wrapped
return wrapper
import tempfile
import uuid
import shutil
try:
p = pathlib.Path(tempfile.gettempdir())/uuid.uuid4().hex
@simplecache(p)
def test():
print('--> calculating')
return 1
print('calculate')
assert test() == 1
print('load cache')
assert test() == 1
p.unlink()
print('calculate')
assert test() == 1
p0 = pathlib.Path(tempfile.gettempdir())/uuid.uuid4().hex
p0.mkdir()
p1 = p0/'1'
p2 = p0/'2'
@simplecache(str(p0)+'/{x}')
def test(x):
print('--> calc', x)
return x
print('calculate')
assert test(x=1) == 1
print('calculate')
assert test(x=2) == 2
print('load cache')
assert test(x=1) == 1
finally:
p.unlink()
shutil.rmtree(p0)
###Output
_____no_output_____
###Markdown
Caching[shaypal5/cachier](https://github.com/shaypal5/cachier) multiple destinationsA caching decorator can cache using a mix of different backends.
###Code
import functools
def cache_local(func):
def wrapped(*args, **kwargs):
print('cache local')
result = func(*args, **kwargs)
return result
return wrapped
def cache_cloud(func):
def wrapped(*args, **kwargs):
print('cache cloud')
result = func(*args, **kwargs)
return result
return wrapped
def cache(func=None, local=True, cloud=False):
if func is None:
return functools.partial(cache, local=local, cloud=cloud)
if local:
func = cache_local(func)
if cloud:
func = cache_cloud(func)
return func
@cache
def f1():
return 1
@cache(local=False, cloud=True)
def f2():
return 2
@cache(cloud=True)
def f3():
return 3
print(f1())
print(f2())
print(f3())
###Output
_____no_output_____
###Markdown
cloud storage[Apache Libcloud](https://github.com/apache/libcloud) - a unified interface for the cloud```conda install -c conda-forge apache-libcloud```
###Code
import json
from libcloud.storage.types import Provider
from libcloud.storage.providers import get_driver
# authenticate using service account key
secrets = json.load(open('/home/babkin/info-group-162919-3ce4989c7e1e.json'))
cls = get_driver(Provider.GOOGLE_STORAGE)
driver = cls(secrets['client_email'], secrets['private_key'], project=secrets['project_id'])
bucket = driver.get_container('info-group-public')
# send in-memory object to cloud storage
import io
table = '''
col1,col2,col3
1,2,a
1,2,b
,5,c
'''
bucket.upload_object_via_stream(io.StringIO(table), 'data/test.csv')
# read from public URL
import pandas as pd
pd.read_csv('https://storage.googleapis.com/info-group-public/data/test.csv')
# delete object from cloud storage
bucket.get_object('data/test.csv').delete()
###Output
_____no_output_____ |
ex5_sol.ipynb | ###Markdown
Exercise 5 - Variational quantum eigensolver Historical backgroundDuring the last decade, quantum computers matured quickly and began to realize Feynman's initial dream of a computing system that could simulate the laws of nature in a quantum way. A 2014 paper first authored by Alberto Peruzzo introduced the **Variational Quantum Eigensolver (VQE)**, an algorithm meant for finding the ground state energy (lowest energy) of a molecule, with much shallower circuits than other approaches.[1] And, in 2017, the IBM Quantum team used the VQE algorithm to simulate the ground state energy of the lithium hydride molecule.[2]VQE's magic comes from outsourcing some of the problem's processing workload to a classical computer. The algorithm starts with a parameterized quantum circuit called an ansatz (a best guess) then finds the optimal parameters for this circuit using a classical optimizer. The VQE's advantage over classical algorithms comes from the fact that a quantum processing unit can represent and store the problem's exact wavefunction, an exponentially hard problem for a classical computer. This exercise 5 allows you to realize Feynman's dream yourself, setting up a variational quantum eigensolver to determine the ground state and the energy of a molecule. This is interesting because the ground state can be used to calculate various molecular properties, for instance the exact forces on nuclei than can serve to run molecular dynamics simulations to explore what happens in chemical systems with time.[3] References1. Peruzzo, Alberto, et al. "A variational eigenvalue solver on a photonic quantum processor." Nature communications 5.1 (2014): 1-7.2. Kandala, Abhinav, et al. "Hardware-efficient variational quantum eigensolver for small molecules and quantum magnets." Nature 549.7671 (2017): 242-246.3. Sokolov, Igor O., et al. "Microcanonical and finite-temperature ab initio molecular dynamics simulations on quantum computers." Physical Review Research 3.1 (2021): 013125. IntroductionFor the implementation of VQE, you will be able to make choices on how you want to compose your simulation, in particular focusing on the ansatz quantum circuits.This is motivated by the fact that one of the important tasks when running VQE on noisy quantum computers is to reduce the loss of fidelity (which introduces errors) by finding the most compact quantum circuit capable of representing the ground state.Practically, this entails to minimizing the number of two-qubit gates (e.g. CNOTs) while not loosing accuracy.Goal Find the shortest ansatz circuits for representing accurately the ground state of given problems. Be creative! Plan First you will learn how to compose a VQE simulation for the smallest molecule and then apply what you have learned to a case of a larger one. **1. Tutorial - VQE for H$_2$:** familiarize yourself with VQE and select the best combination of ansatz/classical optimizer by running statevector simulations.**2. Final Challenge - VQE for LiH:** perform similar investigation as in the first part but restricting to statevector simulator only. Use the qubit number reduction schemes available in Qiskit and find the optimal circuit for this larger system. Optimize the circuit and use your imagination to find ways to select the best building blocks of parameterized circuits and compose them to construct the most compact ansatz circuit for the ground state, better than the ones already available in Qiskit. Below is an introduction to the theory behind VQE simulations. 
You don't have to understand the whole thing before moving on. Don't be scared! TheoryHere below is the general workflow representing how the molecular simulations using VQE are performed on quantum computers.The core idea hybrid quantum-classical approach is to outsource to **CPU (classical processing unit)** and **QPU (quantum processing unit)** the parts that they can do best. The CPU takes care of listing the terms that need to be measured to compute the energy and also optimizing the circuit parameters. The QPU implements a quantum circuit representing the quantum state of a system and measures the energy. Some more details are given below:**CPU** can compute efficiently the energies associated to electron hopping and interactions (one-/two-body integrals by means of a Hartree-Fock calculation) that serve to represent the total energy operator, Hamiltonian. The [HartreeโFock (HF) method](https://en.wikipedia.org/wiki/Hartree%E2%80%93Fock_method:~:text=In%20computational%20physics%20and%20chemistry,system%20in%20a%20stationary%20state.) efficiently computes an approximate grounds state wavefunction by assuming that the latter can be represented by a single Slater determinant (e.g. for H$_2$ molecule in STO-3G basis with 4 spin-orbitals and qubits, $|\Psi_{HF} \rangle = |0101 \rangle$ where electrons occupy the lowest energy spin-orbitals). What QPU does later in VQE is finding a quantum state (corresponding circuit and its parameters) that can also represent other states associated missing electronic correlations (i.e. $\sum_i c_i |i\rangle$ states in $|\Psi \rangle = c_{HF}|\Psi_{HF} \rangle + \sum_i c_i |i\rangle $ where $i$ is a bitstring). After a HF calculation, operators in the Hamiltonian are mapped to measurements on a QPU using fermion-to-qubit transformations (see Hamiltonian section below). One can further analyze the properties of the system to reduce the number of qubits or shorten the ansatz circuit:- For Z2 symmetries and two-qubit reduction, see [Bravyi *et al*, 2017](https://arxiv.org/abs/1701.08213v1).- For entanglement forging, see [Eddins *et al.*, 2021](https://arxiv.org/abs/2104.10220v1).- For the adaptive ansatz see, [Grimsley *et al.*,2018](https://arxiv.org/abs/1812.11173v2), [Rattew *et al.*,2019](https://arxiv.org/abs/1910.09694), [Tang *et al.*,2019](https://arxiv.org/abs/1911.10205). You may use the ideas found in those works to find ways to shorten the quantum circuits.**QPU** implements quantum circuits (see Ansatzes section below), parameterized by angles $\vec\theta$, that would represent the ground state wavefunction by placing various single qubit rotations and entanglers (e.g. two-qubit gates). The quantum advantage lies in the fact that QPU can efficiently represent and store the exact wavefunction, which becomes intractable on a classical computer for systems that have more than a few atoms. Finally, QPU measures the operators of choice (e.g. ones representing a Hamiltonian).Below we go slightly more in mathematical details of each component of the VQE algorithm. It might be also helpful if you watch our [video episode about VQE](https://www.youtube.com/watch?v=Z-A6G0WVI9w). 
Hamiltonian Here we explain how we obtain the operators that we need to measure to obtain the energy of a given system.These terms are included in the molecular Hamiltonian defined as:$$\begin{aligned}\hat{H} &=\sum_{r s} h_{r s} \hat{a}_{r}^{\dagger} \hat{a}_{s} \\&+\frac{1}{2} \sum_{p q r s} g_{p q r s} \hat{a}_{p}^{\dagger} \hat{a}_{q}^{\dagger} \hat{a}_{r} \hat{a}_{s}+E_{N N}\end{aligned}$$with$$h_{p q}=\int \phi_{p}^{*}(r)\left(-\frac{1}{2} \nabla^{2}-\sum_{I} \frac{Z_{I}}{R_{I}-r}\right) \phi_{q}(r)$$$$g_{p q r s}=\int \frac{\phi_{p}^{*}\left(r_{1}\right) \phi_{q}^{*}\left(r_{2}\right) \phi_{r}\left(r_{2}\right) \phi_{s}\left(r_{1}\right)}{\left|r_{1}-r_{2}\right|} $$where the $h_{r s}$ and $g_{p q r s}$ are the one-/two-body integrals (using the Hartree-Fock method) and $E_{N N}$ the nuclear repulsion energy. The one-body integrals represent the kinetic energy of the electrons and their interaction with nuclei. The two-body integrals represent the electron-electron interaction.The $\hat{a}_{r}^{\dagger}, \hat{a}_{r}$ operators represent creation and annihilation of electron in spin-orbital $r$ and require mappings to operators, so that we can measure them on a quantum computer.Note that VQE minimizes the electronic energy so you have to retrieve and add the nuclear repulsion energy $E_{NN}$ to compute the total energy. So, for every non-zero matrix element in the $ h_{r s}$ and $g_{p q r s}$ tensors, we can construct corresponding Pauli string (tensor product of Pauli operators) with the following fermion-to-qubit transformation. For instance, in Jordan-Wigner mapping for an orbital $r = 3$, we obtain the following Pauli string:$$\hat a_{3}^{\dagger}= \hat \sigma_z \otimes \hat \sigma_z \otimes\left(\frac{ \hat \sigma_x-i \hat \sigma_y}{2}\right) \otimes 1 \otimes \cdots \otimes 1$$where $\hat \sigma_x, \hat \sigma_y, \hat \sigma_z$ are the well-known Pauli operators. The tensor products of $\hat \sigma_z$ operators are placed to enforce the fermionic anti-commutation relations.A representation of the Jordan-Wigner mapping between the 14 spin-orbitals of a water molecule and some 14 qubits is given below:Then, one simply replaces the one-/two-body excitations (e.g. $\hat{a}_{r}^{\dagger} \hat{a}_{s}$, $\hat{a}_{p}^{\dagger} \hat{a}_{q}^{\dagger} \hat{a}_{r} \hat{a}_{s}$) in the Hamiltonian by corresponding Pauli strings (i.e. $\hat{P}_i$, see picture above). The resulting operator set is ready to be measured on the QPU.For additional details see [Seeley *et al.*, 2012](https://arxiv.org/abs/1208.5986v1). AnsatzesThere are mainly 2 types of ansatzes you can use for chemical problems. - **q-UCC ansatzes** are physically inspired, and roughly map the electron excitations to quantum circuits. The q-UCCSD ansatz (`UCCSD`in Qiskit) possess all possible single and double electron excitations. The paired double q-pUCCD (`PUCCD`) and singlet q-UCCD0 (`SUCCD`) just consider a subset of such excitations (meaning significantly shorter circuits) and have proved to provide good results for dissociation profiles. For instance, q-pUCCD doesn't have single excitations and the double excitations are paired as in the image below.- **Heuristic ansatzes (`TwoLocal`)** were invented to shorten the circuit depth but still be able to represent the ground state. As in the figure below, the R gates represent the parametrized single qubit rotations and $U_{CNOT}$ the entanglers (two-qubit gates). 
The idea is that after repeating certain $D$-times the same block (with independent parameters) one can reach the ground state. For additional details refer to [Sokolov *et al.* (q-UCC ansatzes)](https://arxiv.org/abs/1911.10864v2) and [Barkoutsos *et al.* (Heuristic ansatzes)](https://arxiv.org/pdf/1805.04340.pdf). VQEGiven a Hermitian operator $\hat H$ with an unknown minimum eigenvalue $E_{min}$, associated with the eigenstate $|\psi_{min}\rangle$, VQE provides an estimate $E_{\theta}$, bounded by $E_{min}$:\begin{align*} E_{min} \le E_{\theta} \equiv \langle \psi(\theta) |\hat H|\psi(\theta) \rangle\end{align*} where $|\psi(\theta)\rangle$ is the trial state associated with $E_{\theta}$. By applying a parameterized circuit, represented by $U(\theta)$, to some arbitrary starting state $|\psi\rangle$, the algorithm obtains an estimate $U(\theta)|\psi\rangle \equiv |\psi(\theta)\rangle$ on $|\psi_{min}\rangle$. The estimate is iteratively optimized by a classical optimizer by changing the parameter $\theta$ and minimizing the expectation value of $\langle \psi(\theta) |\hat H|\psi(\theta) \rangle$. As applications of VQE, there are possibilities in molecular dynamics simulations, see [Sokolov *et al.*, 2021](https://arxiv.org/abs/2008.08144v1), and excited states calculations, see [Ollitrault *et al.*, 2019](https://arxiv.org/abs/1910.12890) to name a few. References for additional details For the qiskit-nature tutorial that implements this algorithm see [here](https://qiskit.org/documentation/nature/tutorials/01_electronic_structure.html)but this won't be sufficient and you might want to look on the [first page of github repository](https://github.com/Qiskit/qiskit-nature) and the [test folder](https://github.com/Qiskit/qiskit-nature/tree/main/test) containing tests that are written for each component, they provide the base code for the use of each functionality. Part 1: Tutorial - VQE for H$_2$ molecule In this part, you will simulate H$_2$ molecule using the STO-3G basis with the PySCF driver and Jordan-Wigner mapping.We will guide you through the following parts so then you can tackle harder problems. 1. DriverThe interfaces to the classical chemistry codes that are available in Qiskit are called drivers.We have for example `PSI4Driver`, `PyQuanteDriver`, `PySCFDriver` are available. By running a driver (Hartree-Fock calculation for a given basis set and molecular geometry), in the cell below, we obtain all the necessary information about our molecule to apply then a quantum algorithm.
###Code
from qiskit_nature.drivers import PySCFDriver
molecule = "H .0 .0 .0; H .0 .0 0.739"
driver = PySCFDriver(atom=molecule)
qmolecule = driver.run()
###Output
_____no_output_____
###Markdown
Tutorial questions 1 Look into the attributes of `qmolecule` and answer the questions below. 1. We need to know the basic characteristics of our molecule. What is the total number of electrons in your system? 2. What is the number of molecular orbitals? 3. What is the number of spin-orbitals? 4. How many qubits would you need to simulate this molecule with Jordan-Wigner mapping? 5. What is the value of the nuclear repulsion energy? You can find the answers at the end of this notebook.
###Code
# WRITE YOUR CODE BETWEEN THESE LINES - START
n_el = qmolecule.num_alpha + qmolecule.num_beta
n_mo = qmolecule.num_molecular_orbitals
n_so = 2 * qmolecule.num_molecular_orbitals
n_q = 2* qmolecule.num_molecular_orbitals
e_nn = qmolecule.nuclear_repulsion_energy
print(n_el, n_mo, n_so, n_q, e_nn)
# WRITE YOUR CODE BETWEEN THESE LINES - END
###Output
_____no_output_____
###Markdown
2. Electronic structure problemYou can then create an `ElectronicStructureProblem` that can produce the list of fermionic operators before mapping them to qubits (Pauli strings).
###Code
from qiskit_nature.problems.second_quantization.electronic import ElectronicStructureProblem
problem = ElectronicStructureProblem(driver)
# Generate the second-quantized operators
second_q_ops = problem.second_q_ops()
# Hamiltonian
main_op = second_q_ops[0]
###Output
_____no_output_____
###Markdown
3. QubitConverterThis lets you define the mapping that you will use in the simulation. You can try different mappings, but we will stick to `JordanWignerMapper` as it gives a simple correspondence: a qubit represents a spin-orbital in the molecule.
###Code
from qiskit_nature.mappers.second_quantization import ParityMapper, BravyiKitaevMapper, JordanWignerMapper
from qiskit_nature.converters.second_quantization.qubit_converter import QubitConverter
# Setup the mapper and qubit converter
mapper_type = 'JordanWignerMapper'
if mapper_type == 'ParityMapper':
mapper = ParityMapper()
elif mapper_type == 'JordanWignerMapper':
mapper = JordanWignerMapper()
elif mapper_type == 'BravyiKitaevMapper':
mapper = BravyiKitaevMapper()
converter = QubitConverter(mapper=mapper, two_qubit_reduction=False)
# The fermionic operators are mapped to qubit operators
num_particles = (problem.molecule_data_transformed.num_alpha,
problem.molecule_data_transformed.num_beta)
qubit_op = converter.convert(main_op, num_particles=num_particles)
###Output
_____no_output_____
###Markdown
4. Initial stateAs we described in the Theory section, a good initial state in chemistry is the HF state (i.e. $|\Psi_{HF} \rangle = |0101 \rangle$). We can initialize it as follows:
###Code
from qiskit_nature.circuit.library import HartreeFock
num_particles = (problem.molecule_data_transformed.num_alpha,problem.molecule_data_transformed.num_beta)
num_spin_orbitals = 2 * problem.molecule_data_transformed.num_molecular_orbitals
init_state = HartreeFock(num_spin_orbitals, num_particles, converter)
print(init_state)
###Output
_____no_output_____
###Markdown
5. AnsatzOne of the most important choices is the quantum circuit that you choose to approximate your ground state.Below is an example based on the Qiskit circuit library, which contains many building blocks for making your own circuit.
###Code
from qiskit.circuit.library import TwoLocal
from qiskit_nature.circuit.library import UCCSD, PUCCD, SUCCD
# Choose the ansatz
ansatz_type = "TwoLocal"
# Parameters for q-UCC antatze
num_particles = (problem.molecule_data_transformed.num_alpha,problem.molecule_data_transformed.num_beta)
num_spin_orbitals = 2 * problem.molecule_data_transformed.num_molecular_orbitals
# Put arguments for twolocal
if ansatz_type == "TwoLocal":
# Single qubit rotations that are placed on all qubits with independent parameters
rotation_blocks = ['ry', 'rz']
# Entangling gates
entanglement_blocks = 'cx'
# How the qubits are entangled
entanglement = 'full'
# Repetitions of rotation_blocks + entanglement_blocks with independent parameters
repetitions = 3
# Skip the final rotation_blocks layer
skip_final_rotation_layer = True
ansatz = TwoLocal(qubit_op.num_qubits, rotation_blocks, entanglement_blocks, reps=repetitions,
entanglement=entanglement, skip_final_rotation_layer=skip_final_rotation_layer)
# Add the initial state
ansatz.compose(init_state, front=True, inplace=True)
elif ansatz_type == "UCCSD":
ansatz = UCCSD(converter,num_particles,num_spin_orbitals,initial_state = init_state)
elif ansatz_type == "PUCCD":
ansatz = PUCCD(converter,num_particles,num_spin_orbitals,initial_state = init_state)
elif ansatz_type == "SUCCD":
ansatz = SUCCD(converter,num_particles,num_spin_orbitals,initial_state = init_state)
elif ansatz_type == "Custom":
# Example of how to write your own circuit
from qiskit.circuit import Parameter, QuantumCircuit, QuantumRegister
# Define the variational parameter
theta = Parameter('a')
n = qubit_op.num_qubits
# Make an empty quantum circuit
qc = QuantumCircuit(qubit_op.num_qubits)
qubit_label = 0
# Place a Hadamard gate
qc.h(qubit_label)
# Place a CNOT ladder
for i in range(n-1):
qc.cx(i, i+1)
# Visual separator
qc.barrier()
# rz rotations on all qubits
qc.rz(theta, range(n))
ansatz = qc
ansatz.compose(init_state, front=True, inplace=True)
print(ansatz)
###Output
_____no_output_____
###Markdown
6. BackendThis is where you specify the simulator or device where you want to run your algorithm.We will focus on the `statevector_simulator` in this challenge.
###Code
from qiskit import Aer
backend = Aer.get_backend('statevector_simulator')
###Output
_____no_output_____
###Markdown
7. OptimizerThe optimizer guides the evolution of the ansatz parameters, so it is very important to investigate the energy convergence, as it defines the number of measurements that have to be performed on the QPU.A clever choice can drastically reduce the number of needed energy evaluations.
###Code
from qiskit.algorithms.optimizers import COBYLA, L_BFGS_B, SPSA, SLSQP
optimizer_type = 'COBYLA'
# You may want to tune the parameters
# of each optimizer, here the defaults are used
if optimizer_type == 'COBYLA':
optimizer = COBYLA(maxiter=500)
elif optimizer_type == 'L_BFGS_B':
optimizer = L_BFGS_B(maxfun=500)
elif optimizer_type == 'SPSA':
optimizer = SPSA(maxiter=500)
elif optimizer_type == 'SLSQP':
optimizer = SLSQP(maxiter=500)
###Output
_____no_output_____
###Markdown
8. Exact eigensolverFor learning purposes, we can solve the problem exactly with the exact diagonalization of the Hamiltonian matrix so we know where to aim with VQE.Of course, the dimensions of this matrix scale exponentially in the number of molecular orbitals so you can try doing this for a large molecule of your choice and see how slow this becomes. For very large systems you would run out of memory trying to store their wavefunctions.
###Code
from qiskit_nature.algorithms.ground_state_solvers.minimum_eigensolver_factories import NumPyMinimumEigensolverFactory
from qiskit_nature.algorithms.ground_state_solvers import GroundStateEigensolver
import numpy as np
def exact_diagonalizer(problem, converter):
solver = NumPyMinimumEigensolverFactory()
calc = GroundStateEigensolver(converter, solver)
result = calc.solve(problem)
return result
result_exact = exact_diagonalizer(problem, converter)
exact_energy = np.real(result_exact.eigenenergies[0])
print("Exact electronic energy", exact_energy)
print(result_exact)
# The targeted electronic energy for H2 is -1.85336 Ha
# Check with your VQE result.
###Output
_____no_output_____
###Markdown
9. VQE and initial parameters for the ansatzNow we can import the VQE class and run the algorithm.
###Code
from qiskit.algorithms import VQE
from IPython.display import display, clear_output
# Print and save the data in lists
def callback(eval_count, parameters, mean, std):
# Overwrites the same line when printing
display("Evaluation: {}, Energy: {}, Std: {}".format(eval_count, mean, std))
clear_output(wait=True)
counts.append(eval_count)
values.append(mean)
params.append(parameters)
deviation.append(std)
counts = []
values = []
params = []
deviation = []
# Set initial parameters of the ansatz
# We choose a fixed small displacement
# So all participants start from similar starting point
try:
initial_point = [0.01] * len(ansatz.ordered_parameters)
except:
initial_point = [0.01] * ansatz.num_parameters
algorithm = VQE(ansatz,
optimizer=optimizer,
quantum_instance=backend,
callback=callback,
initial_point=initial_point)
result = algorithm.compute_minimum_eigenvalue(qubit_op)
print(result)
###Output
_____no_output_____
###Markdown
10. Scoring function We need to judge how good your VQE simulations are, i.e. your choice of ansatz/optimizer. For this, we implemented the following simple scoring function:$$ score = N_{CNOT}$$where $N_{CNOT}$ is the number of CNOTs. But you have to reach the chemical accuracy, which is $\delta E_{chem} = 0.004$ Ha $= 4$ mHa, and which may be hard to reach depending on the problem. You have to reach the accuracy we set with a minimal number of CNOTs to win the challenge. The lower the score the better!
###Code
# Store results in a dictionary
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import Unroller
# Unroller transpile your circuit into CNOTs and U gates
pass_ = Unroller(['u', 'cx'])
pm = PassManager(pass_)
ansatz_tp = pm.run(ansatz)
cnots = ansatz_tp.count_ops()['cx']
score = cnots
accuracy_threshold = 4.0 # in mHa
energy = result.optimal_value
if ansatz_type == "TwoLocal":
result_dict = {
'optimizer': optimizer.__class__.__name__,
'mapping': converter.mapper.__class__.__name__,
'ansatz': ansatz.__class__.__name__,
'rotation blocks': rotation_blocks,
'entanglement_blocks': entanglement_blocks,
'entanglement': entanglement,
'repetitions': repetitions,
'skip_final_rotation_layer': skip_final_rotation_layer,
'energy (Ha)': energy,
'error (mHa)': (energy-exact_energy)*1000,
'pass': (energy-exact_energy)*1000 <= accuracy_threshold,
'# of parameters': len(result.optimal_point),
'final parameters': result.optimal_point,
'# of evaluations': result.optimizer_evals,
'optimizer time': result.optimizer_time,
'# of qubits': int(qubit_op.num_qubits),
'# of CNOTs': cnots,
'score': score}
else:
result_dict = {
'optimizer': optimizer.__class__.__name__,
'mapping': converter.mapper.__class__.__name__,
'ansatz': ansatz.__class__.__name__,
'rotation blocks': None,
'entanglement_blocks': None,
'entanglement': None,
'repetitions': None,
'skip_final_rotation_layer': None,
'energy (Ha)': energy,
'error (mHa)': (energy-exact_energy)*1000,
        'pass': (energy-exact_energy)*1000 <= accuracy_threshold,
'# of parameters': len(result.optimal_point),
'final parameters': result.optimal_point,
'# of evaluations': result.optimizer_evals,
'optimizer time': result.optimizer_time,
'# of qubits': int(qubit_op.num_qubits),
'# of CNOTs': cnots,
'score': score}
# Plot the results
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('Iterations')
ax.set_ylabel('Energy')
ax.grid()
fig.text(0.7, 0.75, f'Energy: {result.optimal_value:.3f}\nScore: {score:.0f}')
plt.title(f"{result_dict['optimizer']}-{result_dict['mapping']}\n{result_dict['ansatz']}")
ax.plot(counts, values)
ax.axhline(exact_energy, linestyle='--')
fig_title = f"\
{result_dict['optimizer']}-\
{result_dict['mapping']}-\
{result_dict['ansatz']}-\
Energy({result_dict['energy (Ha)']:.3f})-\
Score({result_dict['score']:.0f})\
.png"
fig.savefig(fig_title, dpi=300)
# Display and save the data
import pandas as pd
import os.path
filename = 'results_h2.csv'
if os.path.isfile(filename):
result_df = pd.read_csv(filename)
result_df = result_df.append([result_dict])
else:
result_df = pd.DataFrame.from_dict([result_dict])
result_df.to_csv(filename)
result_df[['optimizer','ansatz', '# of qubits', '# of parameters','rotation blocks', 'entanglement_blocks',
'entanglement', 'repetitions', 'error (mHa)', 'pass', 'score']]
###Output
_____no_output_____
###Markdown
Tutorial questions 2 Experiment with all the parameters and then:1. Can you find your best (best score) heuristic ansatz (by modifying parameters of `TwoLocal` ansatz) and optimizer?2. Can you find your best q-UCC ansatz (choose among `UCCSD, PUCCD or SUCCD` ansatzes) and optimizer?3. In the cell where we define the ansatz, can you modify the `Custom` ansatz by placing gates yourself to write a better circuit than your `TwoLocal` circuit? For each question, give `ansatz` objects.Remember, you have to reach the chemical accuracy $|E_{exact} - E_{VQE}| \leq 0.004 $ Ha $= 4$ mHa.
###Code
# WRITE YOUR CODE BETWEEN THESE LINES - START
# WRITE YOUR CODE BETWEEN THESE LINES - END
###Output
_____no_output_____
###Markdown
Part 2: Final Challenge - VQE for LiH molecule In this part, you will simulate the LiH molecule using the STO-3G basis with the PySCF driver. Goal Experiment with all the parameters and then find your best ansatz. You can be as creative as you want! For each question, give `ansatz` objects as for Part 1. Your final score will be based only on Part 2. Be aware that the system is larger now. Work out how many qubits you would need for this system by retrieving the number of spin-orbitals. Reducing the problem sizeYou might want to reduce the number of qubits for your simulation:- you could freeze the core electrons that do not contribute significantly to chemistry and consider only the valence electrons. Qiskit already has this functionality implemented. So inspect the different transformers in `qiskit_nature.transformers` and find the one that performs the freeze core approximation.- you could use `ParityMapper` with `two_qubit_reduction=True` to eliminate 2 qubits.- you could reduce the number of qubits by inspecting the symmetries of your Hamiltonian. Find a way to use `Z2Symmetries` in Qiskit. Custom ansatz You might want to explore the ideas proposed in [Grimsley *et al.*,2018](https://arxiv.org/abs/1812.11173v2), [H. L. Tang *et al.*,2019](https://arxiv.org/abs/1911.10205), [Rattew *et al.*,2019](https://arxiv.org/abs/1910.09694), [Tang *et al.*,2019](https://arxiv.org/abs/1911.10205). You can even try machine learning algorithms to generate the best ansatz circuits. Setup the simulationLet's now run the Hartree-Fock calculation and the rest is up to you! Attention We give below the `driver`, the `initial_point`, the `initial_state` that should remain as given. You are free then to explore all other things available in Qiskit. So you have to start from this initial point (all parameters set to 0.01): `initial_point = [0.01] * len(ansatz.ordered_parameters)` or `initial_point = [0.01] * ansatz.num_parameters` and your initial state has to be the Hartree-Fock state: `init_state = HartreeFock(num_spin_orbitals, num_particles, converter)` For each question, give the `ansatz` object. Remember, you have to reach the chemical accuracy $|E_{exact} - E_{VQE}| \leq 0.004 $ Ha $= 4$ mHa.
###Code
from qiskit_nature.drivers import PySCFDriver
molecule = 'Li 0.0 0.0 0.0; H 0.0 0.0 1.5474'
driver = PySCFDriver(atom=molecule)
qmolecule = driver.run()
n_el = qmolecule.num_alpha + qmolecule.num_beta
n_mo = qmolecule.num_molecular_orbitals
n_so = 2 * qmolecule.num_molecular_orbitals
n_q = 2 * qmolecule.num_molecular_orbitals
e_nn = qmolecule.nuclear_repulsion_energy
print(n_el, n_mo, n_so, n_q, e_nn)
# WRITE YOUR CODE BETWEEN THESE LINES - START
from qiskit_nature.drivers import PySCFDriver
from qiskit_nature.transformers import FreezeCoreTransformer
from qiskit.aqua.operators import Z2Symmetries
molecule = 'Li 0.0 0.0 0.0; H 0.0 0.0 1.5474'
driver = PySCFDriver(atom=molecule)
qmolecule = driver.run()
#transformer = FreezeCoreTransformer(freeze_core=True, remove_orbitals=[3])
transformer = FreezeCoreTransformer()
qmolecule = transformer.transform(qmolecule)
#----------------------------------------------
n_el = qmolecule.num_alpha + qmolecule.num_beta
n_mo = qmolecule.num_molecular_orbitals
n_so = 2 * qmolecule.num_molecular_orbitals
n_q = 2 * qmolecule.num_molecular_orbitals
e_nn = qmolecule.nuclear_repulsion_energy
#print(n_el, n_mo, n_so, n_q, e_nn)
#----------------------------------------------#### 2. Electronic structure problem
from qiskit_nature.problems.second_quantization.electronic import ElectronicStructureProblem
#problem = ElectronicStructureProblem(driver)
problem = ElectronicStructureProblem(driver
,q_molecule_transformers=[FreezeCoreTransformer(freeze_core=True
,remove_orbitals=[3,4]
)])
# Generate the second-quantized operators
second_q_ops = problem.second_q_ops()
# Hamiltonian
main_op = second_q_ops[0]
#----------------------------------------------#### 3. QubitConverter
from qiskit_nature.mappers.second_quantization import ParityMapper, BravyiKitaevMapper, JordanWignerMapper
from qiskit_nature.converters.second_quantization.qubit_converter import QubitConverter
# Setup the mapper and qubit converter
mapper = ParityMapper()
#mapper = JordanWignerMapper()
#mapper = BravyiKitaevMapper()
converter = QubitConverter(mapper=mapper, two_qubit_reduction=True
, z2symmetry_reduction=[1,1]
)
# The fermionic operators are mapped to qubit operators
num_particles = (problem.molecule_data_transformed.num_alpha,problem.molecule_data_transformed.num_beta)
num_particle = qmolecule.num_alpha + qmolecule.num_beta
#main_op = converter.convert(main_op)
qubit_op = converter.convert(main_op, num_particles=num_particles)
#z2symm = Z2Symmetries.find_Z2_symmetries(qubit_op)
#print(qubit_op)
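# Note: the z2symmetry_reduction=[1,1] argument passed to QubitConverter above already
# applies a Z2-symmetry tapering internally; uncommenting the find_Z2_symmetries line
# above would only be needed to inspect the detected symmetries (e.g. z2symm.symmetries) by hand.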
#----------------------------------------------#### 4. Initial state
from qiskit_nature.circuit.library import HartreeFock
num_particles = (problem.molecule_data_transformed.num_alpha,problem.molecule_data_transformed.num_beta)
num_spin_orbitals = 2 * problem.molecule_data_transformed.num_molecular_orbitals
init_state = HartreeFock(num_spin_orbitals, num_particles, converter)
#print(init_state)
#----------------------------------------------#### 5. Ansatz
from qiskit.circuit.library import TwoLocal
from qiskit_nature.circuit.library import UCCSD, PUCCD, SUCCD
# Choose the ansatz
ansatz_type = "TwoLocal"
# Parameters for q-UCC antatze
num_particles = (problem.molecule_data_transformed.num_alpha,problem.molecule_data_transformed.num_beta)
num_spin_orbitals = 2 * problem.molecule_data_transformed.num_molecular_orbitals
# Put arguments for twolocal
if ansatz_type == "TwoLocal":
# Single qubit rotations that are placed on all qubits with independent parameters
rotation_blocks = ['ry', 'rz']
# Entangling gates
entanglement_blocks = 'cx'
# How the qubits are entangled
entanglement = 'linear'
# Repetitions of rotation_blocks + entanglement_blocks with independent parameters
repetitions = 1
# Skip the final rotation_blocks layer
skip_final_rotation_layer = False
ansatz = TwoLocal(qubit_op.num_qubits, rotation_blocks, entanglement_blocks,
#entanglement=[(0, 1), (1, 2), (2, 0)], skip_final_rotation_layer=skip_final_rotation_layer)
reps=repetitions, entanglement=entanglement, skip_final_rotation_layer=skip_final_rotation_layer)
# Add the initial state
#print(ansatz)
ansatz.compose(init_state, front=True, inplace=True)
elif ansatz_type == "UCCSD": ansatz = UCCSD(converter,num_particles,num_spin_orbitals,initial_state = init_state)
elif ansatz_type == "PUCCD": ansatz = PUCCD(converter,num_particles,num_spin_orbitals,initial_state = init_state)
elif ansatz_type == "SUCCD": ansatz = SUCCD(converter,num_particles,num_spin_orbitals,initial_state = init_state)
elif ansatz_type == "Custom":
# Example of how to write your own circuit
from qiskit.circuit import Parameter, QuantumCircuit, QuantumRegister
# Define the variational parameter
theta = Parameter('a')
n = qubit_op.num_qubits
# Make an empty quantum circuit
qc = QuantumCircuit(qubit_op.num_qubits)
qubit_label = 0
# Place a Hadamard gate
qc.h(qubit_label)
# Place a CNOT ladder
for i in range(n-1):
qc.cx(i, i+1)
# Visual separator
#qc.barrier()
# rz rotations on all qubits
qc.rz(theta, range(n))
ansatz = qc
ansatz.compose(init_state, front=True, inplace=True)
#print(ansatz)
#----------------------------------------------#### 6. Backend
from qiskit import Aer
backend = Aer.get_backend('statevector_simulator')
#----------------------------------------------#### 7. Optimizer
from qiskit.algorithms.optimizers import COBYLA, L_BFGS_B, SPSA, SLSQP
optimizer_type = 'SLSQP'
# You may want to tune the parameters of each optimizer, here the defaults are used
if optimizer_type == 'COBYLA': optimizer = COBYLA(maxiter=5000)
elif optimizer_type == 'L_BFGS_B': optimizer = L_BFGS_B(maxfun=50000)
elif optimizer_type == 'SPSA': optimizer = SPSA(maxiter=500)
elif optimizer_type == 'SLSQP': optimizer = SLSQP(maxiter=500)
#----------------------------------------------#### 8. Exact eigensolver
from qiskit_nature.algorithms.ground_state_solvers.minimum_eigensolver_factories import NumPyMinimumEigensolverFactory
from qiskit_nature.algorithms.ground_state_solvers import GroundStateEigensolver
import numpy as np
def exact_diagonalizer(problem, converter):
solver = NumPyMinimumEigensolverFactory()
calc = GroundStateEigensolver(converter, solver)
result = calc.solve(problem)
return result
result_exact = exact_diagonalizer(problem, converter)
exact_energy = np.real(result_exact.eigenenergies[0])
print("Exact electronic energy", exact_energy)
#print(result_exact)
# Use the exact electronic energy printed above as the target for LiH
# and check it against your VQE result.
#----------------------------------------------#### 9. VQE and initial parameters for the ansatz
from qiskit.algorithms import VQE
from IPython.display import display, clear_output
# Print and save the data in lists
def callback(eval_count, parameters, mean, std):
# Overwrites the same line when printing
display("Evaluation: {}, Energy: {}, Std: {}".format(eval_count, mean, std))
clear_output(wait=True)
counts.append(eval_count)
values.append(mean)
params.append(parameters)
deviation.append(std)
counts = []
values = []
params = []
deviation = []
# Set initial parameters of the ansatz
# We choose a fixed small displacement
# So all participants start from similar starting point
try:
initial_point = [0.01] * len(ansatz.ordered_parameters)
except:
initial_point = [0.01] * ansatz.num_parameters
algorithm = VQE(ansatz,
optimizer=optimizer,
quantum_instance=backend,
callback=callback,
initial_point=initial_point)
result = algorithm.compute_minimum_eigenvalue(qubit_op)
#print(result)
#----------------------------------------------#### 9. Scoring function
# Store results in a dictionary
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import Unroller
# Unroller transpile your circuit into CNOTs and U gates
pass_ = Unroller(['u', 'cx'])
pm = PassManager(pass_)
ansatz_tp = pm.run(ansatz)
cnots = ansatz_tp.count_ops()['cx']
score = cnots
accuracy_threshold = 4.0 # in mHa
energy = result.optimal_value
if ansatz_type == "TwoLocal":
result_dict = {
'optimizer': optimizer.__class__.__name__,
'mapping': converter.mapper.__class__.__name__,
'ansatz': ansatz.__class__.__name__,
'rotation blocks': rotation_blocks,
'entanglement_blocks': entanglement_blocks,
'entanglement': entanglement,
'repetitions': repetitions,
'skip_final_rotation_layer': skip_final_rotation_layer,
'energy (Ha)': energy,
'error (mHa)': (energy-exact_energy)*1000,
'pass': (energy-exact_energy)*1000 <= accuracy_threshold,
'# of parameters': len(result.optimal_point),
'final parameters': result.optimal_point,
'# of evaluations': result.optimizer_evals,
'optimizer time': result.optimizer_time,
'# of qubits': int(qubit_op.num_qubits),
'# of CNOTs': cnots,
'score': score}
else:
result_dict = {
'optimizer': optimizer.__class__.__name__,
'mapping': converter.mapper.__class__.__name__,
'ansatz': ansatz.__class__.__name__,
'rotation blocks': None,
'entanglement_blocks': None,
'entanglement': None,
'repetitions': None,
'skip_final_rotation_layer': None,
'energy (Ha)': energy,
'error (mHa)': (energy-exact_energy)*1000,
'pass': (energy-exact_energy)*1000 <= accuracy_threshold,
'# of parameters': len(result.optimal_point),
'final parameters': result.optimal_point,
'# of evaluations': result.optimizer_evals,
'optimizer time': result.optimizer_time,
'# of qubits': int(qubit_op.num_qubits),
'# of CNOTs': cnots,
'score': score}
# Plot the results
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('Iterations')
ax.set_ylabel('Energy')
ax.grid()
fig.text(0.7, 0.75, f'Energy: {result.optimal_value:.3f}\nScore: {score:.0f}')
plt.title(f"{result_dict['optimizer']}-{result_dict['mapping']}\n{result_dict['ansatz']}")
ax.plot(counts, values)
ax.axhline(exact_energy, linestyle='--')
fig_title = f"\
{result_dict['optimizer']}-\
{result_dict['mapping']}-\
{result_dict['ansatz']}-\
Energy({result_dict['energy (Ha)']:.3f})-\
Score({result_dict['score']:.0f})\
.png"
fig.savefig(fig_title, dpi=300)
# Display and save the data
import pandas as pd
#import os.path
# filename = 'results_h2.csv'
# if os.path.isfile(filename):
# result_df = pd.read_csv(filename)
# result_df = result_df.append([result_dict])
# else:
result_df = pd.DataFrame.from_dict([result_dict])
#result_df.to_csv(filename)
result_df[['optimizer','ansatz', '# of qubits', 'error (mHa)', 'pass', 'score','# of parameters','rotation blocks', 'entanglement_blocks',
'entanglement', 'repetitions']]
# WRITE YOUR CODE BETWEEN THESE LINES - END
# Check your answer using following code
from qc_grader import grade_ex5
freeze_core = True # change to True if you freezed core electrons
grade_ex5(ansatz,qubit_op,result,freeze_core)
# Submit your answer. You can re-submit at any time.
from qc_grader import submit_ex5
submit_ex5(ansatz,qubit_op,result,freeze_core)
###Output
Submitting your answer for ex5. Please wait...
Success! Your answer has been submitted.
|
code/tf/Logistical regression - MINST.ipynb | ###Markdown
TensorFlow - Logistical RegressionThe MNIST database (Mixed National Institute of Standards and Technology database) is a large database of handwritten digits that is commonly used for training various image processing systems. When one learns how to program, there's a tradition that the first thing you do is print "Hello World." Just like programming has Hello World, machine learning has MNIST.MNIST is a simple computer vision dataset. It consists of images of handwritten digits like these:It also includes labels for each image, telling us which digit it is. For example, the labels for the above images are 5, 0, 4, and 1.
###Code
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
from tensorflow.examples.tutorials.mnist import input_data
%matplotlib inline
epochs = 1000
learning_rate = 0.5
###Output
_____no_output_____
###Markdown
Load Data and LabelsThe MNIST data is split into three parts: - 55,000 data points of training data (mnist.train)- 10,000 points of test data (mnist.test)- 5,000 points of validation data (mnist.validation).
###Code
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print (mnist.train.images.shape)
print (mnist.train.labels.shape)
print (mnist.test.images.shape)
print (mnist.test.labels.shape)
###Output
(10000, 784)
(10000, 10)
###Markdown
Data Visualization
###Code
pcolor(mnist.train.images[10000].reshape(28,28), cmap=plt.cm.gray_r)
print (mnist.train.images[10000].reshape(28,28)[20:25,5:10])
print ("Label")
print (mnist.train.labels[10000])
###Output
[[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]]
Label
[ 0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]
###Markdown
"one-hot" format to present labels- 0 = [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.]- 1 = [0., 1., 0., 0., 0., 0., 0., 0., 0., 0.]- 2 = [0., 0., 1., 0., 0., 0., 0., 0., 0., 0.]- 3 = [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.]- 4 = [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.]- 5 = [0., 0., 0., 0., 0., 1., 0., 0., 0., 0.]- 6 = [0., 0., 0., 0., 0., 0., 1., 0., 0., 0.]- 7 = [0., 0., 0., 0., 0., 0., 0., 1., 0., 0.]- 8 = [0., 0., 0., 0., 0., 0., 0., 0., 1., 0.]- 9 = [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]
###Code
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Define loss and optimizer
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
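# Note: computing log(softmax) explicitly can be numerically unstable when a predicted
# probability underflows to 0; tf.nn.softmax_cross_entropy_with_logits computes the same
# quantity in a fused, numerically safer way.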
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# TensorFlow Innitialization
init = tf.initialize_all_variables()
errors = []
with tf.Session() as sess:
sess.run(init)
for i in range(epochs):
batch_xs, batch_ys = mnist.train.next_batch(100)
_, cross_entropy_value, y_value = sess.run([optimizer, cross_entropy, y], feed_dict={x: batch_xs, y_: batch_ys})
accuracy_value = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
errors.append(1-accuracy_value)
print (errors[-1])
plt.plot([np.mean(errors[i-50:i]) for i in range(len(errors))])
plt.show()
###Output
/Users/paulrad/anaconda/lib/python3.5/site-packages/numpy/core/_methods.py:59: RuntimeWarning: Mean of empty slice.
warnings.warn("Mean of empty slice.", RuntimeWarning)
|
practice/hw1/.ipynb_checkpoints/hw1-checkpoint.ipynb | ###Markdown
Step 1
###Code
# this method generates pairs of rankings of length 5
# r: number of possible relevance grades (each element takes a value in range(r))
def generatePairs(r):
items=range(r)
res=[]
for p in itertools.product(items, repeat=5):
for e in itertools.product(items, repeat=5):
res.append([p,e])
print("generated "+str(len(res))+" pairs")
return res
# test:
pairs=generatePairs(NUM_PAIRS)
print(pairs)
###Output
generated 59049 pairs
###Markdown
Step 2
###Code
def DCG(inst):
res=0
for r in range(1,len(inst)+1):
res+=(math.pow(2,(inst[r-1])) -1)\
/(math.log(1+r,2))
return res
def ERR(inst):
res=0
prod=1
for r in range(len(inst)):
R=normRel(inst[r])
res+=prod*R/(r+1)
prod*=1-R
return res
def RBP(inst):
p = 0.8
res=0
for r in range(len(inst)):
res+=inst[r]*math.pow(p,r)
return (1-p)*res
def normRel(rel,max=4):
return (math.pow(2,rel)-1)/math.pow(2,max)
def measureAndGroup(pairs):
groups={
'DCG':[],
'RBP':[],
'ERR':[]
}
DCGmin=99999999
DCGmax=-99999999
RBPmin=99999999
RBPmax=-99999999
for i in range(0,10):
groups['DCG'].append([])
groups['RBP'].append([])
groups['ERR'].append([])
for pair in pairs:
p1=pair[0] # producton (P)
p2=pair[1] # experimental (E)
DCG1=DCG(p1)
DCG2=DCG(p2)
RBP1=RBP(p1)
RBP2=RBP(p2)
ERR1= ERR(p1)
ERR2= ERR(p2)
deltaDCG= DCG2-DCG1
deltaRBP= RBP2-RBP1
deltaERR=ERR2-ERR1
pair.append({
"deltaDCG":deltaDCG,
"deltaRBP":deltaRBP,
"deltaERR":deltaERR
})
# storing info about min/max
if(deltaDCG>DCGmax and deltaDCG>0):
DCGmax=deltaDCG
if(deltaDCG<DCGmin and deltaDCG>0):
DCGmin=deltaDCG
if(deltaRBP>RBPmax and deltaRBP>0):
RBPmax=deltaRBP
if(deltaRBP<RBPmin and deltaRBP>0):
RBPmin=deltaRBP
# grouping
binDCG=(DCGmax-DCGmin)/10
binRBP=(RBPmax-RBPmin)/10
binERR=0.1
for pair in pairs:
measures = pair[2]
DCGgr= int( math.floor(measures['deltaDCG']/binDCG))
RBPgr= int(math.floor(measures['deltaRBP']/binRBP))
ERRgr= int(math.floor(measures['deltaERR']/binERR))
if(DCGgr==10): DCGgr-=1
if(RBPgr==10): RBPgr-=1
if(ERRgr==10): ERRgr-=1
tuple=(pair[0],pair[1])
if measures['deltaDCG']>0:
groups['DCG'][DCGgr].append(tuple)
if measures['deltaRBP']>0:
groups['RBP'][RBPgr].append(tuple)
if measures['deltaERR']>0:
groups['ERR'][ERRgr].append(tuple)
return groups
def plotBins(bins):
ind = np.arange(10)
dcg_bin_count = [len(bins['DCG'][i]) for i in range(10)]
rbp_bin_count = [len(bins['RBP'][i]) for i in range(10)]
err_bin_count = [len(bins['ERR'][i]) for i in range(10)]
width = 0.2
fig, ax = plt.subplots()
rects1 = ax.bar(ind, dcg_bin_count, width, color='r', label='DCG')
rects2 = ax.bar(ind + width, rbp_bin_count, width, color='g', label='RBP')
rects3 = ax.bar(ind + 2 * width, err_bin_count, width, color='b', label='ERR')
plt.grid(True)
plt.legend()
plt.show()
## test
seq =[4,4,4,4,4]
print("for seq: "+ str(seq))
print("DCG is: "+ str(DCG(seq)))
print("ERR is: "+ str(ERR(seq)))
print("RBP is: "+ str(RBP(seq)))
pairs=generatePairs(NUM_PAIRS)
groups= measureAndGroup(pairs)
# plot the number of elements in each bin in each measure
plotBins(groups)
###Output
for seq: [4, 4, 4, 4, 4]
DCG is: 44.2268867832
ERR is: 0.968077659607
RBP is: 2.68928
generated 9765625 pairs
###Markdown
Step 3
###Code
# return an array of interleaved documents
def teamDraftInterleaving(A,B):
I =[] # unterleaved result
i=0
j=0
teamA=[]
teamB=[]
while (i<(len(A)) or j<(len(B))): # while not used all items
if(len(teamA)<len(teamB) or len(teamA)==len(teamB) and random.getrandbits(1)==1):
if(i==len(A)): continue; # used all items in A
I.append(Doc(label=A[i],team="A")) # appending an object
teamA.append(A[i])
i+=1
else:
if(j==len(B)): continue; # used all items in B
I.append(Doc(label=B[j],team="B")) # appending an object
teamB.append(B[j])
j+=1
return I
# test
A=[1,2,3,4,5]
B=[6,7,8,9,10]
I= teamDraftInterleaving(A,B)
for el in I:
print(str(el.label)+' from team '+str(el.team))
# or more sophisticated test
pairs=generatePairs(NUM_PAIRS)
groups= measureAndGroup(pairs)
InterL ={'DCG':[],'RBP':[],'ERR':[]}
for i in range(10):
InterL['DCG'].append([])
InterL['RBP'].append([])
InterL['ERR'].append([])
for mes in groups:
for bin in range(len(groups[mes])):
for pair in groups[mes][bin]:
InterL[mes][bin].append(teamDraftInterleaving(pair[0],pair[1]))
###Output
1 from team A
6 from team B
2 from team A
7 from team B
3 from team A
8 from team B
4 from team A
9 from team B
5 from team A
10 from team B
generated 59049 pairs
###Markdown
Step 4
###Code
class RCM:
p = 0.5 # default probability
def __init__(self,p=None):
self.p= p if not p==None else self.p
def train(self,clicks,queries):
num_clicks=0
num_docs=0
for i in range(len(queries)):
num_docs+=len(queries[i].listOfURLs)
for key,value in clicks.items():
num_clicks+=len(value)
self.p=float(num_clicks)/float(num_docs)
# computes probabilities for SERP items and sets some as clicked
# operation on the input array
def compProb(self,docs):
for i in range(0,len(docs)):
docs[i].clickProb=self.p
docs[i].click=self.click()
def click(self):
if(random.uniform(0,1)<=self.p):
return True
else:
return False
# returns the scores of the experiment
def runExper(self,docs):
self.compProb(docs)
A=0
B=0
for doc in docs:
if doc.click==True:
if(doc.team=='A'): A+=1
else: B+=1
return (A,B)
class SDCM:
lam=None
# n: max number of SERP items
def __init__(self,lam=None,n=10):
if(lam==None):
self.lam=[]
for i in range(n):
self.lam.append(0.5) # 0.5 is default
def train(self,clicks,queries):
sat=[]
sessions=[] # sessions where ith doc was clicked
for i in range(10):
sat.append(set())
sessions.append(set())
for q in queries:
lastClick=None
sessionID=q.sessionID
l=len(q.listOfURLs)
for i in range(10):
if(i>=l): continue
doc=q.listOfURLs[i]
if(sessionID in clicks and doc in clicks[sessionID]):
sessions[i].add(sessionID)
lastClick=i
if not lastClick==None:
sat[lastClick].add(sessionID) # the lastClick is rank of doc which was clicked last
# final computation
for i in range(10):
if len(sat[i])==0 or len(sessions[i])==0:
s=0
else:
s=float(len(sat[i]))/float(len(sessions[i]))
self.lam[i]=1-s
# computes probabilities for SERP items and sets some as clicked
# operation on the input array
def compProb(self,docs):
prev_e=1
for i in range(0,len(docs)):
try:
alpha=docs[i].label/4 # the attractiveness param.
except TypeError:
print(docs[i])
e =1 # examination param
if(i>0):
if(docs[i-1].click==True):
e=self.lam[i-1]
else:
prev_alpha=docs[i-1].label/4
e=(1-prev_alpha)*prev_e/(1-prev_alpha*prev_e)
prev_e=e
prob = alpha*e
# assign click (True or False)
# at this stage
docs[i].click = self.click(prob)
docs[i].clickProb=prob
def click(self,prob):
if(random.uniform(0,1)<=prob):
return True
else:
return False
# returns the scores of the experiment
def runExper(self,docs):
self.compProb(docs)
A=0
B=0
for doc in docs:
if doc.click==True:
if(doc.team=='A'): A+=1
else: B+=1
return (A,B)
## test
# training models
file = 'YandexRelPredChallenge.txt'
clicks,queries= readClickAndQueries(file)
rcm = RCM()
sdcm = SDCM()
rcm.train(clicks,queries)
sdcm.train(clicks,queries)
print('param of RCM is: ' +str(rcm.p))
print('params of SDCM is: ' +str(sdcm.lam))
###Output
param of RCM is: 0.13445559411
params of SDCM is: [0.24608892548373817, 0.4596477646603445, 0.49269717624148, 0.49154025670945156, 0.4819277108433735, 0.48503740648379057, 0.44308390022675737, 0.3846153846153846, 0.29679144385026734, 0.0]
###Markdown
Step 5
###Code
# returns a limited number of samples out of every bin
# this function is used to save computation resources
def samplePairs(bins,per_bin):
samples={'DCG':[],'RBP':[],'ERR':[]}
for mes in bins:
for bin in bins[mes]:
n = len(bin)
m= per_bin if (n>=per_bin) else n # if we reach the limit of the bin
samples[mes].append(random.sample(bin, m))
return samples
def simulateExperiment():
pairs=generatePairs(NUM_PAIRS)
# now we compute bins for 3 measures from step 2
pairs=measureAndGroup(pairs)
# since we don't want to run experiments on all pairs, we shall do subsampling as was described in the comment on the blackboard
# notice that we subsample from previously computed bins that had a positive delta
bin_pairs=samplePairs(pairs,20) # from each bin we will sample 20 max pairs
# note that bin_pairs is a hash object for 3 measures with samples from every bins
file = 'YandexRelPredChallenge.txt'
clicks,queries= readClickAndQueries(file)
rcm = RCM()
sdcm=SDCM()
# training models
rcm.train(clicks,queries)
sdcm.train(clicks,queries)
models=[rcm,sdcm]
groups={'DCG':[],'RBP':[],'ERR':[]}
for i in range(10):
groups['DCG'].append([])
groups['RBP'].append([])
groups['ERR'].append([])
print('Starting click simulation')
for mes in bin_pairs:
for bin in range(len(bin_pairs[mes])):
for pair in bin_pairs[mes][bin]:
# first one is P, second one is E
I=teamDraftInterleaving(pair[0],pair[1])
A_wins=0
B_wins=0
for i in range(10): # run 10 experiments with each model.
for model in models:
res=model.runExper(I)
if(res[0]>res[1]): A_wins+=1
else: B_wins+=1
if(B_wins>A_wins): # if E won over P
A_prob=float(A_wins)/float(A_wins+B_wins)
B_prob=float(B_wins)/float(A_wins+B_wins)
delta=B_prob-A_prob
# storing delta, and both probabilities to avoid re-computation later on.
groups[mes][bin].append({'pair':pair,'delta':delta,'prob':(A_prob,B_prob)})
return groups
# test
groups=simulateExperiment()
print(groups)
###Output
generated 59049 pairs
Starting click simulation
###Markdown
Step 6
###Code
def plotStats(stats):
ind = np.arange(10)
for mes in stats:
fig, ax = plt.subplots()
min_data = [stat['min'] for stat in stats[mes]]
max_data = [stat['max'] for stat in stats[mes]]
median_data = [stat['median'] for stat in stats[mes]]
perc_data=[stat['95th'] for stat in stats[mes]]
width = 0.2
rects1 = ax.bar(ind,min_data , width, color='r', label='min')
rects2 = ax.bar(ind + width, max_data, width, color='g', label='max')
rects3 = ax.bar(ind + 2 * width, median_data, width, color='b', label='median')
rects4 = ax.bar(ind + 3 * width, perc_data, width, color='y', label='95th perc.')
fig.suptitle(mes)
plt.grid(True)
plt.legend()
# ax.xaxis.labelpad = 20
        x = np.arange(10)
ax.set_xticks(x)
ax.set_xticklabels(range(1,11))
ax.margins(0.05, None)
for tick in ax.xaxis.get_majorticklabels():
tick.set_horizontalalignment("right")
plt.show()
# computes the necessary sample size for the chosen significance level and power
# returns statistics for each group: min, max, median, and 95th percentile of the required sample sizes
def computeSampleStats():
qnorm = stat.norm.ppf
alpha= 0.05
beta=0.1
z_1=qnorm(1-alpha)
z_2=qnorm(1-beta)
mes_groups=simulateExperiment()
# assuming that the assignment has changed
# I'm reporting statistics for newly obtained groups of delta prob
# NOT AS IS STATED IN THE INITIAL ASSIGNMENT BUT AS IN THE COMMENT ON THE BLACKBOARD
stats={'DCG':[],'RBP':[],'ERR':[]}
for mes in mes_groups:
for group in mes_groups[mes]:
group_Ns=[]
for el in group:
A_prob=el['prob'][0]
B_prob=el['prob'][1]
delta =el['delta']
if( not delta==0):
N=math.pow((z_1* math.sqrt(A_prob*(1-A_prob)) + z_2*math.sqrt(B_prob*(1-B_prob)))/delta,2)
group_Ns.append(N)
if not len(group_Ns)==0:
min_val=min(group_Ns)
max_val=max(group_Ns)
perc=np.percentile(group_Ns,95)
median=np.percentile(group_Ns,50)
else:
min_val=0
max_val=0
perc=0
median=0
stats[mes].append({
'min':min_val,
'max':max_val,
'95th':perc,
'median':median
})
return stats
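# A small worked example of the formula above (made-up numbers): with alpha=0.05,
# beta=0.10, A_prob=0.45, B_prob=0.55 and delta=0.10,
# N = ((qnorm(0.95)*sqrt(0.45*0.55) + qnorm(0.90)*sqrt(0.55*0.45)) / 0.10)**2 ~= 212,
# i.e. roughly 212 interleaving impressions would be needed for that effect size.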
## test
stats=computeSampleStats()
plotStats(stats)
###Output
generated 9765625 pairs
Starting click simulation
|
Tensorflow_tutorial_2.1.ipynb | ###Markdown
Linear Regression without placeholder
###Code
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import time
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
DATA_FILE = 'data/birth_life_2010.txt'
def read_birth_life_data(filename):
"""
Read in birth_life_2010.txt and return:
data in the form of NumPy array
n_samples: number of samples
"""
text = open(filename, 'r').readlines()[1:]
data = [line[:-1].split('\t') for line in text]
births = [float(line[1]) for line in data]
lifes = [float(line[2]) for line in data]
data = list(zip(births, lifes))
print(len(data))
n_samples = len(data)
data = np.asarray(data, dtype=np.float32)
print(data)
return data, n_samples
# Step 1: read in the data
data, n_samples = read_birth_life_data(DATA_FILE)
# print(data.shape, n_samples)
# Step 2: create Dataset and iterator
dataset = tf.data.Dataset.from_tensor_slices((data[:,0], data[:,1]))
iterator = dataset.make_initializable_iterator()
X, Y = iterator.get_next()
# Step 3: create weight and bias, initialized to 0
w = tf.get_variable('weights', initializer=tf.constant(0.0))
b = tf.get_variable('bias', initializer=tf.constant(0.0))
# Step 4: build model to predict Y
Y_predicted = X * w + b
# Step 5: use the square error as the loss function
loss = tf.square(Y - Y_predicted, name='loss')
# loss = utils.huber_loss(Y, Y_predicted)
# Step 6: using gradient descent with learning rate of 0.001 to minimize loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)
start = time.time()
sess = tf.InteractiveSession()  # interactive session used here instead of a `with tf.Session() as sess:` block
# Step 7: initialize the necessary variables, in this case, w and b
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter('./graphs/linear_reg', sess.graph)
# Step 8: train the model for 100 epochs
for i in range(100):
sess.run(iterator.initializer) # initialize the iterator
total_loss = 0
try:
while True:
_, l = sess.run([optimizer, loss])
total_loss += l
except tf.errors.OutOfRangeError:
pass
print('Epoch {0}: {1}'.format(i, total_loss/n_samples))
# close the writer when you're done using it
writer.close()
# Step 9: output the values of w and b
w_out, b_out = sess.run([w, b])
print('w: %f, b: %f' %(w_out, b_out))
print('Took: %f seconds' %(time.time() - start))
# plot the results
plt.plot(data[:,0], data[:,1], 'bo', label='Real data')
plt.plot(data[:,0], data[:,0] * w_out + b_out, 'r', label='Predicted data with squared error')
# plt.plot(data[:,0], data[:,0] * (-5.883589) + 85.124306, 'g', label='Predicted data with Huber loss')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Prediction
###Code
# births, lifes = 3.25, 62.286682
# data = list([(births, lifes)])
# data = np.asarray(data, dtype=np.float32)
# dataset = tf.data.Dataset.from_tensor_slices((data[:,0], data[:,1]))
# iterator = dataset.make_initializable_iterator()
# # X, Y = iterator.get_next()
# # tf.assig
# sess.run(iterator.initializer)
# w_out, b_out = sess.run([w, b])
# print('w: %f, b: %f' %(w_out, b_out))
# print('Took: %f seconds' %(time.time() - start))
# Y_predicted = sess.run(Y_predicted)
# print(Y_predicted)
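# A simpler route (sketch): the trained model is just y = w*x + b with scalar values
# w_out and b_out already fetched above, so a prediction for a new birth rate does not
# need the Dataset/iterator machinery at all:
# birth_rate = 3.25                      # example input value
# predicted_life = w_out * birth_rate + b_out
# print(predicted_life)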
###Output
_____no_output_____ |
Model/Resnet101.ipynb | ###Markdown
Detecting COVID-19 with Chest X Ray using PyTorchImage classification of Chest X Rays in one of three classes: Normal, Viral Pneumonia, COVID-19Dataset from [COVID-19 Radiography Dataset](https://www.kaggle.com/tawsifurrahman/covid19-radiography-database) on Kaggle Importing Libraries
###Code
from google.colab import drive
drive.mount('/gdrive')
%matplotlib inline
import os
import shutil
import copy
import random
import torch
import torch.nn as nn
import torchvision
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import seaborn as sns
import time
from sklearn.metrics import confusion_matrix
from PIL import Image
import matplotlib.pyplot as plt
torch.manual_seed(0)
print('Using PyTorch version', torch.__version__)
if torch.cuda.is_available():
device = torch.device("cuda:0")
print("GPU")
else:
device = torch.device("cpu")
print("CPU")
###Output
Using PyTorch version 1.7.0+cu101
GPU
###Markdown
Preparing Training and Test Sets
###Code
class_names = ['Non-Covid', 'Covid']
root_dir = '/gdrive/My Drive/Research_Documents_completed/Data/Data/'
source_dirs = ['non', 'covid']
###Output
_____no_output_____
###Markdown
Creating Custom Dataset
###Code
class ChestXRayDataset(torch.utils.data.Dataset):
def __init__(self, image_dirs, transform):
def get_images(class_name):
images = [x for x in os.listdir(image_dirs[class_name]) if x.lower().endswith('png') or x.lower().endswith('jpg')]
print(f'Found {len(images)} {class_name} examples')
return images
self.images = {}
self.class_names = ['Non-Covid', 'Covid']
for class_name in self.class_names:
self.images[class_name] = get_images(class_name)
self.image_dirs = image_dirs
self.transform = transform
def __len__(self):
return sum([len(self.images[class_name]) for class_name in self.class_names])
def __getitem__(self, index):
class_name = random.choice(self.class_names)
index = index % len(self.images[class_name])
image_name = self.images[class_name][index]
image_path = os.path.join(self.image_dirs[class_name], image_name)
image = Image.open(image_path).convert('RGB')
return self.transform(image), self.class_names.index(class_name)
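# Note: __getitem__ above draws a class at random and uses `index` only modulo that
# class's size, so the label returned for a fixed index is not deterministic; in effect
# the DataLoader sees an approximately class-balanced stream of samples.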
###Output
_____no_output_____
###Markdown
Image Transformations
###Code
train_transform = torchvision.transforms.Compose([
torchvision.transforms.Resize(size=(224, 224)),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
test_transform = torchvision.transforms.Compose([
torchvision.transforms.Resize(size=(224, 224)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
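# Quick sanity check (sketch; the file path below is a placeholder, not a real image):
# img = Image.open('/gdrive/My Drive/Research_Documents_completed/Data/Data/covid/example.png').convert('RGB')
# print(train_transform(img).shape)  # expected: torch.Size([3, 224, 224])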
###Output
_____no_output_____
###Markdown
Prepare DataLoader
###Code
train_dirs = {
'Non-Covid': '/gdrive/My Drive/Research_Documents_completed/Data/Data/non/',
'Covid': '/gdrive/My Drive/Research_Documents_completed/Data/Data/covid/'
}
#train_dirs = {
# 'Non-Covid': '/gdrive/My Drive/Data/Data/non/',
# 'Covid': '/gdrive/My Drive/Data/Data/covid/'
#}
train_dataset = ChestXRayDataset(train_dirs, train_transform)
test_dirs = {
'Non-Covid': '/gdrive/My Drive/Research_Documents_completed/Data/Data/test/non/',
'Covid': '/gdrive/My Drive/Research_Documents_completed/Data/Data/test/covid/'
}
test_dataset = ChestXRayDataset(test_dirs, test_transform)
batch_size = 15
dl_train = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
dl_test = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=True)
print(dl_train)
print('Number of training batches', len(dl_train))
print('Number of test batches', len(dl_test))
###Output
<torch.utils.data.dataloader.DataLoader object at 0x7f1467ed1780>
Number of training batches 232
Number of test batches 214
###Markdown
Data Visualization
###Code
class_names = train_dataset.class_names
def show_images(images, labels, preds):
plt.figure(figsize=(25, 15))
for i, image in enumerate(images):
plt.subplot(1, 15, i + 1, xticks=[], yticks=[])
image = image.cpu().numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = image * std + mean
image = np.clip(image, 0., 1.)
plt.imshow(image)
col = 'green'
if preds[i] != labels[i]:
col = 'red'
plt.xlabel(f'{class_names[int(labels[i].cpu().numpy())]}')
plt.ylabel(f'{class_names[int(preds[i].cpu().numpy())]}', color=col)
plt.tight_layout()
plt.show()
images, labels = next(iter(dl_train))
show_images(images, labels, labels)
images, labels = next(iter(dl_test))
show_images(images, labels, labels)
###Output
_____no_output_____
###Markdown
Creating the Model
###Code
model = torchvision.models.resnet101(pretrained=True)
print(model)
model.fc = torch.nn.Linear(in_features=2048, out_features=2, bias=True)
loss_fn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=3e-5)
model.to(device)
def show_preds():
model.eval()
images, labels = next(iter(dl_test))
images = images.to(device)
outputs = model(images)
outputs = outputs.to(device)
_, preds = torch.max(outputs, 1)
show_images(images, labels, preds)
show_preds()
###Output
_____no_output_____
###Markdown
Training the Model
###Code
def train(epochs):
best_model_wts = copy.deepcopy(model.state_dict())
b_acc = 0.0
t_loss = []
t_acc = []
avg_t_loss=[]
avg_t_acc=[]
v_loss = []
v_acc=[]
avg_v_loss = []
avg_v_acc = []
ep = []
print('Starting training..')
for e in range(0, epochs):
ep.append(e+1)
print('='*20)
print(f'Starting epoch {e + 1}/{epochs}')
print('='*20)
train_loss = 0.
val_loss = 0.
train_accuracy = 0
total_train = 0
correct_train = 0
model.train() # set model to training phase
for train_step, (images, labels) in enumerate(dl_train):
images = images.to(device)
labels = labels.to(device)
optimizer.zero_grad()
outputs = model(images)
outputs = outputs.to(device)
_, pred = torch.max(outputs, 1)
loss = loss_fn(outputs, labels)
loss.backward()
optimizer.step()
train_loss += loss.item()
train_loss /= (train_step + 1)
_, predicted = torch.max(outputs, 1)
total_train += labels.nelement()
correct_train += sum((predicted == labels).cpu().numpy())
train_accuracy = correct_train / total_train
t_loss.append(train_loss)
t_acc.append(train_accuracy)
if train_step % 20 == 0:
print('Evaluating at step', train_step)
print(f'Training Loss: {train_loss:.4f}, Training Accuracy: {train_accuracy:.4f}')
accuracy = 0.
model.eval() # set model to eval phase
for val_step, (images, labels) in enumerate(dl_test):
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
outputs = outputs.to(device)
loss = loss_fn(outputs, labels)
val_loss += loss.item()
_, preds = torch.max(outputs, 1)
accuracy += sum((preds == labels).cpu().numpy())
val_loss /= (val_step + 1)
accuracy = accuracy/len(test_dataset)
print(f'Validation Loss: {val_loss:.4f}, Validation Accuracy: {accuracy:.4f}')
v_loss.append(val_loss)
v_acc.append(accuracy)
show_preds()
model.train()
if accuracy > b_acc:
b_acc = accuracy
avg_t_loss.append(sum(t_loss)/len(t_loss))
avg_v_loss.append(sum(v_loss)/len(v_loss))
avg_t_acc.append(sum(t_acc)/len(t_acc))
avg_v_acc.append(sum(v_acc)/len(v_acc))
best_model_wts = copy.deepcopy(model.state_dict())
print('Best validation Accuracy: {:4f}'.format(b_acc))
print('Training complete..')
plt.plot(ep, avg_t_loss, 'g', label='Training loss')
plt.plot(ep, avg_v_loss, 'b', label='validation loss')
plt.title('Training and Validation loss for each epoch')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.savefig('/gdrive/My Drive/Research_Documents_completed/resnet101_loss.png')
plt.show()
plt.plot(ep, avg_t_acc, 'g', label='Training accuracy')
plt.plot(ep, avg_v_acc, 'b', label='validation accuracy')
plt.title('Training and Validation Accuracy for each epoch')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.savefig('/gdrive/My Drive/Research_Documents_completed/resnet101_accuarcy.png')
plt.show()
torch.save(model.state_dict(),'/gdrive/My Drive/Research_Documents_completed/resnet101.pt')
%%time
train(epochs=5)
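# To reuse the weights that train() saved (sketch; assumes the same Drive path as above):
# model = torchvision.models.resnet101(pretrained=False)
# model.fc = torch.nn.Linear(in_features=2048, out_features=2, bias=True)
# model.load_state_dict(torch.load('/gdrive/My Drive/Research_Documents_completed/resnet101.pt', map_location=device))
# model.to(device).eval()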
###Output
_____no_output_____
###Markdown
Final Results: validation loss and training loss vs. epoch, validation accuracy and training accuracy vs. epoch, best accuracy, and error.
###Code
show_preds()
###Output
_____no_output_____ |
Lab2_4.ipynb | ###Markdown
###Code
#Lab2.4
# Cover every segment with the minimum number of points.
# Greedy strategy: sort segments by right endpoint and place a point at the
# right endpoint of the first segment that is not yet covered.
n = int(input())
segments=[]
for i in range(0,n):
    segments.extend([input().strip().split()])
    segments[i]=[int(x) for x in segments[i]]
segments.sort(key=lambda x: x[1])
points=[segments[0][1]]
for i in range(0,n):
    if points[len(points)-1]<segments[i][0]:
        points.extend([segments[i][1]])
print(len(points))
print(" ".join(str(x) for x in points))
###Output
4
4 7
1 3
2 5
5 6
2
3 6
|
nbs/01-dictionary-page.ipynb | ###Markdown
Dictionary Page Generation Imports
###Code
#exports
import json
import numpy as np
import pandas as pd
from frictionless import Package
from jinja2 import Template
from IPython.display import JSON, Markdown
datapackage_json_fp = '../data/dictionary/datapackage.json'
package = Package(datapackage_json_fp, profile='tabular-data-package')
ids_resource = package.get_resource('ids')
df_ids = ids_resource.to_pandas()
df_ids.head(3)
#exports
def construct_contributor_txt(package):
contributors = []
for contributor in package.contributors:
if 'title' in contributor.keys():
title_key = 'title'
elif 'name' in contributor.keys():
title_key = 'name'
else:
            raise ValueError(f'One of `title` or `name` must be provided with each contributor, instead was passed: {", ".join(contributor.keys())}')
if 'role' in contributor:
contributors += [f'{contributor[title_key]} ({contributor["role"].capitalize()})']
else:
contributors += [contributor[title_key]]
contributors_txt = ', '.join(contributors)
return contributors_txt
def construct_licence_txt(package):
licenses = []
for license in package.licenses:
assert 'name' in license, 'Each licence must have an associated `name`'
if 'path' in license:
licenses += [f'[{license["name"]}]({license["path"]})']
else:
licenses += [f'{license["name"]}']
licence_txt = ', '.join(licenses)
return licence_txt
def construct_metadata_table_str(package):
attr_to_processing_func = {
'version': lambda package: package.version,
'contributors': lambda package: construct_contributor_txt(package),
'keywords': lambda package: ', '.join(package.keywords),
'licenses': lambda package: construct_licence_txt(package),
}
s_metadata = pd.Series({
attr.capitalize(): attr_to_processing_func[attr](package)
for attr
in package.keys()
if attr in attr_to_processing_func.keys()
})
s_metadata.index.name = 'Attribute'
s_metadata.name = 'Value(s)'
if s_metadata.size > 0:
return s_metadata.to_markdown()
else:
return False
metadata_table_str = construct_metadata_table_str(package)
Markdown(metadata_table_str)
#exports
def construct_field_desc_table_str(package, resource='ids'):
s_field_descs = pd.Series({field['title']: field['description'] for field in package.get_resource(resource).schema.fields})
s_field_descs.index.name = 'Field'
s_field_descs.name = 'Description'
md_str = s_field_descs.to_markdown()
return md_str
field_desc_table_str = construct_field_desc_table_str(package)
Markdown(field_desc_table_str)
#exports
def get_dp_field_to_url_format_str(datapackage_json_fp):
package = Package(datapackage_json_fp, profile='tabular-data-package')
ids_resource = package.get_resource('ids')
id_field_to_url_format_str = {
field['name']: field['url_format']
for field
in ids_resource['schema']['fields']
if 'url_format' in field.keys()
}
return id_field_to_url_format_str
def get_dp_field_to_title(datapackage_json_fp):
package = Package(datapackage_json_fp, profile='tabular-data-package')
ids_resource = package.get_resource('ids')
id_field_to_title = {
field['name']: field['title']
for field
in ids_resource['schema']['fields']
}
return id_field_to_title
def format_id_values(id_values, id_type, id_field_to_url_format_str):
if id_type in id_field_to_url_format_str.keys():
url_format_str = id_field_to_url_format_str[id_type]
id_values_strs = [f'[{id_value}]({url_format_str.format(value=id_value)})' for id_value in id_values]
else:
id_values_strs = [str(id_value) for id_value in id_values]
return id_values_strs
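# For illustration (hypothetical values): with
#   id_field_to_url_format_str = {'example_id': 'https://example.org/object/{value}'}
# format_id_values([123], 'example_id', id_field_to_url_format_str)
# returns ['[123](https://example.org/object/123)'].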
construct_linked_idxs = lambda df_ids_clean: [
f'[{idx}](https://osuked.github.io/Power-Station-Dictionary/objects/{idx})'
for idx
in df_ids_clean.index
]
def construct_linked_ids_table_str(package, datapackage_json_fp, resource='ids', table_id='dictionary'):
id_field_to_url_format_str = get_dp_field_to_url_format_str(datapackage_json_fp)
id_field_to_title = get_dp_field_to_title(datapackage_json_fp)
df_ids = package.get_resource(resource).to_pandas()
df_ids_clean = pd.DataFrame(index=df_ids.index, columns=id_field_to_title.values())
for osuked_id, row in df_ids.iterrows():
row = pd.Series({
id_field_to_title[id_type]: (
', '.join([str(id_) for id_ in format_id_values(id_values, id_type, id_field_to_url_format_str)]) if isinstance(id_values, list)
else f'[{id_values}]({id_field_to_url_format_str[id_type].format(value=id_values)})' if (id_type in id_field_to_url_format_str.keys()) and (id_values is not None)
else id_values
)
for id_type, id_values
in row.items()
}).fillna('-')
df_ids_clean.loc[osuked_id] = row
df_ids_clean = df_ids_clean.drop(columns='OSUKED ID')
df_ids_clean.index = construct_linked_idxs(df_ids_clean)
df_ids_clean.index.name = 'OSUKED ID'
linked_ids_table_str = df_ids_clean.to_markdown()+'{#id}'
return linked_ids_table_str
linked_ids_table_str = construct_linked_ids_table_str(package, datapackage_json_fp)
# Markdown(linked_ids_table_str)
#exports
def populate_and_save_template(template_fp, save_fp, **render_kwargs):
rendered_str = Template(open(template_fp).read()).render(**render_kwargs)
with open(save_fp, 'w', encoding='utf-8') as f:
try:
f.write(rendered_str)
        except Exception as exc:
raise exc
return None
def populate_dictionary_page(
datapackage_json_fp: str='../data/dictionary/datapackage.json',
template_fp: str='../templates/dictionary_page.md',
save_fp: str=f'../docs/dictionary.md'
):
package = Package(datapackage_json_fp, profile='tabular-data-package')
render_kwargs = {
'title': package.title,
'description': package.description,
'metadata_table': construct_metadata_table_str(package),
'field_desc_table': construct_field_desc_table_str(package),
'linked_ids_table': construct_linked_ids_table_str(package, datapackage_json_fp),
}
populate_and_save_template(template_fp, save_fp, **render_kwargs)
return
populate_dictionary_page(
datapackage_json_fp = '../data/dictionary/datapackage.json',
template_fp = '../templates/dictionary_page.md',
save_fp = f'../docs/dictionary.md'
)
#hide
from nbdev.export import *
notebook2script()
###Output
Converted 00-documentation.ipynb.
Converted 01-dictionary-page.ipynb.
Converted 02-attribute extraction.ipynb.
Converted 03-page-population.ipynb.
Converted 04-cli.ipynb.
Converted 05-carbon-intensity.ipynb.
Converted 06-cfd-capture-price-comparison.ipynb.
Converted 07-dataset-pages.ipynb.
|
doc/docstrings/ecdfplot.ipynb | ###Markdown
Plot a univariate distribution along the x axis:
###Code
import seaborn as sns; sns.set_theme()
penguins = sns.load_dataset("penguins")
sns.ecdfplot(data=penguins, x="flipper_length_mm")
###Output
_____no_output_____
###Markdown
Flip the plot by assigning the data variable to the y axis:
###Code
sns.ecdfplot(data=penguins, y="flipper_length_mm")
###Output
_____no_output_____
###Markdown
If neither `x` nor `y` is assigned, the dataset is treated as wide-form, and a histogram is drawn for each numeric column:
###Code
sns.ecdfplot(data=penguins.filter(like="bill_", axis="columns"))
###Output
_____no_output_____
###Markdown
You can also draw multiple histograms from a long-form dataset with hue mapping:
###Code
sns.ecdfplot(data=penguins, x="bill_length_mm", hue="species")
###Output
_____no_output_____
###Markdown
The default distribution statistic is normalized to show a proportion, but you can show absolute counts instead:
###Code
sns.ecdfplot(data=penguins, x="bill_length_mm", hue="species", stat="count")
###Output
_____no_output_____
###Markdown
It's also possible to plot the empirical complementary CDF (1 - CDF):
###Code
sns.ecdfplot(data=penguins, x="bill_length_mm", hue="species", complementary=True)
###Output
_____no_output_____
###Markdown
Plot a univariate distribution along the x axis:
###Code
import seaborn as sns; sns.set()
penguins = sns.load_dataset("penguins")
sns.ecdfplot(data=penguins, x="flipper_length_mm")
###Output
_____no_output_____
###Markdown
Flip the plot by assigning the data variable to the y axis:
###Code
sns.ecdfplot(data=penguins, y="flipper_length_mm")
###Output
_____no_output_____
###Markdown
If neither `x` nor `y` is assigned, the dataset is treated as wide-form, and a histogram is drawn for each numeric column:
###Code
sns.ecdfplot(data=penguins.filter(like="bill_", axis="columns"))
###Output
_____no_output_____
###Markdown
You can also draw multiple histograms from a long-form dataset with hue mapping:
###Code
sns.ecdfplot(data=penguins, x="bill_length_mm", hue="species")
###Output
_____no_output_____
###Markdown
The default distribution statistic is normalized to show a proportion, but you can show absolute counts instead:
###Code
sns.ecdfplot(data=penguins, x="bill_length_mm", hue="species", stat="count")
###Output
_____no_output_____
###Markdown
It's also possible to plot the empirical complementary CDF (1 - CDF):
###Code
sns.ecdfplot(data=penguins, x="bill_length_mm", hue="species", complementary=True)
###Output
_____no_output_____
###Markdown
Plot a univariate distribution along the x axis:
###Code
import seaborn as sns; sns.set()
penguins = sns.load_dataset("penguins")
sns.ecdfplot(data=penguins, x="flipper_length_mm")
###Output
_____no_output_____
###Markdown
Flip the plot by assigning the data variable to the y axis:
###Code
sns.ecdfplot(data=penguins, y="flipper_length_mm")
###Output
_____no_output_____
###Markdown
If neither `x` nor `y` is assigned, the dataset is treated as wide-form, and a histogram is drawn for each numeric column:
###Code
sns.ecdfplot(data=penguins.filter(like="culmen_", axis="columns"))
###Output
_____no_output_____
###Markdown
You can also draw multiple histograms from a long-form dataset with hue mapping:
###Code
sns.ecdfplot(data=penguins, x="culmen_length_mm", hue="species")
###Output
_____no_output_____
###Markdown
The default distribution statistic is normalized to show a proportion, but you can show absolute counts instead:
###Code
sns.ecdfplot(data=penguins, x="culmen_length_mm", hue="species", stat="count")
###Output
_____no_output_____
###Markdown
It's also possible to plot the empirical complementary CDF (1 - CDF):
###Code
sns.ecdfplot(data=penguins, x="culmen_length_mm", hue="species", complementary=True)
###Output
_____no_output_____ |
module4/assignment_kaggle_challenge_4.ipynb | ###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Catch up, if needed- [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2/portfolio-project/ds6), then choose your dataset, and [submit this form](https://forms.gle/nyWURUg65x1UTRNV9), due yesterday at 3:59pm Pacific.- Submit predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file.) The competition closes today at 3:59pm. Every student should make at least one submission that scores at least 60% accuracy (above the majority class baseline). Assignment- [ ] Continue to participate in our Kaggle challenge. - [ ] Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_ The competition closes today at 3:59pm.- [ ] Add comments and Markdown to your notebook. Clean up your code.- [ ] Commit your notebook to your fork of the GitHub repo. Stretch Goals Reading- Jake VanderPlas, [Python Data Science Handbook, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation- Jake VanderPlas, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107)- Ron Zacharski, [A Programmer's Guide to Data Mining, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation- Sebastian Raschka, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb)- Peter Worcester, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85) Doing- Try combining xgboost early stopping, cross-validation, & hyperparameter optimization, with [the "original" (non scikit-learn) xgboost API](https://xgboost.readthedocs.io/en/latest/python/python_api.htmlxgboost.cv).- In additon to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). Experiment with these alternatives.- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? 
Try stacking multiple submissions!Here's some code you can use:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)```
###Code
# If you're in Colab...
import os, sys
in_colab = 'google.colab' in sys.modules
if in_colab:
# Install required python packages:
# category_encoders, version >= 2.0
# eli5, version >= 0.9
# pandas-profiling, version >= 2.0
# plotly, version >= 4.0
!pip install --upgrade category_encoders eli5 pandas-profiling plotly
# Pull files from Github repo
os.chdir('/content')
!git init .
!git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
!git pull origin master
# Change into directory for module
os.chdir('module4')
import numpy as np
import pandas as pd
df = pd.read_csv('../data/renthop-nyc.csv')
assert df.shape == (49352, 34)
df = df[(df['price'] >= np.percentile(df['price'], 0.5)) &
(df['price'] <= np.percentile(df['price'], 99.5)) &
(df['latitude'] >= np.percentile(df['latitude'], 0.05)) &
(df['latitude'] < np.percentile(df['latitude'], 99.95)) &
(df['longitude'] >= np.percentile(df['longitude'], 0.05)) &
(df['longitude'] <= np.percentile(df['longitude'], 99.95))]
df['created'] = pd.to_datetime(df['created'], infer_datetime_format=True)
cutoff = pd.to_datetime('2016-06-01')
train = df[df.created < cutoff]
test = df[df.created >= cutoff]
def engineer_features(df):
df = df.copy()
df['description'] = df['description'].str.strip().fillna('')
df['has_description'] = df['description'] != ''
df['description_length'] = df['description'].str.len()
perk_cols = ['elevator', 'cats_allowed', 'hardwood_floors', 'dogs_allowed',
'doorman', 'dishwasher', 'no_fee', 'laundry_in_building',
'fitness_center', 'pre-war', 'laundry_in_unit', 'roof_deck',
'outdoor_space', 'dining_room', 'high_speed_internet', 'balcony',
'swimming_pool', 'new_construction', 'exclusive', 'terrace',
'loft', 'garden_patio', 'common_outdoor_space',
'wheelchair_access']
df['perk_count'] = df[perk_cols].sum(axis=1)
df['cats_or_dogs'] = (df['cats_allowed']==1) | (df['dogs_allowed']==1)
df['cats_and_dogs'] = (df['cats_allowed']==1) & (df['dogs_allowed']==1)
df['rooms'] = df['bedrooms'] + df['bathrooms']
df['days'] = (df['created'] - pd.to_datetime('2016-01-01')).dt.days
df = df.drop(columns='created')
return df
train = engineer_features(train)
test = engineer_features(test)
###Output
_____no_output_____
###Markdown
Hyperparameter optimization with a linear model
###Code
from scipy.stats import randint, uniform
import category_encoders as ce
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.impute import SimpleImputer
from sklearn.linear_model import Ridge
from sklearn.model_selection import RandomizedSearchCV
from sklearn.pipeline import make_pipeline
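# NOTE (assumption): `target` and `high_cardinality` are expected to come from an
# earlier cell (e.g. target = 'price' plus a list of the free-text, high-cardinality
# columns of this rental dataset); they are not defined in the cells shown here.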
features = train.columns.drop([target] + high_cardinality)
X_train = train[features]
y_train = train[target]
pipeline = make_pipeline(
ce.OneHotEncoder(use_cat_names=True),
SimpleImputer(),
SelectKBest(f_regression),
Ridge()
)
param_distributions = {
'simpleimputer__strategy': ['mean', 'median'],
'selectkbest__k': randint(1, len(X_train.columns)+1),
'ridge__alpha': uniform(1, 10),
}
search = RandomizedSearchCV(
pipeline,
param_distributions=param_distributions,
n_iter=100,
cv=5,
scoring='neg_mean_absolute_error',
verbose=10,
return_train_score=True,
n_jobs=-1
)
search.fit(X_train, y_train);
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See module 3 assignment notebook)- [ ] More Categorical Encoding. (See module 2 assignment notebook)- [ ] Stacking Ensemble. (See below) Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)```
###Code
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
import pandas as pd
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
import category_encoders as ce
import numpy as np
from scipy.stats import randint, uniform
from sklearn.impute import SimpleImputer
from sklearn.model_selection import RandomizedSearchCV, train_test_split, GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.utils.multiclass import unique_labels
import seaborn as sns
import matplotlib.pyplot as plt
my_train, my_val = train_test_split(train, random_state=42)
def wrangler(dataframe):
x = dataframe.copy()
# New Feature
x['date_recorded_year'] = x['date_recorded'].str[:4].astype(int)
x['years_before_service'] = x['date_recorded_year'] - x['construction_year']
x.loc[(x['years_before_service']<0) | (x['years_before_service']>100), 'years_before_service'] = np.nan
# Replace None, none, 0 with NaN values, and fix long/lat columns
features_replace = ['scheme_name', 'installer', 'funder', 'wpt_name', 'longitude', 'latitude']
x[features_replace] = x[features_replace].replace({'None':np.nan, 'none':np.nan, '0':np.nan, 0:np.nan, -2e-8:np.nan})
# Drop id
drops = ['id', 'recorded_by']
x = x.drop(drops, axis=1)
return x
target = 'status_group'
wrangled_train = wrangler(my_train)
wrangled_val = wrangler(my_val)
X_train = wrangled_train.drop(target, axis=1)
X_val = wrangled_val.drop(target, axis=1)
y_train = wrangled_train[target]
y_val = wrangled_val[target]
# Using params from best submission in previous notebook
pipeline = make_pipeline(
ce.OrdinalEncoder(handle_missing = 'value', handle_unknown = 'return_nan'),
SimpleImputer(strategy='mean'),
StandardScaler(),
RandomForestClassifier(max_depth = None, max_features = 0.0803875104989975, n_estimators = 962,random_state=42, n_jobs=-1)
)
pipeline.fit(X_train, y_train)
pipeline.score(X_val, y_val)
y_pred = pipeline.predict(X_val)
###Output
_____no_output_____
###Markdown
Build confusion matrix
###Code
cmatrix = confusion_matrix(y_val,y_pred)
cmatrix
unique_labels(y_pred), unique_labels(y_val)
labels = unique_labels(y_pred)
columns = (f'Predicted {i}' for i in labels)
index = (f'Actual {i}' for i in labels)
with plt.style.context('fivethirtyeight'):
fig, ax = plt.subplots(figsize=(8,8))
sns.heatmap(cmatrix, ax=ax, annot=True, fmt='d', cmap='cividis')
ax.set_ylim(3,0)
ax.set_xlim(0,3)
ax.set_xticklabels(columns, rotation=45)
ax.set_yticklabels(index, rotation=0)
class_report = classification_report(y_val, y_pred)
print(class_report)
###Output
precision recall f1-score support
functional 0.81 0.89 0.85 8098
functional needs repair 0.57 0.36 0.44 1074
non functional 0.84 0.78 0.81 5678
accuracy 0.81 14850
macro avg 0.74 0.68 0.70 14850
weighted avg 0.81 0.81 0.81 14850
###Markdown
Never did gridsearchcv yesterday, let's try it
###Code
new_pipe = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(),
StandardScaler(),
RandomForestClassifier(random_state=42)
)
params = {
'ordinalencoder__handle_unknown': ['return_nan', 'value'],
'ordinalencoder__handle_missing': ['return_nan', 'value'],
'simpleimputer__strategy': ['mean', 'median', 'most_frequent'],
'randomforestclassifier__n_estimators': range(1, 1001, 100),
'randomforestclassifier__max_depth': [None],
# 'randomforestclassifier__max_features': uniform(0, 1),
}
search = GridSearchCV(
new_pipe,
param_grid = params,
n_jobs=-1,
cv = 3,
verbose=15,
scoring = 'accuracy',
return_train_score=True
)
search.fit(X_train,y_train)
print(search.best_params_)
print(search.best_score_)
piper = search.best_estimator_
piper.score(X_val, y_val)
# why not make a submission
X_test = wrangler(test)
y_pred = piper.predict(X_test)
submission = sample_submission.copy()
submission['status_group'] = y_pred
submission.to_csv('maybe.csv', index=False)
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See below)- [ ] Stacking Ensemble. (See below)- [ ] More Categorical Encoding. (See below) RandomizedSearchCV / GridSearchCV, for model selection- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? 
Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)``` More Categorical Encodings**1.** The article **[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)** mentions 4 encodings:- **"Categorical Encoding":** This means using the raw categorical values as-is, not encoded. Scikit-learn doesn't support this, but some tree algorithm implementations do. For example, [Catboost](https://catboost.ai/), or R's [rpart](https://cran.r-project.org/web/packages/rpart/index.html) package.- **Numeric Encoding:** Synonymous with Label Encoding, or "Ordinal" Encoding with random order. We can use [category_encoders.OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html).- **One-Hot Encoding:** We can use [category_encoders.OneHotEncoder](http://contrib.scikit-learn.org/categorical-encoding/onehot.html).- **Binary Encoding:** We can use [category_encoders.BinaryEncoder](http://contrib.scikit-learn.org/categorical-encoding/binary.html).**2.** The short video **[Coursera โ How to Win a Data Science Competition: Learn from Top Kagglers โ Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)** introduces an interesting idea: use both X _and_ y to encode categoricals.Category Encoders has multiple implementations of this general concept:- [CatBoost Encoder](http://contrib.scikit-learn.org/categorical-encoding/catboost.html)- [James-Stein Encoder](http://contrib.scikit-learn.org/categorical-encoding/jamesstein.html)- [Leave One Out](http://contrib.scikit-learn.org/categorical-encoding/leaveoneout.html)- [M-estimate](http://contrib.scikit-learn.org/categorical-encoding/mestimate.html)- [Target Encoder](http://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)- [Weight of Evidence](http://contrib.scikit-learn.org/categorical-encoding/woe.html)Category Encoder's mean encoding implementations work for regression problems or binary classification problems. For multi-class classification problems, you will need to temporarily reformulate it as binary classification. For example:```pythonencoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) Both parameters > 1 to avoid overfittingX_train_encoded = encoder.fit_transform(X_train, y_train=='functional')X_val_encoded = encoder.transform(X_train, y_val=='functional')```**3.** The **[dirty_cat](https://dirty-cat.github.io/stable/)** library has a Target Encoder implementation that works with multi-class classification.```python dirty_cat.TargetEncoder(clf_type='multiclass-clf')```It also implements an interesting idea called ["Similarity Encoder" for dirty categories](https://www.slideshare.net/GaelVaroquaux/machine-learning-on-non-curated-data-154905090).However, it seems like dirty_cat doesn't handle missing values or unknown categories as well as category_encoders does. 
And you may need to use it with one column at a time, instead of with your whole dataframe.**4. [Embeddings](https://www.kaggle.com/learn/embeddings)** can work well with sparse / high cardinality categoricals._**I hope it's not too frustrating or confusing that there's not one "canonical" way to encode categoricals. It's an active area of research and experimentation! Maybe you can make your own contributions!**_
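As a quick follow-up to the "Grid-Searching Which Model To Use" idea above, here is a minimal sketch (not the book's exact code; the parameter values and the `X_train`/`y_train` names are placeholders) of searching over alternative pipeline steps with `GridSearchCV`:
```python
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

pipe = Pipeline([('preprocessing', StandardScaler()), ('classifier', SVC())])

# each dict in param_grid can swap in a different estimator (or drop a step with None)
param_grid = [
    {'classifier': [SVC()], 'preprocessing': [StandardScaler()],
     'classifier__gamma': [0.01, 0.1, 1], 'classifier__C': [0.1, 1, 10]},
    {'classifier': [RandomForestClassifier(n_estimators=100)],
     'preprocessing': [None], 'classifier__max_features': [1, 2, 3]},
]

grid = GridSearchCV(pipe, param_grid, cv=5)
# grid.fit(X_train, y_train)  # uses whatever train split you already have
```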
###Code
import os, sys
in_colab = 'google.colab' in sys.modules
# If you're in Colab...
if in_colab:
# Pull files from Github repo
os.chdir('/content')
!git init .
!git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
!git pull origin master
# Install required python packages
!pip install -r requirements.txt
# Change into directory for module
os.chdir('module4')
%matplotlib inline
import category_encoders as ce
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
def wrangle(X):
"""Wrangles train, validate, and test sets in the same way"""
X = X.copy()
# Convert date_recorded to datetime
X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
# Extract components from date_recorded, then drop the original column
X['year_recorded'] = X['date_recorded'].dt.year
X['month_recorded'] = X['date_recorded'].dt.month
X['day_recorded'] = X['date_recorded'].dt.day
X = X.drop(columns='date_recorded')
# Engineer feature: how many years from construction_year to date_recorded
X['years'] = X['year_recorded'] - X['construction_year']
# Drop recorded_by (never varies) and id (always varies, random)
unusable_variance = ['recorded_by', 'id','amount_tsh','num_private']
X = X.drop(columns=unusable_variance)
# Drop duplicate columns
duplicate_columns = ['quantity_group','payment_type','extraction_type','waterpoint_type_group','quality_group']
X = X.drop(columns=duplicate_columns)
# About 3% of the time, latitude has small values near zero,
# outside Tanzania, so we'll treat these like null values
X['latitude'] = X['latitude'].replace(-2e-08, np.nan)
# When columns have zeros and shouldn't, they are like null values
cols_with_zeros = ['construction_year', 'longitude', 'latitude', 'gps_height', 'population','funder','district_code','installer']
for col in cols_with_zeros:
X[col] = X[col].replace(0, np.nan)
return X
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'),
pd.read_csv('../data/tanzania/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
# Split train into train & val. Make val the same size as test.
target = 'status_group'
train, val = train_test_split(train, test_size=len(test),
stratify=train[target], random_state=32)
# Wrangle train, validate, and test sets in the same way
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
# Arrange data into X features matrix and y target vector
X_train = train.drop(columns=target)
y_train = train[target]
X_val = val.drop(columns=target)
y_val = val[target]
X_test = test
# Make pipeline!
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='mean'),
RandomForestClassifier(n_estimators=490,max_depth=20,min_samples_leaf=3, random_state=32,max_features= 0.454761180866987)
)
# Fit on train, score on val
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
print ('Training Accuracy', pipeline.score(X_train, y_train))
print ('Validation Accuracy', pipeline.score(X_val, y_val))
print('Vs Baseline',pipeline.score(X_val, y_val)- 0.8093049171193759)
from sklearn.metrics import classification_report
print(classification_report(y_val, y_pred))
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_pred)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred), columns=columns, index=index)
return sns.heatmap(table, annot=True, fmt='d')
plot_confusion_matrix(y_val, y_pred);
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See module 3 assignment notebook)- [ ] More Categorical Encoding. (See module 2 assignment notebook)- [ ] Stacking Ensemble. (See below) Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)```
###Code
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
import category_encoders as ce
# Merge train_features.csv & train_labels.csv
data = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
#train/validate split
train, val = train_test_split(data, random_state=63)
train.shape, val.shape, test.shape
#Define a function to wrangle train, validate, and test sets in the same way.
#Clean outliers and engineer features.
def wrangle(X):
"""Wrangle train, validate, and test sets in the same way"""
# Prevent SettingWithCopyWarning
X = X.copy()
# About 3% of the time, latitude has small values near zero,
# outside Tanzania, so we'll treat these values like zero.
X['latitude'] = X['latitude'].replace(-2e-08, 0)
# When columns have zeros and shouldn't, they are like null values.
# So we will replace the zeros with nulls, and impute missing values later.
#confirmed model is .2% worse when excluding population here
cols_with_zeros = ['longitude', 'latitude','construction_year','population','gps_height']
for col in cols_with_zeros:
X[col] = X[col].replace(0, np.nan)
#drop duplicate columns
# quantity & quantity_group are duplicates, so drop one
# payment == payment_type
X = X.drop(columns=['quantity_group','payment_type'])
# Drop recorded_by (never varies)
unusable_variance = ['recorded_by']
X = X.drop(columns=unusable_variance)
#convert date_recorded to datetime
    X['date_recorded'] = pd.to_datetime(X['date_recorded'])
#drop the column but keep the information
X['year_recorded'] = X['date_recorded'].dt.year
X['month_recorded'] = X['date_recorded'].dt.month
X['day_recorded'] = X['date_recorded'].dt.day
X = X.drop(columns='date_recorded')
#create 'age' column, the number of years between construction and evaluation
X['age'] = X['year_recorded'] - X['construction_year']
#bools for whether the waterpoint is marked as rural or urban
#...make it a tiny bit worse
X['rural'] = X['lga'].apply(lambda s: "Rural" in s)
X['urban'] = X['lga'].apply(lambda s: "Urban" in s)
    #fill nans in latitude and longitude with averages for the basin
    #instead of relying on the simple imputer
    #using region makes the model worse by .3%
    #using basin makes it better by about the same amount!
    X['latitude'] = X['latitude'].fillna(X.groupby('basin')['latitude'].transform('mean'))
    X['longitude'] = X['longitude'].fillna(X.groupby('basin')['longitude'].transform('mean'))
    #same thing for gps_height, grouped by region
    #for either basin or region, seems to make it a little worse
    #concept: using the google elevation API to get elevation for those coordinates
    X['gps_height'] = X['gps_height'].fillna(X.groupby('region')['gps_height'].transform('mean'))
    # return the wrangled dataframe
    return X
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
#also make a wrangled version of all the training data to make an even better-fit
#model with which to predict for kaggle
data = wrangle(data)
train.head()
target = 'status_group'
features = train.columns.drop([target])
X_train = train[features]
y_train = train[target]
#oe = ce.OrdinalEncoder()
#y_train_encoded = oe.fit_transform(y_train)
X_val = val[features]
y_val = val[target]
X_data = data[features]
y_data = data[target]
X_test = test[features]
#pipeline includes:
#encoding of categorical variables as ordinal/numeric
#imputer for nans (median and mean are within .1% of each other in performance)
#random forest (many trees)
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='mean'),
RandomForestClassifier(n_estimators=168, random_state=40, n_jobs=-1)
)
# Note: X_data includes the validation rows, so this "validation" score is
# optimistic. For an honest estimate, fit on X_train/y_train and score on val.
pipeline.fit(X_data, y_data)
print('Validation Accuracy', pipeline.score(X_val, y_val))
#making kaggle submission
y_pred = pipeline.predict(X_test)
y_pred
submission = pd.DataFrame(data={'id':test['id'],'status_group':y_pred})
submission.head()
submission.to_csv('fourth_submission.csv', index=False)
#I'm just gonna spend the rest of my time reading instead of trying to last-minute boost this
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
import seaborn as sns
y_pred = pipeline.predict(X_val)
#my work here is done!!
confusion_matrix(y_val, y_pred)
#nah let's make the pretty one
#code from lecture
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred),
columns=columns, index=index)
return sns.heatmap(table, annot=True, fmt='d', cmap='viridis')
plot_confusion_matrix(y_val, y_pred);
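# A row-normalized view (each row sums to 1) makes per-class recall easier to read.
# Minimal sketch, assuming the same y_val / y_pred and imports as the cells above.
cm = confusion_matrix(y_val, y_pred)
cm_normalized = cm / cm.sum(axis=1, keepdims=True)
labels = unique_labels(y_val)
normalized_table = pd.DataFrame(cm_normalized,
                                columns=[f'Predicted {label}' for label in labels],
                                index=[f'Actual {label}' for label in labels])
sns.heatmap(normalized_table, annot=True, fmt='.2f', cmap='viridis');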
encoder = pipeline.named_steps['ordinalencoder']
encoded = encoder.transform(X_train)
# Get feature importances
rf = pipeline.named_steps['randomforestclassifier']
importances = pd.Series(rf.feature_importances_, encoded.columns)
# Plot feature importances
n = 100
plt.figure(figsize=(10,n/2))
plt.title(f'Top {n} features')
importances.sort_values()[-n:].plot.barh(color='grey');
from sklearn.ensemble import GradientBoostingClassifier
pipeline2 = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='mean'),
GradientBoostingClassifier()
)
pipeline2.fit(X_train, y_train)
print('Validation Accuracy', pipeline2.score(X_val, y_val))
#not as good!!!
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [X] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [X] Plot a confusion matrix for your Tanzania Waterpumps model.- [X] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [x] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [X] Commit your notebook to your fork of the GitHub repo.- [X] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See below)- [ ] Stacking Ensemble. (See below)- [ ] More Categorical Encoding. (See below) RandomizedSearchCV / GridSearchCV, for model selection- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? 
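As a concrete follow-up to the question above, here is a minimal, hedged sketch of grid-searching over the pipeline steps themselves, in the spirit of the book example. It assumes a feature matrix `X_train` and target `y_train` that have already been numerically encoded and imputed (for example, by the OrdinalEncoder + SimpleImputer steps used elsewhere in this notebook); the parameter values are illustrative, not tuned.

```python
# Sketch: search over which model (and which preprocessing) to use.
# Assumes X_train / y_train are already numeric; parameter values are illustrative.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

pipe = Pipeline([('preprocessing', StandardScaler()), ('classifier', SVC())])

param_grid = [
    {'classifier': [SVC()],
     'preprocessing': [StandardScaler()],
     'classifier__C': [0.1, 1, 10]},
    {'classifier': [RandomForestClassifier(n_estimators=100)],
     'preprocessing': [None],  # trees don't need feature scaling
     'classifier__max_depth': [10, 20, None]},
]

grid = GridSearchCV(pipe, param_grid, cv=3, n_jobs=-1)
grid.fit(X_train, y_train)
print('Best params:', grid.best_params_)
print('Best cross-validation score:', grid.best_score_)
```

Setting a pipeline step to `None` in the grid lets the search skip that step for models that don't need it, which is what makes comparing a scaled SVC against an unscaled random forest in one search possible.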
Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)``` More Categorical Encodings**1.** The article **[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)** mentions 4 encodings:- **"Categorical Encoding":** This means using the raw categorical values as-is, not encoded. Scikit-learn doesn't support this, but some tree algorithm implementations do. For example, [Catboost](https://catboost.ai/), or R's [rpart](https://cran.r-project.org/web/packages/rpart/index.html) package.- **Numeric Encoding:** Synonymous with Label Encoding, or "Ordinal" Encoding with random order. We can use [category_encoders.OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html).- **One-Hot Encoding:** We can use [category_encoders.OneHotEncoder](http://contrib.scikit-learn.org/categorical-encoding/onehot.html).- **Binary Encoding:** We can use [category_encoders.BinaryEncoder](http://contrib.scikit-learn.org/categorical-encoding/binary.html).**2.** The short video **[Coursera - How to Win a Data Science Competition: Learn from Top Kagglers - Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)** introduces an interesting idea: use both X _and_ y to encode categoricals.Category Encoders has multiple implementations of this general concept:- [CatBoost Encoder](http://contrib.scikit-learn.org/categorical-encoding/catboost.html)- [James-Stein Encoder](http://contrib.scikit-learn.org/categorical-encoding/jamesstein.html)- [Leave One Out](http://contrib.scikit-learn.org/categorical-encoding/leaveoneout.html)- [M-estimate](http://contrib.scikit-learn.org/categorical-encoding/mestimate.html)- [Target Encoder](http://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)- [Weight of Evidence](http://contrib.scikit-learn.org/categorical-encoding/woe.html)Category Encoder's mean encoding implementations work for regression problems or binary classification problems. For multi-class classification problems, you will need to temporarily reformulate it as binary classification. For example:```pythonencoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) Both parameters > 1 to avoid overfittingX_train_encoded = encoder.fit_transform(X_train, y_train=='functional')X_val_encoded = encoder.transform(X_val)```**3.** The **[dirty_cat](https://dirty-cat.github.io/stable/)** library has a Target Encoder implementation that works with multi-class classification.```python dirty_cat.TargetEncoder(clf_type='multiclass-clf')```It also implements an interesting idea called ["Similarity Encoder" for dirty categories](https://www.slideshare.net/GaelVaroquaux/machine-learning-on-non-curated-data-154905090).However, it seems like dirty_cat doesn't handle missing values or unknown categories as well as category_encoders does. 
And you may need to use it with one column at a time, instead of with your whole dataframe.**4. [Embeddings](https://www.kaggle.com/learn/embeddings)** can work well with sparse / high cardinality categoricals._**I hope it's not too frustrating or confusing that there's not one "canonical" way to encode categoricals. It's an active area of research and experimentation! Maybe you can make your own contributions!**_
###Code
import os, sys
in_colab = 'google.colab' in sys.modules
# If you're in Colab...
if in_colab:
# Pull files from Github repo
os.chdir('/content')
!git init .
!git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
!git pull origin master
# Install required python packages
!pip install -r requirements.txt
# Change into directory for module
os.chdir('module4')
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'),
pd.read_csv('../data/tanzania/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
# Split train into train & val. Make val the same size as test.
target = 'status_group'
train, val = train_test_split(train, test_size=len(test),
stratify=train[target], random_state=42)
def wrangle(X):
"""Wrangle train, validate, and test sets in the same way"""
# Prevent SettingWithCopyWarning
X = X.copy()
# About 3% of the time, latitude has small values near zero,
# outside Tanzania, so we'll treat these values like zero.
X['latitude'] = X['latitude'].replace(-2e-08, 0)
# When columns have zeros and shouldn't, they are like null values.
# So we will replace the zeros with nulls, and impute missing values later.
# Also create a "missing indicator" column, because the fact that
# values are missing may be a predictive signal.
cols_with_zeros = ['longitude', 'latitude', 'construction_year',
'gps_height', 'population', 'amount_tsh']
for col in cols_with_zeros:
X[col] = X[col].replace(0, np.nan)
X[col+'_MISSING'] = X[col].isnull()
# Drop duplicate columns
duplicates = ['quantity_group', 'payment_type']
X = X.drop(columns=duplicates)
# Drop recorded_by (never varies) and id (always varies, random)
unusable_variance = ['recorded_by', 'id']
X = X.drop(columns=unusable_variance)
# Convert date_recorded to datetime
X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
# Extract components from date_recorded, then drop the original column
X['year_recorded'] = X['date_recorded'].dt.year
X['month_recorded'] = X['date_recorded'].dt.month
X['day_recorded'] = X['date_recorded'].dt.day
X = X.drop(columns='date_recorded')
# Engineer feature: how many years from construction_year to date_recorded
X['years'] = X['year_recorded'] - X['construction_year']
X['years_MISSING'] = X['years'].isnull()
    # Treat district_code and region_code as categorical (strings), not numeric
X['district_code'] = X['district_code'].astype(str)
X['region_code'] = X['region_code'].astype(str)
# return the wrangled dataframe
return X
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
%%time
import category_encoders as ce
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
# Arrange data into X features matrix and y target vector
# so we use *all* features, including the high-cardinality categoricals
X_train = train.drop(columns=target)
y_train = train[target]
X_val = val.drop(columns=target)
y_val = val[target]
X_test = test
# replacing one-hot encoder with "ordinal" encoder
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='median'),
RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
# Fit on train, score on val
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))
y_pred = pipeline.predict(X_val)
from sklearn.metrics import confusion_matrix
confusion_matrix(y_val, y_pred)
from sklearn.utils.multiclass import unique_labels
unique_labels(y_val)
# 1. check our labels are correct
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
return columns, index
plot_confusion_matrix(y_val, y_pred)
# 2. Return a dataframe
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred),
columns=columns, index=index)
return table
plot_confusion_matrix(y_val, y_pred)
# 3. Return a heatmap
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred),
columns=columns, index=index)
return sns.heatmap(table, annot=True, fmt='d', cmap='viridis')
import seaborn as sns
plot_confusion_matrix(y_val, y_pred);
from sklearn.metrics import classification_report
print(classification_report(y_val, y_pred))
###Output
precision recall f1-score support
functional 0.81 0.90 0.85 7798
functional needs repair 0.57 0.31 0.40 1043
non functional 0.85 0.78 0.81 5517
accuracy 0.81 14358
macro avg 0.74 0.66 0.69 14358
weighted avg 0.81 0.81 0.80 14358
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See below)- [ ] Stacking Ensemble. (See below)- [ ] More Categorical Encoding. (See below) RandomizedSearchCV / GridSearchCV, for model selection- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? 
Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)``` More Categorical Encodings**1.** The article **[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)** mentions 4 encodings:- **"Categorical Encoding":** This means using the raw categorical values as-is, not encoded. Scikit-learn doesn't support this, but some tree algorithm implementations do. For example, [Catboost](https://catboost.ai/), or R's [rpart](https://cran.r-project.org/web/packages/rpart/index.html) package.- **Numeric Encoding:** Synonymous with Label Encoding, or "Ordinal" Encoding with random order. We can use [category_encoders.OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html).- **One-Hot Encoding:** We can use [category_encoders.OneHotEncoder](http://contrib.scikit-learn.org/categorical-encoding/onehot.html).- **Binary Encoding:** We can use [category_encoders.BinaryEncoder](http://contrib.scikit-learn.org/categorical-encoding/binary.html).**2.** The short video **[Coursera - How to Win a Data Science Competition: Learn from Top Kagglers - Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)** introduces an interesting idea: use both X _and_ y to encode categoricals.Category Encoders has multiple implementations of this general concept:- [CatBoost Encoder](http://contrib.scikit-learn.org/categorical-encoding/catboost.html)- [James-Stein Encoder](http://contrib.scikit-learn.org/categorical-encoding/jamesstein.html)- [Leave One Out](http://contrib.scikit-learn.org/categorical-encoding/leaveoneout.html)- [M-estimate](http://contrib.scikit-learn.org/categorical-encoding/mestimate.html)- [Target Encoder](http://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)- [Weight of Evidence](http://contrib.scikit-learn.org/categorical-encoding/woe.html)Category Encoder's mean encoding implementations work for regression problems or binary classification problems. For multi-class classification problems, you will need to temporarily reformulate it as binary classification. For example:```pythonencoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) Both parameters > 1 to avoid overfittingX_train_encoded = encoder.fit_transform(X_train, y_train=='functional')X_val_encoded = encoder.transform(X_val)```**3.** The **[dirty_cat](https://dirty-cat.github.io/stable/)** library has a Target Encoder implementation that works with multi-class classification.```python dirty_cat.TargetEncoder(clf_type='multiclass-clf')```It also implements an interesting idea called ["Similarity Encoder" for dirty categories](https://www.slideshare.net/GaelVaroquaux/machine-learning-on-non-curated-data-154905090).However, it seems like dirty_cat doesn't handle missing values or unknown categories as well as category_encoders does. 
And you may need to use it with one column at a time, instead of with your whole dataframe.**4. [Embeddings](https://www.kaggle.com/learn/embeddings)** can work well with sparse / high cardinality categoricals._**I hope it's not too frustrating or confusing that there's not one "canonical" way to encode categoricals. It's an active area of research and experimentation! Maybe you can make your own contributions!**_
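To make the mean-encoding idea above concrete for this three-class problem, here is a small, hedged sketch that temporarily reformulates the target as binary ('functional' vs. not) and wires a TargetEncoder into a pipeline. It assumes the `x_train`, `x_val`, `y_train`, `y_val` objects built later in this notebook; the `min_samples_leaf` and `smoothing` values are illustrative, not tuned.

```python
# Minimal sketch: mean/target encoding with a binary reformulation of the target.
# Assumes x_train, y_train, x_val, y_val as created later in this notebook.
import category_encoders as ce
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline

# Binary target: is the waterpoint functional or not?
y_train_binary = (y_train == 'functional')
y_val_binary = (y_val == 'functional')

binary_pipeline = make_pipeline(
    ce.TargetEncoder(min_samples_leaf=20, smoothing=10),  # both > 1 to reduce overfitting
    SimpleImputer(strategy='median'),
    RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)

binary_pipeline.fit(x_train, y_train_binary)
print('Binary validation accuracy:', binary_pipeline.score(x_val, y_val_binary))
```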
###Code
import os, sys
in_colab = 'google.colab' in sys.modules
# If you're in Colab...
if in_colab:
# Pull files from Github repo
os.chdir('/content')
!git init .
!git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
!git pull origin master
# Install required python packages
!pip install -r requirements.txt
# Change into directory for module
os.chdir('module4')
%matplotlib inline
import category_encoders as ce
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
# data wrangling
def wrangle(x):
"""wrangle trian, val, and test sets in the same way"""
# make copy
x = x.copy()
# replace outliers with 0
x['latitude'] = x['latitude'].replace(-2e-08, 0)
# replace 0's with np.nan
cols_with_zeros = ['longitude', 'latitude', 'amount_tsh', 'construction_year', 'gps_height', 'population']
for col in cols_with_zeros:
x[col] = x[col].replace(0, np.nan)
x[col+'_missing'] = x[col].isna()
# drop duplicate
x = x.drop(columns=['quantity_group', 'payment_type'])
# to_datetime
x['date_recorded'] = pd.to_datetime(x['date_recorded'], infer_datetime_format=True)
# extract components
x['year_recorded'] = x['date_recorded'].dt.year
x['month_recorded'] = x['date_recorded'].dt.month
x['day_recorded'] = x['date_recorded'].dt.day
x = x.drop(columns='date_recorded')
# Engineer feature: how many years from construction_year to date_recorded
x['years'] = x['year_recorded'] - x['construction_year']
x['years_MISSING'] = x['years'].isna()
# drop recorded_by and id
x = x.drop(columns=['recorded_by', 'id'])
return x
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'),
pd.read_csv('../data/tanzania/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
# train/ validation split
train, val = train_test_split(train,
test_size=0.25,
stratify=train['status_group'],
random_state=0)
# apply wrangle() to all sets
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
# features matrix and target vector
target = 'status_group'
x_train = train.drop(columns=target)
y_train = train[target]
x_val = val.drop(columns=target)
y_val = val[target]
x_test = test
# make pipeline and fit on train, score on val
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='median'),
RandomForestClassifier(n_estimators=100,
random_state=0,
min_samples_split=9,
max_features='auto',
max_depth=35,
n_jobs=-1
)
)
# Fit on train, score on val
pipeline.fit(x_train, y_train)
# y_pred
y_pred = pipeline.predict(x_val)
# print scores
print('Train Accuracy', pipeline.score(x_train, y_train))
print('Validation Accuracy', pipeline.score(x_val, y_val))
from sklearn.metrics import confusion_matrix
confusion_matrix(y_val, y_pred)
# get labels
from sklearn.utils.multiclass import unique_labels
unique_labels(y_val)
# plot a heatmap
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred),
columns=columns, index=index)
return sns.heatmap(table, annot=True, fmt='d', cmap='viridis')
plot_confusion_matrix(y_val, y_pred);
# how many correct
7348 + 327 + 4450
# how many total predictions made
7348 + 115 + 602 + 596 + 327 + 156 + 1193 + 63 + 4450
correct_predictions = 12125
total_predictions = 14850
correct_predictions / total_predictions
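# The same accuracy, computed directly from the confusion matrix instead of by hand.
# Minimal sketch; assumes y_val and y_pred from the cells above.
cm = confusion_matrix(y_val, y_pred)
print('Accuracy from confusion matrix:', cm.trace() / cm.sum())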
# precision and recall
from sklearn.metrics import classification_report
print(classification_report(y_val, y_pred))
###Output
precision recall f1-score support
functional 0.80 0.91 0.85 8065
functional needs repair 0.65 0.30 0.41 1079
non functional 0.85 0.78 0.82 5706
accuracy 0.82 14850
macro avg 0.77 0.66 0.69 14850
weighted avg 0.81 0.82 0.81 14850
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See module 3 assignment notebook)- [ ] More Categorical Encoding. (See module 2 assignment notebook)- [ ] Stacking Ensemble. (See below) Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)```
###Code
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from datetime import datetime
from sklearn.ensemble import RandomForestClassifier
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
train = train.replace({'functional': 0, 'non functional': 2, 'functional needs repair': 1})
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
train, val = train_test_split(train, train_size=0.80,
test_size=0.20, stratify=train['status_group'], random_state=59)
train.shape, val.shape, test.shape
train.select_dtypes(include='number').describe().T
train.select_dtypes(exclude='number').describe().T.sort_values(by='unique')
def wrangle(X):
"""Wrangle train, validate, and test sets in the same way"""
    # Prevent SettingWithCopyWarning (in accordance with the lecture)
X = X.copy()
# About 3% of the time, latitude has small values near zero
X['latitude'] = X['latitude'].replace(-2e-08, 0)
    # replace the zeros with nulls.
    cols_with_zeros = ['longitude', 'latitude', 'population',
                       'construction_year', 'amount_tsh']
    for col in cols_with_zeros:
        X[col] = X[col].replace(0, np.nan)
    # duplicate or near-duplicate columns, so drop them
    X = X.drop(columns=['quantity', 'quality_group', 'waterpoint_type_group',
                        'extraction_type_group', 'extraction_type_class',
                        'payment', 'source_type', 'scheme_management'])
    # engineer features: inspection year and years between construction and inspection
X['year_inspected'] = pd.DatetimeIndex(X['date_recorded']).year
X['years_since_inspection'] = X['year_inspected'] - X['construction_year']
X = X.drop(columns='year_inspected')
X['Permited_and_Public'] = X['public_meeting'] & X['permit']
# return the wrangled dataframe
return X
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
# creating target and features lists
target = 'status_group'
train_features = train.drop(columns=[target, 'id'])
numeric_features = train_features.select_dtypes(include='number').columns.tolist()
cardinality = train_features.select_dtypes(exclude='number').nunique()
categorical_features = cardinality[cardinality <= 50].index.tolist()
features = numeric_features + categorical_features
print(features)
# creating Train, Validation, and Test vars
X_train = train.drop(columns=target)
y_train = train[target]
X_val = val.drop(columns=target)
y_val = val[target]
X_test = test
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='most_frequent'),
RandomForestClassifier(n_estimators=413, random_state=63,
n_jobs=-1, min_samples_split=4)
)
# Fit on train, score on val
#pipeline.fit(X_train, y_train)
#print('Validation Accuracy', pipeline.score(X_val, y_val))
k=3
scores = cross_val_score(pipeline, X_train, y_train, cv=k,
scoring='neg_mean_absolute_error')
print(f'MAE for {k} folds:', -scores)
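# For a classification target, accuracy is the more natural cross-validation metric.
# Minimal sketch using the same pipeline, data, and k as above.
accuracy_scores = cross_val_score(pipeline, X_train, y_train, cv=k, scoring='accuracy')
print(f'Accuracy for {k} folds:', accuracy_scores)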
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))
y_pred = pipeline.predict(X_val)
# estimator is your model or pipeline, which you've fit on X_train
# X_test is your pandas dataframe or numpy array,
# with the same number of rows, in the same order, as test_features.csv,
# and the same number of columns, in the same order, as X_train
y_pred = pipeline.predict(X_test)
# Makes a dataframe with two columns, id and status_group,
# and writes to a csv file, without the index
#train = train.replace({'functional': 0, 'non functional': 2, 'functional needs repair': 1})
sample_submission = sample_submission.copy()
sample_submission['status_group'] = y_pred
sample_submission['status_group'] = sample_submission['status_group'].replace({0:'functional', 2:'non functional', 1:'functional needs repair'})
sample_submission.to_csv('riley_base.csv', index=False)
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn.model_selection import validation_curve
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='most_frequent'),
RandomForestClassifier()
)
depth = range(1, 480, 20)  # note: these are n_estimators values, despite the variable name
train_scores, val_scores = validation_curve(
pipeline, X_train, y_train,
param_name='randomforestclassifier__n_estimators',
param_range=depth, scoring='neg_mean_absolute_error',
cv=3,
n_jobs=-1
)
plt.figure(dpi=150)
plt.plot(depth, np.mean(-train_scores, axis=1), color='blue', label='training error')
plt.plot(depth, np.mean(-val_scores, axis=1), color='red', label='validation error')
plt.title('Validation Curve')
plt.xlabel('model complexity: randomforestclassifier n_estimators')
plt.ylabel('model score: Mean Absolute Error')
plt.legend();
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from scipy.stats import randint, uniform
features = train.columns.drop(target)
X_train = train[features]
y_train = train[target]
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='most_frequent'),
RandomForestClassifier(random_state=42)
)
param_distributions = {
'simpleimputer__strategy': ['mean', 'median', 'most_frequent'],
'randomforestclassifier__n_estimators': randint(50, 500),
'randomforestclassifier__min_samples_split': [2, 4, 6, 8],
'randomforestclassifier__min_samples_leaf': [1, 2, 3, 4],
'randomforestclassifier__max_features': uniform(0, 1),
}
search = RandomizedSearchCV(
pipeline,
param_distributions=param_distributions,
n_iter=10,
cv=3,
scoring='neg_mean_absolute_error',
verbose=10,
return_train_score=True,
n_jobs=-1
)
search.fit(X_train, y_train);
print('Best hyperparameters', search.best_params_)
print('Cross-validation MAE', -search.best_score_)
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
import seaborn as sns  # needed for the heatmap below
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred),
columns=columns, index=index)
return sns.heatmap(table, annot=True, fmt='d', cmap='viridis')
# y_pred was overwritten above with test-set predictions, so recompute
# validation predictions with the best model found by the search
y_pred = search.best_estimator_.predict(X_val)
plot_confusion_matrix(y_val, y_pred);
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred),
columns=columns, index=index)
return table
plot_confusion_matrix(y_val, y_pred)
from scipy.stats import randint, uniform
from sklearn.ensemble import RandomForestRegressor  # this search treats the integer-coded target as a regression problem
features = train.columns.drop(target)
X_train = train[features]
y_train = train[target]
pipeline = make_pipeline(
ce.TargetEncoder(),
SimpleImputer(),
RandomForestRegressor(random_state=42)
)
param_distributions = {
'targetencoder__min_samples_leaf': randint(1, 1000),
'targetencoder__smoothing': uniform(1, 1000),
'simpleimputer__strategy': ['mean', 'median'],
'randomforestregressor__n_estimators': randint(50, 500),
'randomforestregressor__max_depth': [5, 10, 15, 20, None],
'randomforestregressor__max_features': uniform(0, 1),
}
# If you're on Colab, decrease n_iter & cv parameters
search = RandomizedSearchCV(
pipeline,
param_distributions=param_distributions,
n_iter=10,
cv=3,
scoring='neg_mean_absolute_error',
verbose=10,
return_train_score=True,
n_jobs=-1
)
search.fit(X_train, y_train);
print('Best hyperparameters', search.best_params_)
print('Cross-validation MAE', -search.best_score_)
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Catch up, if needed- [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2/portfolio-project/ds6), then choose your dataset, and [submit this form](https://forms.gle/nyWURUg65x1UTRNV9), due yesterday at 3:59pm Pacific.- Submit predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file.) The competition closes today at 3:59pm. Every student should make at least one submission that scores at least 60% accuracy (above the majority class baseline). Assignment- [ ] Continue to participate in our Kaggle challenge. - [ ] Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_ The competition closes today at 3:59pm.- [ ] Add comments and Markdown to your notebook. Clean up your code.- [ ] Commit your notebook to your fork of the GitHub repo. Stretch Goals Reading- Jake VanderPlas, [Python Data Science Handbook, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation- Jake VanderPlas, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107)- Ron Zacharski, [A Programmer's Guide to Data Mining, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation- Sebastian Raschka, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb)- Peter Worcester, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85) Doing- Try combining xgboost early stopping, cross-validation, & hyperparameter optimization, with [the "original" (non scikit-learn) xgboost API](https://xgboost.readthedocs.io/en/latest/python/python_api.htmlxgboost.cv).- In additon to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). Experiment with these alternatives.- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? 
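One of the stretch goals above mentions combining early stopping, cross-validation, and hyperparameter choices with the "original" xgboost API. Here is a minimal, hedged sketch of `xgboost.cv`. It assumes a fully numeric feature matrix `X_train_encoded` and an integer-encoded target `y_train_encoded` (neither is built in this notebook), and the parameter values are illustrative, not tuned.

```python
# Minimal sketch of cross-validated early stopping with the native xgboost API.
# Assumes X_train_encoded (all-numeric features) and y_train_encoded (integer labels)
# already exist; they are not built in this notebook.
import xgboost as xgb

dtrain = xgb.DMatrix(X_train_encoded, label=y_train_encoded)

params = {
    'objective': 'multi:softmax',  # three waterpump status classes
    'num_class': 3,
    'eval_metric': 'merror',
    'max_depth': 8,
    'eta': 0.1,
}

cv_results = xgb.cv(
    params,
    dtrain,
    num_boost_round=1000,
    nfold=3,
    early_stopping_rounds=50,  # stop when the CV metric hasn't improved for 50 rounds
    seed=42,
)

# With early stopping, the returned frame is truncated at the best iteration
print('Best number of boosting rounds:', len(cv_results))
print(cv_results.tail())
```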
Try stacking multiple submissions!Here's some code you can use:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)```
###Code
!conda install -c conda-forge category_encoders -y
!conda install -c conda-forge xgboost -y
import numpy as np
import pandas as pd
# Read New York City apartment rental listing data
df = pd.read_csv('../data/renthop-nyc.csv')
assert df.shape == (49352, 34)
# Remove the most extreme 1% prices,
# the most extreme .1% latitudes, &
# the most extreme .1% longitudes
df = df[(df['price'] >= np.percentile(df['price'], 0.5)) &
(df['price'] <= np.percentile(df['price'], 99.5)) &
(df['latitude'] >= np.percentile(df['latitude'], 0.05)) &
(df['latitude'] < np.percentile(df['latitude'], 99.95)) &
(df['longitude'] >= np.percentile(df['longitude'], 0.05)) &
(df['longitude'] <= np.percentile(df['longitude'], 99.95))]
# Do train/test split
# Use data from April & May 2016 to train
# Use data from June 2016 to test
df['created'] = pd.to_datetime(df['created'], infer_datetime_format=True)
cutoff = pd.to_datetime('2016-06-01')
train = df[df.created < cutoff]
test = df[df.created >= cutoff]
# Wrangle train & test sets in the same way
def engineer_features(df):
# Avoid SettingWithCopyWarning
df = df.copy()
# Does the apartment have a description?
df['description'] = df['description'].str.strip().fillna('')
df['has_description'] = df['description'] != ''
# How long is the description?
df['description_length'] = df['description'].str.len()
# How many total perks does each apartment have?
perk_cols = ['elevator', 'cats_allowed', 'hardwood_floors', 'dogs_allowed',
'doorman', 'dishwasher', 'no_fee', 'laundry_in_building',
'fitness_center', 'pre-war', 'laundry_in_unit', 'roof_deck',
'outdoor_space', 'dining_room', 'high_speed_internet', 'balcony',
'swimming_pool', 'new_construction', 'exclusive', 'terrace',
'loft', 'garden_patio', 'common_outdoor_space',
'wheelchair_access']
df['perk_count'] = df[perk_cols].sum(axis=1)
# Are cats or dogs allowed?
df['cats_or_dogs'] = (df['cats_allowed']==1) | (df['dogs_allowed']==1)
# Are cats and dogs allowed?
df['cats_and_dogs'] = (df['cats_allowed']==1) & (df['dogs_allowed']==1)
# Total number of rooms (beds + baths)
df['rooms'] = df['bedrooms'] + df['bathrooms']
# Extract number of days elapsed in year, and drop original date feature
df['days'] = (df['created'] - pd.to_datetime('2016-01-01')).dt.days
df = df.drop(columns='created')
return df
train = engineer_features(train)
test = engineer_features(test)
import pandas_profiling
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See module 3 assignment notebook)- [ ] More Categorical Encoding. (See module 2 assignment notebook)- [ ] Stacking Ensemble. (See below) Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)```
###Code
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
import pandas as pd
from sklearn.model_selection import train_test_split
# Merge train_features.csv & train_labels.csv
train_val = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
train, val = train_test_split(train_val, random_state=2019)
train.shape, val.shape, test.shape
target = 'status_group'
features = train_val.columns.drop([target])
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
# Tanzania's max and min longitude and latitude never cross zero
should_not_have_zeroes_or_close_to_zeroes = ['amount_tsh', 'longitude',
'latitude', 'num_private',
'construction_year']
should_nots = should_not_have_zeroes_or_close_to_zeroes
def wrangle(data):
import numpy as np
import pandas as pd
# Prevents 'SettingWithCopyWarning' whatever that is
data = data.copy()
    # replace zeroes (and values very close to zero) with NaN where a true zero is implausible
for col in should_not_have_zeroes_or_close_to_zeroes:
data[col] = np.where(data[col].between(-0.0001, 0.0001), np.nan, data[col])
# convert days of year to datetime
data['date_recorded'] = pd.to_datetime(data['date_recorded'])
#OrdinalEncoder seems to not like datetime, so we drop all information except the year of date_recorded
data['date_recorded'] = data['date_recorded'].dt.year
    # create a feature: time between construction and the last inspection
data['time_between_inspections'] = data['date_recorded'] - data['construction_year']
return(data)
X_train = wrangle(X_train)
X_val = wrangle(X_val)
X_test = wrangle(X_test)
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
import category_encoders as ce
%%time
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='median'),
RandomForestClassifier(n_estimators=150, random_state=2019, n_jobs=-1)
)
# fit the model to the data
pipeline.fit(X_train, y_train)
# get scores
print('Train Accuracy: ', pipeline.score(X_train, y_train))
print('Val Accuracy: ', pipeline.score(X_val, y_val))
#make prediction on the val
y_val_pred = pipeline.predict(X_val)
#make a prediction on the test
y_pred = pipeline.predict(X_test)
# generate submission
# estimator is your model or pipeline, which you've fit on X_train
# X_test is your pandas dataframe or numpy array,
# with the same number of rows, in the same order, as test_features.csv,
# and the same number of columns, in the same order, as X_train
# Makes a dataframe with two columns, id and status_group,
# and writes to a csv file, without the index
submission = sample_submission.copy()
submission['status_group'] = y_pred
submission.to_csv('Curtis-McKendrick-Pipes-RandomForest.csv', index=False)
# we now have a model and predictions with which to plot a confusion matrix
from sklearn.metrics import confusion_matrix
confusion_matrix(y_val, y_val_pred)
# now we make a nicer one with labels
from sklearn.utils.multiclass import unique_labels
unique_labels(y_val)
labels = unique_labels(y_val_pred)
columns = [f'Predicted {label}' for label in labels]
rows = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_val, y_val_pred),
columns=columns, index=rows)
table
import seaborn as sns
sns.heatmap(table, fmt='d', annot=True, cmap='Greens');
# fmt='d' makes the numbers not be in scientific notation
# annot=True puts the numbers directly on the squares
# cmap selects the color palette the heat map uses
y_train_non_func = y_train != 'functional'
y_val_non_func = y_val != 'functional'
pipeline.fit(X_train, y_train_non_func);
y_pred_non_func = pipeline.predict(X_val)
labels = unique_labels(y_pred_non_func)
columns = [f'Predicted {label}' for label in labels]
rows = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_val_non_func, y_pred_non_func),
columns=columns, index=rows)
table
sns.heatmap(table, fmt='d', annot=True, cmap='Greens');
probabilities = pipeline.predict_proba(X_val)
probabilities
y_pred_proba = probabilities[:, 1]
y_pred_non_func
threshold = .94
probabilities[:, 1] > threshold
ax = sns.distplot(probabilities[:, 1])
ax.axvline(threshold, color='red')
pd.Series(probabilities[:, 1] > threshold).value_counts()
y_pred_non_func[probabilities[:, 1] > threshold]
results = pd.DataFrame({'y_val': y_val_non_func, 'y_pred_proba': y_pred_proba})
top2000 = results.sort_values(by='y_pred_proba', ascending=False)[:2000]
top2000
top2000['y_val'].sum()
print('Precision @ 2000:', top2000['y_val'].sum() / 2000)
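# A small reusable version of the "precision at k" idea above.
# Minimal sketch; assumes the boolean y_val_non_func and y_pred_proba from the cells above.
def precision_at_k(y_true, y_scores, k):
    # Take the k examples the model is most confident are positive,
    # then ask what fraction of them really are positive.
    top_k = pd.DataFrame({'y_true': y_true, 'score': y_scores}) \
              .sort_values(by='score', ascending=False)[:k]
    return top_k['y_true'].sum() / k

for k in (500, 2000):
    print(f'Precision @ {k}:', precision_at_k(y_val_non_func, y_pred_proba, k))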
###Output
_____no_output_____
###Markdown
Kaggle Assignment 4 Previous Day's CodeBasically unchanged aside from reducing the number of dropped duplicate columns to 'payment_type', 'quantity_group', and 'extraction_type_group'. Of the "nearly-duplicate" columns, 'extraction_type_group' was the only one whose removal improved my val accuracy.Also added the 'years_difference' column.
###Code
import sys
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
train.shape, test.shape
# Checked the average elevation of Tanzania: it's about 1,200 m, so the zeroes
# here are almost certainly missing values rather than real elevations. The
# country's lowest elevation is 0 (sea level), though, so the negative values
# are still puzzling.
train['gps_height'].value_counts()
# Numeric Columns to clean
numeric_to_clean = ['longitude','latitude','construction_year', 'gps_height']
# Checking for duplicate columns
duplicates1 = ['extraction_type','extraction_type_group','extraction_type_class']
duplicates2 = ['payment','payment_type']
duplicates3 = ['quantity_group','quantity']
duplicates4 = ['source','source_type']
duplicates5 = ['waterpoint_type','waterpoint_type_group']
train.head()
#Checking the duplicates to decide which to keep
duplicate_lists = [duplicates1, duplicates2, duplicates3, duplicates4,
duplicates5]
for duplicate in duplicate_lists:
print(train[duplicate].describe())
print("")
#my_train['region'].value_counts().index
# .copy() so the replacements below modify an actual copy rather than a view of train
Mwanza = train[train['region'] == 'Mwanza'].copy()
Mwanza['longitude'] = Mwanza['longitude'].replace(0, np.nan)
Mwanza['latitude'] = Mwanza['latitude'].replace(-2e-08, np.nan)
np.mean(Mwanza['latitude'])
np.mean(Mwanza['longitude'])
Shinyanga = train[train['region'] == 'Shinyanga'].copy()
Shinyanga['longitude'] = Shinyanga['longitude'].replace(0, np.nan)
Shinyanga['latitude'] = Shinyanga['latitude'].replace(-2e-08, np.nan)
np.mean(Shinyanga['longitude'])
np.mean(Shinyanga['latitude'])
#Looking at the above lists, I'll remove the duplicate and nearly-duplicate
# columns that have fewer unique values
duplicates_to_drop = ['extraction_type_group','extraction_type_class',
'payment_type','quantity_group', 'source_type',
'waterpoint_type_group']
from sklearn.model_selection import train_test_split
my_train, my_val = train_test_split(train, random_state=333)
my_train[my_train['longitude']==0]['region'].value_counts()
my_train[my_train['region']== 'Mwanza']
import numpy as np
def wrangle(X):
# To prevent copy warnings
X = X.copy()
# Latitude is strange in that it doesn't have any 0s, but it does have these
# near-0 values
X['latitude'] = X['latitude'].replace(-2e-08, 0)
  # there are numeric columns with 0s that should be NaN
nans_as_zeros = ['latitude','longitude', 'construction_year',
'gps_height', 'population']
for column in nans_as_zeros:
X[column] = X[column].replace(0, np.nan)
    #I like this approach Ryan had: make a new column flagging whether the value was missing
X[column+'_MISSING'] = X[column].isnull()
# X['longitude'] = X.apply(
# lambda row: np.mean(Mwanza['longitude']) if np.isnan(row['longitude']) and row['region'] == 'Mwanza' else row['longitude'],
# axis=1)
# X['longitude'] = X.apply(
# lambda row: np.mean(Shinyanga['longitude']) if np.isnan(row['longitude']) and row['region'] == 'Shinyanga' else row['longitude'],
# axis=1)
# X['latitude'] = X.apply(
# lambda row: np.mean(Mwanza['latitude']) if np.isnan(row['latitude']) and row['region'] == 'Mwanza' else row['latitude'],
# axis=1)
# X['latitude'] = X.apply(
# lambda row: np.mean(Shinyanga['latitude']) if np.isnan(row['latitude']) and row['region'] == 'Shinyanga' else row['latitude'],
# axis=1)
  #date_recorded is read in as a string; convert to datetime and extract year/month/day
X['date_recorded'] = pd.to_datetime(X['date_recorded'])
X['year_recorded'] = X['date_recorded'].dt.year
X['month_recorded'] = X['date_recorded'].dt.month
X['day_recorded'] = X['date_recorded'].dt.day
X = X.drop(columns='date_recorded')
# Engineer feature: how many years from construction_year to date_recorded
X['years_difference'] = X['year_recorded'] - X['construction_year']
#Removing duplicate or near-duplicate columns
X = X.drop(columns=['payment_type','quantity_group', 'extraction_type_group'])
  #The same function can be applied to train, validation, and test
return X
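# A hedged sketch (defined but not called below): the commented-out Mwanza and
# Shinyanga code in wrangle() imputes missing coordinates with per-region means
# by hand. The same idea generalizes to every region with a groupby/transform,
# run after the zeros have been converted to NaN.
def impute_coords_by_region(X):
    X = X.copy()
    for col in ['latitude', 'longitude']:
        region_means = X.groupby('region')[col].transform('mean')
        X[col] = X[col].fillna(region_means)
    return X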
%%time
my_train = wrangle(my_train)
my_val = wrangle(my_val)
test = wrangle(test)
my_train['longitude'].value_counts()
my_train[my_train['id']== 6091]
# # Copied from previous assignment.
# # Unnecessary because high cardinality features are fine
# # # Selecting target
# target = 'status_group'
# #Removing the target and useless id columns
# train_columns = my_train.drop(columns=[target,'id'])
# # separating numeric columns to re-add after
# numeric_columns = train_columns.select_dtypes(include='number').columns.tolist()
# #Getting a list of cardinality for categorical features to exclude the large
# cardinality = train_columns.select_dtypes(exclude='number').nunique()
# #Excluding features with a cardinality over 50
# categorical_columns = cardinality[cardinality <50].index.tolist()
# #combining lists to get the features I will use for my model
# features = numeric_columns + categorical_columns
# We can use high cardinality features, so no need to remove them
target = 'status_group'
features = my_train.drop(columns=[target,'id']).columns
#Assigning variables
X_train = my_train[features]
y_train = my_train[target]
X_val = my_val[features]
y_val = my_val[target]
X_test = test[features]
###Output
_____no_output_____
###Markdown
Assignment 4 code Went with a ColumnTransformer that applies ordinal encoding ONLY to the high-cardinality columns and one-hot encoding otherwise.
###Code
from sklearn.compose import ColumnTransformer
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.pipeline import Pipeline
#Getting a list of cardinality for categorical features to include in ordinal encoder
train_columns = my_train.drop(columns=[target,'id'])
cardinality = train_columns.select_dtypes(exclude='number').nunique()
high_cardinality = cardinality[cardinality >49].index.tolist()
# Making the high cardinality pipeline
high_cardinality_pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='median')
)
# Making the non-high cardinality pipeline
non_high_cardinality = train_columns.drop(columns=high_cardinality).columns
non_high_cardinality_pipeline = make_pipeline(
ce.OneHotEncoder(),
SimpleImputer(strategy='median')
)
#Associating the transformers with the list of features
preprocessor = ColumnTransformer(
transformers=[
('high_card',high_cardinality_pipeline, high_cardinality),
('low_card',non_high_cardinality_pipeline, non_high_cardinality)])
my_pipeline = make_pipeline(
preprocessor,
RandomForestClassifier(n_estimators=100, random_state=333, n_jobs=-1,
max_depth=20)
)
my_pipeline.fit(X_train,y_train)
my_pipeline.score(X_val,y_val)
from scipy.stats import randint, uniform
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
###Output
_____no_output_____
###Markdown
Set up the RandomizedSearchCV to tune the random forest's n_estimators, max_depth, and min_samples_leaf.
###Code
%%time
param_distributions = {
'randomforestclassifier__n_estimators': randint(100, 200),
'randomforestclassifier__max_depth': [17, 18,19,20,21,22, None],
'randomforestclassifier__min_samples_leaf': [1,2,3,4,5]
}
search = RandomizedSearchCV(
my_pipeline,
param_distributions=param_distributions,
n_iter=10,
cv=5,
scoring='accuracy',
verbose=10,
return_train_score=False,
n_jobs=-1
)
search.fit(X_train,y_train)
search.best_score_
search.best_params_
search.best_estimator_
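# A small sketch for looking beyond best_params_: cv_results_ records every
# sampled candidate, which pandas can rank by validation score.
cv_results = pd.DataFrame(search.cv_results_)
cv_results.sort_values(by='rank_test_score')[['rank_test_score', 'mean_test_score', 'params']].head()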
###Output
_____no_output_____
###Markdown
Submission Code
###Code
pipeline = search.best_estimator_
pred_y_test = pipeline.predict(X_test)
cv8_submission = test.copy()
cv8_submission['status_group'] = pred_y_test
cv8_submission = cv8_submission.filter(['id','status_group'])
cv8_submission
cv8_submission.to_csv('cv8_submission.csv', index=False)
###Output
_____no_output_____
###Markdown
The code below combines the my_train and my_val data for a final fit with the RandomizedSearchCV-optimized model
###Code
my_train.shape,my_val.shape
# Combining all training data to use with the best model the randomcv found
final_train = pd.concat([my_train,my_val], ignore_index=True)
final_train.shape
final_train.head()
pipeline.fit(final_train[features],final_train[target])
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See module 3 assignment notebook)- [ ] More Categorical Encoding. (See module 2 assignment notebook)- [ ] Stacking Ensemble. (See below) Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)```
###Code
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
import pandas as pd
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test_features = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
%matplotlib inline
import category_encoders as ce
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
def wrangle(X):
"""Wrangles train, validate, and test sets in the same way"""
X = X.copy()
# Convert date_recorded to datetime
X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
# Extract components from date_recorded, then drop the original column
X['year_recorded'] = X['date_recorded'].dt.year
X['month_recorded'] = X['date_recorded'].dt.month
X['day_recorded'] = X['date_recorded'].dt.day
X = X.drop(columns='date_recorded')
# Engineer feature: how many years from construction_year to date_recorded
X['years'] = X['year_recorded'] - X['construction_year']
# Drop recorded_by (never varies) and id (always varies, random)
unusable_variance = ['recorded_by', 'id']
X = X.drop(columns=unusable_variance)
# Drop duplicate columns
duplicate_columns = ['quantity_group']
X = X.drop(columns=duplicate_columns)
# About 3% of the time, latitude has small values near zero,
# outside Tanzania, so we'll treat these like null values
X['latitude'] = X['latitude'].replace(-2e-08, np.nan)
# When columns have zeros and shouldn't, they are like null values
cols_with_zeros = ['construction_year', 'longitude', 'latitude', 'gps_height', 'population']
for col in cols_with_zeros:
X[col] = X[col].replace(0, np.nan)
return X
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
# Split train into train & val. Make val the same size as test.
target = 'status_group'
train, val = train_test_split(train, test_size=len(test),
stratify=train[target], random_state=42)
# Wrangle train, validate, and test sets in the same way
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
# Arrange data into X features matrix and y target vector
X_train = train.drop(columns=target)
y_train = train[target]
X_val = val.drop(columns=target)
y_val = val[target]
X_test = test
# Make pipeline!
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='mean'),
RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
# Fit on train, score on val
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
print('Validation Accuracy', accuracy_score(y_val, y_pred))
from sklearn.metrics import confusion_matrix
confusion_matrix(y_val, y_pred)
from sklearn.utils.multiclass import unique_labels
unique_labels(y_val)
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
return columns, index
plot_confusion_matrix(y_val, y_pred)
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred),
columns=columns, index=index)
return table
plot_confusion_matrix(y_val, y_pred)
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred),
columns=columns, index=index)
return sns.heatmap(table, annot=True, fmt='d', cmap='viridis')
plot_confusion_matrix(y_val, y_pred);
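# A hedged sketch (not from the original notebook): the same matrix,
# row-normalized so each cell shows the share of its actual class, which is
# easier to compare across classes of different sizes. Normalization is done
# by hand rather than relying on a newer sklearn normalize= argument.
def plot_normalized_confusion_matrix(y_true, y_pred):
    labels = unique_labels(y_true)
    columns = [f'Predicted {label}' for label in labels]
    index = [f'Actual {label}' for label in labels]
    cm = confusion_matrix(y_true, y_pred)
    normalized = cm / cm.sum(axis=1, keepdims=True)
    table = pd.DataFrame(normalized, columns=columns, index=index)
    return sns.heatmap(table, annot=True, fmt='.2f', cmap='viridis')

plot_normalized_confusion_matrix(y_val, y_pred);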
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See below)- [ ] Stacking Ensemble. (See below)- [ ] More Categorical Encoding. (See below) RandomizedSearchCV / GridSearchCV, for model selection- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? 
Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)``` More Categorical Encodings**1.** The article **[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)** mentions 4 encodings:- **"Categorical Encoding":** This means using the raw categorical values as-is, not encoded. Scikit-learn doesn't support this, but some tree algorithm implementations do. For example, [Catboost](https://catboost.ai/), or R's [rpart](https://cran.r-project.org/web/packages/rpart/index.html) package.- **Numeric Encoding:** Synonymous with Label Encoding, or "Ordinal" Encoding with random order. We can use [category_encoders.OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html).- **One-Hot Encoding:** We can use [category_encoders.OneHotEncoder](http://contrib.scikit-learn.org/categorical-encoding/onehot.html).- **Binary Encoding:** We can use [category_encoders.BinaryEncoder](http://contrib.scikit-learn.org/categorical-encoding/binary.html).**2.** The short video **[Coursera โ How to Win a Data Science Competition: Learn from Top Kagglers โ Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)** introduces an interesting idea: use both X _and_ y to encode categoricals.Category Encoders has multiple implementations of this general concept:- [CatBoost Encoder](http://contrib.scikit-learn.org/categorical-encoding/catboost.html)- [James-Stein Encoder](http://contrib.scikit-learn.org/categorical-encoding/jamesstein.html)- [Leave One Out](http://contrib.scikit-learn.org/categorical-encoding/leaveoneout.html)- [M-estimate](http://contrib.scikit-learn.org/categorical-encoding/mestimate.html)- [Target Encoder](http://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)- [Weight of Evidence](http://contrib.scikit-learn.org/categorical-encoding/woe.html)Category Encoder's mean encoding implementations work for regression problems or binary classification problems. For multi-class classification problems, you will need to temporarily reformulate it as binary classification. For example:```pythonencoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) Both parameters > 1 to avoid overfittingX_train_encoded = encoder.fit_transform(X_train, y_train=='functional')X_val_encoded = encoder.transform(X_train, y_val=='functional')```**3.** The **[dirty_cat](https://dirty-cat.github.io/stable/)** library has a Target Encoder implementation that works with multi-class classification.```python dirty_cat.TargetEncoder(clf_type='multiclass-clf')```It also implements an interesting idea called ["Similarity Encoder" for dirty categories](https://www.slideshare.net/GaelVaroquaux/machine-learning-on-non-curated-data-154905090).However, it seems like dirty_cat doesn't handle missing values or unknown categories as well as category_encoders does. 
And you may need to use it with one column at a time, instead of with your whole dataframe.**4. [Embeddings](https://www.kaggle.com/learn/embeddings)** can work well with sparse / high cardinality categoricals._**I hope it's not too frustrating or confusing that there's not one "canonical" way to encode categoricals. It's an active area of research and experimentation! Maybe you can make your own contributions!**_
###Code
import os, sys
in_colab = 'google.colab' in sys.modules
# If you're in Colab...
if in_colab:
# Pull files from Github repo
os.chdir('/content')
!git init .
!git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
!git pull origin master
# Install required python packages
!pip install -r requirements.txt
# Change into directory for module
os.chdir('module4')
# Merge train_features.csv & train_labels.csv
import pandas as pd
from sklearn.model_selection import train_test_split
train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'),
pd.read_csv('../data/tanzania/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
# Split train into train & val. Make val the same size as test.
target = 'status_group'
train, val = train_test_split(train, test_size=len(test),
stratify=train[target], random_state=42)
import category_encoders as ce
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
def wrangle(df):
df = df.copy()
df['date_recorded'] = pd.to_datetime(df['date_recorded'], infer_datetime_format =True)
df['year_recorded'] = df['date_recorded'].dt.year
df['month_recorded'] = df['date_recorded'].dt.month
df['day_recorded'] = df['date_recorded'].dt.day
df = df.drop(columns = 'date_recorded')
#engineer feature
df['years'] = df['year_recorded'] - df['construction_year']
#drop columns with unusable variance
unusable = ['recorded_by','id']
df = df.drop(columns = unusable)
#drop duplicates
duplicates = ['quantity_group']
df = df.drop(columns = duplicates)
#replace small value with nans
df['latitude'] = df['latitude'].replace(-2e-08, np.nan)
#replace zeros with nans
zeros = ['construction_year','longitude','latitude','gps_height','population']
for col in zeros:
df[col] = df[col].replace(0,np.nan)
return df
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
test.head()
#Arrange data into X features matrix and y target vector
target = 'status_group'
y_train = train[target]
y_val = val[target]
X_train = train.drop(columns = target)
X_test = test
X_val = val.drop(columns = target)
#Make pipeline
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy = 'mean'),
RandomForestClassifier(n_estimators = 100, random_state=42, n_jobs = -1)
)
#Fit on data
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
print("Validation Accuracy", accuracy_score(y_val, y_pred))
###Output
Validation Accuracy 0.8140409527789386
###Markdown
Get and interpret the confusion matrix for classification models
###Code
# Get confusion matrix
from sklearn.metrics import confusion_matrix
confusion_matrix(y_val, y_pred)
# Get labels
from sklearn.utils.multiclass import unique_labels
unique_labels(y_val)
# Create a function that plots the confusion matrix
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true,y_pred), columns = columns, index=index)
return sns.heatmap(table, annot = True, fmt = 'd', cmap = 'viridis')
plot_confusion_matrix(y_val, y_pred)
correct_predictions_made = 7005 + 332 + 4351
total_predictions_made = np.sum(confusion_matrix(y_val, y_pred))
classification_accuracy = correct_predictions_made/total_predictions_made
from sklearn.metrics import classification_report
print(classification_report(y_val, y_pred))
print("Correct Predictions Made:", correct_predictions_made)
print("Total Predictions Made:", total_predictions_made)
print("Classification Accuracy:", classification_accuracy) #this is also equal to the accuracy_score which is: sum(y_pred == y_val)/len(y_pred)
###Output
Correct Predictions Made: 11688
Total Predictions Made: 14358
Classification Accuracy: 0.8140409527789386
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See module 3 assignment notebook)- [ ] More Categorical Encoding. (See module 2 assignment notebook)- [ ] Stacking Ensemble. (See below) Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)```
###Code
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
import pandas as pd
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
%matplotlib inline
import category_encoders as ce
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
def wrangle(X):
X = X.copy()
# Convert date_recorded to datetime
X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
# Extract components from date_recorded, then drop the original column
X['year_recorded'] = X['date_recorded'].dt.year
X['month_recorded'] = X['date_recorded'].dt.month
X['day_recorded'] = X['date_recorded'].dt.day
X = X.drop(columns='date_recorded')
# Engineer feature: how many years from construction_year to date_recorded
X['years'] = X['year_recorded'] - X['construction_year']
# Drop recorded_by (never varies) and id (always varies, random)
unusable_variance = ['recorded_by', 'id']
X = X.drop(columns=unusable_variance)
# Drop duplicate columns
duplicate_columns = ['quantity_group']
X = X.drop(columns=duplicate_columns)
# About 3% of the time, latitude has small values near zero,
# outside Tanzania, so we'll treat these like null values
X['latitude'] = X['latitude'].replace(-2e-08, np.nan)
# When columns have zeros and shouldn't, they are like null values
cols_with_zeros = ['construction_year', 'longitude', 'latitude', 'gps_height', 'population']
for col in cols_with_zeros:
X[col] = X[col].replace(0, np.nan)
return X
# Split train into train & val. Make val the same size as test.
target = 'status_group'
train, val = train_test_split(train, test_size=len(test),
stratify=train[target], random_state=42)
# Wrangle train, validate, and test sets in the same way
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
# Arrange data into X features matrix and y target vector
X_train = train.drop(columns=target)
y_train = train[target]
X_val = val.drop(columns=target)
y_val = val[target]
X_test = test
# Make pipeline!
pipeline = make_pipeline(
ce.OrdinalEncoder(drop_invariant=True),
SimpleImputer(strategy='mean'),
RandomForestClassifier(n_estimators=269, max_depth=None, max_features=0.30738304577027475, random_state=42, n_jobs=-1)
)
# Fit on train, score on val
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
print('Validation Accuracy', accuracy_score(y_val, y_pred))
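# A hedged sketch (not in the original): pull feature importances out of the
# fitted random forest. Asking the encoder for its transformed columns keeps the
# names aligned even if drop_invariant removed any constant columns.
rf = pipeline.named_steps['randomforestclassifier']
encoded_columns = pipeline.named_steps['ordinalencoder'].transform(X_val).columns
importances = pd.Series(rf.feature_importances_, index=encoded_columns)
importances.sort_values(ascending=False).head(10)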
from sklearn.metrics import confusion_matrix
confusion_matrix(y_val,y_pred)
# getting labels
from sklearn.utils.multiclass import unique_labels
unique_labels(y_val)
# check that our labels are correct
def confusion_matrix_dataframe(y_true, y_pred):
labels = unique_labels(y_true, y_pred)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
return columns, index
confusion_matrix_dataframe(y_val, y_pred)
# Plot a heatmap
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred),
columns=columns, index=index)
return sns.heatmap(table, annot=True, fmt='d', cmap='viridis')
plot_confusion_matrix(y_val, y_pred);
# Formatting submission
y_pred = pipeline.predict(X_test)
submission = sample_submission.copy()
submission['status_group'] = y_pred
submission.to_csv('submission-Amer10.csv', index = False)
###Output
_____no_output_____
###Markdown
Lambda School Data Science*Unit 2, Sprint 2, Module 4*--- Classification Metrics Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 70% accuracy (well above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See module 3 assignment notebook)- [ ] More Categorical Encoding. (See module 2 assignment notebook)- [ ] Stacking Ensemble. (See below) Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)```
###Code
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
import pandas as pd
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Catch up, if needed- [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2/portfolio-project/ds6), then choose your dataset, and [submit this form](https://forms.gle/nyWURUg65x1UTRNV9), due yesterday at 3:59pm Pacific.- Submit predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file.) The competition closes today at 3:59pm. Every student should make at least one submission that scores at least 60% accuracy (above the majority class baseline). Assignment- [ ] Continue to participate in our Kaggle challenge. - [ ] Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_ The competition closes today at 3:59pm.- [ ] Add comments and Markdown to your notebook. Clean up your code.- [ ] Commit your notebook to your fork of the GitHub repo. Stretch Goals Reading- Jake VanderPlas, [Python Data Science Handbook, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation- Jake VanderPlas, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107)- Ron Zacharski, [A Programmer's Guide to Data Mining, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation- Sebastian Raschka, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb)- Peter Worcester, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85) Doing- Try combining xgboost early stopping, cross-validation, & hyperparameter optimization, with [the "original" (non scikit-learn) xgboost API](https://xgboost.readthedocs.io/en/latest/python/python_api.htmlxgboost.cv).- In additon to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). Experiment with these alternatives.- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? 
Try stacking multiple submissions!Here's some code you can use:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)```
###Code
import numpy as np
import pandas as pd
import tensorflow as tf
# If you're in Colab...
import os, sys
in_colab = 'google.colab' in sys.modules
if in_colab:
# Install required python packages:
# category_encoders, version >= 2.0
# eli5, version >= 0.9
# pandas-profiling, version >= 2.0
# plotly, version >= 4.0
!pip install --upgrade category_encoders eli5 pandas-profiling plotly
# Pull files from Github repo
os.chdir('/content')
!git init .
!git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
!git pull origin master
# Change into directory for module
os.chdir('module3')
from sklearn.model_selection import train_test_split
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'),
pd.read_csv('../data/tanzania/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
# Data Cleaning and Wrangling
def wrangle(X):
X = X.copy()
# fixing the latitude outlier
X['latitude'] = X['latitude'].replace(-2e-08, 0)
# replacing 0s with nans, so that imputer can have a field day with them
cols_with_zeros = ['longitude', 'latitude', 'construction_year',
'gps_height', 'population', 'amount_tsh']
for col in cols_with_zeros:
X[col] = X[col].replace(0, np.nan)
X[col+'_MISSING'] = X[col].isnull()
# handling duplicates
duplicates = ['quantity_group']
X = X.drop(columns=duplicates)
# handling unusable data
unusable_variance = ['recorded_by', 'id']
X = X.drop(columns=unusable_variance)
# changing the date string to date time format
X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
X['year_recorded'] = X['date_recorded'].dt.year
X['month_recorded'] = X['date_recorded'].dt.month
X['day_recorded'] = X['date_recorded'].dt.day
X = X.drop(columns='date_recorded')
# calculating years since construction
X['years'] = X['year_recorded'] - X['construction_year']
X['years_MISSING'] = X['years'].isnull()
# giving numerical quantities to payment type category
X['payment_type'] = X.payment_type.replace({'annually' : 2, 'never pay' : 0,
'unknown' : 0, 'on failure' : 0,'other' : 0,
'per bucket' : 1, 'monthly' : 1})
  # in the early 2000s the Tanzanian government enacted the National Water Policy (NAWAPO)
X['new generation'] = ((X['construction_year'] > 2000).map({True : 1, False : 0}))
# ranking according to feature importances
X['region'] = X.region.replace({'Iringa' : 1, 'Arusha' : 2.8, 'Manyara' : 1.4, 'Shinyanga' : .5,
'Mbeya' : -2, 'Kilimanjaro' : 1.1, 'Morogoro' : -1, 'Kagera' : 1.1,
'Mwanza' : 2, 'Kigoma' : -1, 'Ruvuma' : -1, 'Pwani':-.5, 'Tanga':1.5,
'Dodoma' : 0, 'Singida' : .7, 'Mara' : 0, 'Dar es Salaam' : 0,
'Tabora' : -.15, 'Rukwa' : -1, 'Lindi' : -1, 'Mtwara' : -1})
# ranking according to feature importances
X['waterpoint_types'] = X.waterpoint_type.replace({'communal standpipe' : 1.5,
'hand pump' : .6,
'other' : -2,
'communal standpipe multiple' : -2,
'improved spring' : 1,
'cattle trough' : 0,
'dam' : 0})
# ranking according to feature importances
X['quantity'] = X.quantity.replace({'enough': 3,
'insufficient' : 2,
'dry' : -4,
'seasonal' : 1.5,
'unknown' : 0})
X['basin'] = X.basin.replace({'Lake Victoria' : -.25,
'Pangani' : -1.5,
'Rufiji' : .15,
'Internal' : -.1,
'Lake Tanganyika' : .25,
'Wami / Ruvu' : .1,
'Lake Nyasa' : 3,
'Ruvuma / Southern Coast' : -.1,
'Lake Rukwa' : -.6})
return X
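# A hedged sketch (not used below): the hand-tuned category scores above could
# instead be learned from the training data, e.g. by mapping each category to
# its observed share of functional pumps. The mapping must be fit on train only
# and then applied to test to avoid leakage.
def learn_category_scores(train_df, column, target='status_group'):
    return (train_df[target] == 'functional').groupby(train_df[column]).mean()

# Hypothetical usage, before wrangle() replaces the text categories:
# basin_scores = learn_category_scores(train, 'basin')
# train['basin_score'] = train['basin'].map(basin_scores)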
train = wrangle(train)
test = wrangle(test)
train['basin'].value_counts()
target = 'status_group'
X_train = train.drop(columns=target)
y_train = train[target]
X_test = test
target = 'status_group'
train_features = train.drop(columns=[target])
numeric_features = train_features.select_dtypes(include='number').columns.tolist()
high_cardinality = train_features.select_dtypes(exclude='number').nunique()
categorical_features = high_cardinality[high_cardinality<=50].index.tolist()
features = numeric_features + categorical_features
X_train = train[features]
y_train = train[target]
X_test = test[features]
%%time
from sklearn.pipeline import make_pipeline
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(),
StandardScaler(),
XGBClassifier(objective = 'multi:softmax', booster = 'gbtree',
nrounds = 'min.error.idx', num_class = 3,
maximize = False, eval_metric = 'merror', eta = .2,
max_depth = 19, colsample_bytree = .55, nthread = -1, n_jobs = -1,
learning_rate=.1, gamma = 1, n_estimators=210, random_state=69,
scale_pos_weight=13)
)
pipeline.fit(X_train, y_train)
# Note: this scores the model on the training data it was fit on, so the
# printed number is training accuracy, not true validation accuracy
y_pred = pipeline.predict(X_train)
print('Validation Accuracy: ', accuracy_score(y_train, y_pred))
###Output
Validation Accuracy: 0.9487542087542088
CPU times: user 5min 3s, sys: 422 ms, total: 5min 3s
Wall time: 2min 33s
###Markdown
Finding best Hyper Parameter with RandomizedSearchCV
###Code
%%time
from sklearn.linear_model import Ridge
from sklearn.ensemble import RandomForestClassifier
from scipy.stats import randint, uniform
from sklearn.model_selection import RandomizedSearchCV
# The RandomizedSearchCV block below was run once in a separate notebook
# (results are pasted in the next markdown cell), so it is left fully commented
# out here to avoid the ~40 minute runtime.
# pipeline = make_pipeline(
#     # ce.OrdinalEncoder(),  # alternative encoder that was also tried
#     ce.TargetEncoder(min_samples_leaf=1, smoothing=1),
#     SimpleImputer(),
#     StandardScaler(),
#     XGBClassifier(objective='multi:softmax', booster='gbtree',
#                   nrounds='min.error.idx', num_class=3,
#                   maximize=False, eval_metric='merror',
#                   colsample_bytree=.55, n_jobs=-1,
#                   random_state=69)
# )
# param_distributions = {
#     'xgbclassifier__n_estimators': randint(50, 1000),
#     'xgbclassifier__max_depth': randint(3, 35),
#     'xgbclassifier__learning_rate': [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3],
#     'xgbclassifier__gamma': randint(1, 10),
#     'xgbclassifier__eta': [.1, .2, .3, .4, .5, .6, .7, .8]
# }
# search = RandomizedSearchCV(
#     pipeline,
#     param_distributions=param_distributions,
#     n_iter=5,
#     cv=2,
#     scoring=None,
#     verbose=10,
#     return_train_score=True,
#     n_jobs=-1
# )
# search.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
**This code cell takes nearly 40 minutes to run and I already ran it in another notebook, so I'll just paste the results I got here:**```Fitting 2 folds for each of 5 candidates, totalling 10 fits[Parallel(n_jobs=-1)]: Using backend LokyBackend with 2 concurrent workers.[Parallel(n_jobs=-1)]: Done 1 tasks | elapsed: 12.1min[Parallel(n_jobs=-1)]: Done 4 tasks | elapsed: 19.0min[Parallel(n_jobs=-1)]: Done 10 out of 10 | elapsed: 37.1min finishedCPU times: user 21min 1s, sys: 12.8 s, total: 21min 14sWall time: 47min 45sBest hyperparameters {'xgbclassifier__eta': 0.4, 'xgbclassifier__gamma': 1, 'xgbclassifier__learning_rate': 0.001, 'xgbclassifier__max_depth': 16, 'xgbclassifier__n_estimators': 834}```
###Code
# Oddly, I tried parameters other than these reported ones and actually got better results
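# A hedged sketch: the best hyperparameters pasted above can be plugged back
# into the existing pipeline with set_params (the step name 'xgbclassifier'
# comes from make_pipeline) instead of re-running the 40-minute search.
# eta is an alias of learning_rate in xgboost, so only one of them is set here.
reported_best_params = {
    'xgbclassifier__gamma': 1,
    'xgbclassifier__learning_rate': 0.001,
    'xgbclassifier__max_depth': 16,
    'xgbclassifier__n_estimators': 834,
}
# Refitting would overwrite the model already trained above, so both calls are
# left commented out:
# pipeline.set_params(**reported_best_params)
# pipeline.fit(X_train, y_train)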
###Output
_____no_output_____
###Markdown
Final Submissions To Kaggle Challenge
###Code
y_pred = pipeline.predict(X_test)
submission = sample_submission.copy()
submission['status_group'] = y_pred
submission.to_csv('my-ultimate-ensemble-submission.csv', index=False)
!head my-ultimate-ensemble-submission.csv
if in_colab:
from google.colab import files
#Just try again if you get this error:
#TypeError: Failed to fetch
#https://github.com/googlecolab/colabtools/issues/337
files.download('my-ultimate-ensemble-submission.csv')
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See below)- [ ] Stacking Ensemble. (See below)- [ ] More Categorical Encoding. (See below) RandomizedSearchCV / GridSearchCV, for model selection- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? 
Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)``` More Categorical Encodings**1.** The article **[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)** mentions 4 encodings:- **"Categorical Encoding":** This means using the raw categorical values as-is, not encoded. Scikit-learn doesn't support this, but some tree algorithm implementations do. For example, [Catboost](https://catboost.ai/), or R's [rpart](https://cran.r-project.org/web/packages/rpart/index.html) package.- **Numeric Encoding:** Synonymous with Label Encoding, or "Ordinal" Encoding with random order. We can use [category_encoders.OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html).- **One-Hot Encoding:** We can use [category_encoders.OneHotEncoder](http://contrib.scikit-learn.org/categorical-encoding/onehot.html).- **Binary Encoding:** We can use [category_encoders.BinaryEncoder](http://contrib.scikit-learn.org/categorical-encoding/binary.html).**2.** The short video **[Coursera โ How to Win a Data Science Competition: Learn from Top Kagglers โ Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)** introduces an interesting idea: use both X _and_ y to encode categoricals.Category Encoders has multiple implementations of this general concept:- [CatBoost Encoder](http://contrib.scikit-learn.org/categorical-encoding/catboost.html)- [James-Stein Encoder](http://contrib.scikit-learn.org/categorical-encoding/jamesstein.html)- [Leave One Out](http://contrib.scikit-learn.org/categorical-encoding/leaveoneout.html)- [M-estimate](http://contrib.scikit-learn.org/categorical-encoding/mestimate.html)- [Target Encoder](http://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)- [Weight of Evidence](http://contrib.scikit-learn.org/categorical-encoding/woe.html)Category Encoder's mean encoding implementations work for regression problems or binary classification problems. For multi-class classification problems, you will need to temporarily reformulate it as binary classification. For example:```pythonencoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) Both parameters > 1 to avoid overfittingX_train_encoded = encoder.fit_transform(X_train, y_train=='functional')X_val_encoded = encoder.transform(X_train, y_val=='functional')```**3.** The **[dirty_cat](https://dirty-cat.github.io/stable/)** library has a Target Encoder implementation that works with multi-class classification.```python dirty_cat.TargetEncoder(clf_type='multiclass-clf')```It also implements an interesting idea called ["Similarity Encoder" for dirty categories](https://www.slideshare.net/GaelVaroquaux/machine-learning-on-non-curated-data-154905090).However, it seems like dirty_cat doesn't handle missing values or unknown categories as well as category_encoders does. 
And you may need to use it with one column at a time, instead of with your whole dataframe.**4. [Embeddings](https://www.kaggle.com/learn/embeddings)** can work well with sparse / high cardinality categoricals._**I hope it's not too frustrating or confusing that there's not one "canonical" way to encode categoricals. It's an active area of research and experimentation! Maybe you can make your own contributions!**_
###Code
import os, sys
in_colab = 'google.colab' in sys.modules
# If you're in Colab...
if in_colab:
# Pull files from Github repo
os.chdir('/content')
!git init .
!git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
!git pull origin master
# Install required python packages
!pip install -r requirements.txt
# Change into directory for module
os.chdir('module4')
# import block
import pandas as pd  # pandas must be imported before its display options are set
pd.set_option('display.max_columns', None)
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
plt.style.use('dark_background')
import numpy as np
from scipy.stats import randint, uniform
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.metrics import accuracy_score
import category_encoders as ce
import pandas as pd
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'),
pd.read_csv('../data/tanzania/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
# Split train into train & val. Make val the same size as test.
target = 'status_group'
train, val = train_test_split(train, test_size=len(test),
stratify=train[target], random_state=42)
# Cleaning/engineering function
def wrangler(X):
# Make a copy to avoid warning, prevent making changes from view.
X = X.copy()
# Replace near-zero latitudes with zero
X['latitude'] = X['latitude'].replace(-2e-08, 0)
# Replace near-zero longitudes with zero
X['longitude'] = X['longitude'].replace(-2e-08, 0)
# Swap zeros with nulls
cols_with_zeros = ['longitude', 'latitude', 'construction_year',
'gps_height', 'population']
zeros = [0, '0']
for col in cols_with_zeros:
X[col] = X[col].replace(zeros, np.nan)
X[col+'_MISSING'] = X[col].isna()
    # Clean text columns by lowercasing, swapping unknowns with NaNs, and adding a 'MISSING' flag for each
textcols = ['installer','funder','wpt_name','basin','subvillage','region','lga','ward',
'scheme_management','scheme_name','extraction_type','extraction_type_group',
'extraction_type_class','management','management_group','payment','water_quality',
'quality_group','quantity','source','source_type','source_class','waterpoint_type',
'waterpoint_type_group']
unknowns = ['unknown', 'not known', 'none', 'nan', '-', '##',
'unknown installer']
for col in textcols:
        X[col] = X[col].str.lower().str.replace(' ', '', regex=False).str.replace('.', '', regex=False).str.replace('-', '', regex=False)  # regex=False so '.' is a literal dot, not a regex wildcard
X[col] = X[col].replace(unknowns, np.nan)
X[col+'_MISSING'] = X[col].isna()
    # Flag missing values in the boolean columns (public_meeting, permit); imputation is left to the pipeline's SimpleImputer
boolcols = ['public_meeting','permit']
for col in boolcols:
X[col+'_MISSING'] = X[col].isna()
    # Drop unusable columns: recorded_by (never varies), id (always varies, random), num_private (doesn't help), wpt_name (very high cardinality)
unusable_variance = ['recorded_by', 'id','num_private','wpt_name']
X = X.drop(columns=unusable_variance)
    # Create distance features to major population centers (their latitudes are negative: southern hemisphere)
    X['dardistance'] = (((X['latitude']-(-6.7924))**2)+((X['longitude']-(39.2083))**2))**0.5
    X['mwanzadistance'] = (((X['latitude']-(-2.5164))**2)+((X['longitude']-(32.9175))**2))**0.5
    X['dodomadistance'] = (((X['latitude']-(-6.1630))**2)+((X['longitude']-(35.7516))**2))**0.5
X['dardistance_MISSING'] = X['dardistance'].isnull()
X['mwanzadistance_MISSING'] = X['mwanzadistance'].isnull()
X['dodomadistance_MISSING'] = X['dodomadistance'].isnull()
# change date_recorded to datetime format
X['date_recorded'] = pd.to_datetime(X.date_recorded, infer_datetime_format=True)
X['date_recorded_MISSING'] = X['date_recorded'].isnull()
# Extract components from date_recorded, then drop the original column
X['year_recorded'] = X['date_recorded'].dt.year
X['month_recorded'] = X['date_recorded'].dt.month
X['day_recorded'] = X['date_recorded'].dt.day
X = X.drop(columns='date_recorded')
# make list of columns of numeric and categoric type
numericcolumns = X.select_dtypes(include = 'number').columns.tolist()
nonnumericcolumns = X.select_dtypes(exclude = 'number').columns.tolist()
# create 'structspect_interval' - number of years between construction and date recorded
X['structspect_interval'] = X['year_recorded'] - X['construction_year']
X['structspect_MISSING'] = X['structspect_interval'].isnull()
return X
# Clean and engineer all datasets
train = wrangler(train)
val = wrangler(val)
test = wrangler(test)
# Arrange data into X features matrix and y target vector
target = 'status_group'
X_train = train.drop(columns=target)
y_train = train[target]
X_val = val.drop(columns=target)
y_val = val[target]
X_test = test
# fit it
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(),
RandomForestClassifier(n_estimators=129, max_depth=30, min_samples_leaf=2,
random_state=42, n_jobs=-1, min_samples_split=4)
)
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
print('Validation Accuracy', accuracy_score(y_val, y_pred))
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
def plot_confusion_matrix(y_true, y_pred):
    labels = unique_labels(y_true, y_pred)  # use both so no class is missing from the matrix
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred), columns=columns, index=index)
return sns.heatmap(table, annot=True, fmt='d')
plot_confusion_matrix(y_val, y_pred);
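# Optional, hedged variant of the plot above: raw counts are hard to compare when the
# classes are imbalanced, so this sketch normalizes each row to sum to 1. It reuses the
# y_val and y_pred computed in this cell.
def plot_confusion_matrix_normalized(y_true, y_pred):
    labels = unique_labels(y_true, y_pred)
    columns = [f'Predicted {label}' for label in labels]
    index = [f'Actual {label}' for label in labels]
    table = pd.DataFrame(confusion_matrix(y_true, y_pred), columns=columns, index=index)
    table = table.div(table.sum(axis='columns'), axis='index')  # each row now sums to 1
    return sns.heatmap(table, annot=True, fmt='.2f', cmap='Blues')
plot_confusion_matrix_normalized(y_val, y_pred);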
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See module 3 assignment notebook)- [ ] More Categorical Encoding. (See module 2 assignment notebook)- [ ] Stacking Ensemble. (See below) Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)```
###Code
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
import pandas as pd
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
###Output
_____no_output_____
###Markdown
Assignment Importing
###Code
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
import category_encoders as ce
from sklearn.impute import SimpleImputer
from scipy.stats import randint, uniform
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
Wrangling
###Code
def wrangle(x):
x = x.copy() #to avoid warning
    x['latitude'] = x['latitude'].replace(-2e-08, 0) # fix the 'latittude' typo and use the dataset's actual near-zero value, -2e-08
cols_with_zero = ['longitude', 'latitude', 'construction_year',
'gps_height', 'population']
for col in cols_with_zero: #Replace zeroes in cols above with NaN
x[col] = x[col].replace(0, np.nan)
x[col+'_MISSING'] = x[col].isnull() #adding a missing indicator
duplicates = ['extraction_type_class', 'extraction_type_group', 'quantity_group', 'payment', 'source', 'waterpoint_type_group']
x = x.drop(columns=duplicates)
x = x.drop(columns=['id','recorded_by']) # Unusable info
x['date_recorded'] = pd.to_datetime(x['date_recorded'], infer_datetime_format=True) #convert to DateTime
# Extract components from date_recorded, then drop the original column
x['year_recorded'] = x['date_recorded'].dt.year
x['month_recorded'] = x['date_recorded'].dt.month
x['day_recorded'] = x['date_recorded'].dt.day
x = x.drop(columns='date_recorded')
x['years'] = x['year_recorded'] - x['construction_year']
x['years_MISSING'] = x['years'].isnull()
return x
train = wrangle(train)
test = wrangle(test)
target = 'status_group'
X_train = train.drop(columns=target)
y_train = train[target]
###Output
_____no_output_____
###Markdown
RandomizedSearchCV
###Code
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(),
RandomForestClassifier(random_state=42)
)
params = {
'simpleimputer__strategy': ['mean', 'median'],
'randomforestclassifier__n_estimators': randint(50, 500),
'randomforestclassifier__max_depth': [5, 10, 15, 20, None],
'randomforestclassifier__max_features': uniform(0, 1),
}
search = RandomizedSearchCV(
pipeline,
param_distributions=params,
n_iter=10,
cv=3,
scoring='accuracy',
verbose=10,
return_train_score= True,
n_jobs=-1
)
search.fit(X_train, y_train);
print('Best hyperparameters', search.best_params_)
print('Score:', search.best_score_)
y_pred = search.predict(test)
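# Optional, hedged check on the search above: cv_results_ holds every sampled candidate,
# so a quick DataFrame view shows which hyperparameters did well and which did not.
cv_results = pd.DataFrame(search.cv_results_)
print(cv_results.sort_values('rank_test_score')[['rank_test_score', 'mean_test_score', 'params']].head())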
###Output
_____no_output_____
###Markdown
Confusion matrix Model for confusion matrix
###Code
train2 = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
train2, val = train_test_split(train2, test_size=len(test),
                               stratify=train2[target], random_state=42)
train2 = wrangle(train2)
val = wrangle(val)
target = 'status_group'
X_train2 = train2.drop(columns=target)
y_train2 = train2[target]
X_val = val.drop(columns=target)
y_val = val[target]
search2 = RandomizedSearchCV(
pipeline,
param_distributions=params,
n_iter=10,
cv=3,
scoring='accuracy',
verbose=10,
return_train_score= True,
n_jobs=-1
)
search2.fit(X_train2, y_train2);
print('Best hyperparameters', search2.best_params_)
print('Score:', search2.best_score_)
y_pred2 = search2.predict(X_val)
y_pred2_sub = search2.predict(test)  # use the second search for the second submission
def plot_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred),
columns=columns, index=index)
return sns.heatmap(table, annot=True, fmt='d', cmap='BuGn')
plot_matrix(y_val,y_pred2);
submission1 = sample_submission.copy()
submission1['status_group'] = y_pred
submission2 = sample_submission.copy()
submission2['status_group'] = y_pred2_sub
submission1.to_csv('search1.csv', index=False)
submission2.to_csv('search2.csv', index=False)
###Output
_____no_output_____
###Markdown
Stacking submissions
###Code
pwd
files = ['big_model_sub.csv', 'Dtree_Sub.csv', 'Dtree_sub2.csv', 'my-ultimate-ensemble-submission.csv',
'RandCV1.csv', 'random_forest_1.csv']
target = 'status_group'
submissions = (pd.read_csv(file)[[target]] for file in files)
ensemble = pd.concat(submissions, axis='columns')
majority_vote = ensemble.mode(axis='columns')[0]
submission = sample_submission.copy()
submission[target] = majority_vote
submission.to_csv('my-second-ultimate-ensemble-submission.csv', index=False)
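# Hedged sanity check on the `ensemble` frame built above: measure how often all of the
# stacked submissions already agree. If agreement is very high, the majority vote can
# only change a small number of rows.
agreement_rate = (ensemble.nunique(axis='columns') == 1).mean()
print(f'Rows where every submission agrees: {agreement_rate:.1%}')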
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See module 3 assignment notebook)- [ ] More Categorical Encoding. (See module 2 assignment notebook)- [ ] Stacking Ensemble. (See below) Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)```
###Code
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
import pandas as pd
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See below)- [ ] Stacking Ensemble. (See below)- [ ] More Categorical Encoding. (See below) RandomizedSearchCV / GridSearchCV, for model selection- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? 
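One hedged way to try that idea on this dataset is to make the final pipeline step itself a hyperparameter. The sketch below assumes `X_train` and `y_train` are already prepared (as in the cells that follow), and the grid values are only illustrative:

```python
import category_encoders as ce
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV

pipe = Pipeline([
    ('encoder', ce.OrdinalEncoder()),
    ('imputer', SimpleImputer(strategy='median')),
    ('classifier', RandomForestClassifier(random_state=42)),  # placeholder; the grid swaps it out
])

# Each dict is one branch of the search: the classifier step itself is a parameter.
param_grid = [
    {'classifier': [RandomForestClassifier(random_state=42, n_jobs=-1)],
     'classifier__n_estimators': [100, 300],
     'classifier__max_depth': [20, None]},
    {'classifier': [GradientBoostingClassifier(random_state=42)],
     'classifier__n_estimators': [100, 200],
     'classifier__learning_rate': [0.05, 0.1]},
]

grid = GridSearchCV(pipe, param_grid, cv=3, scoring='accuracy', n_jobs=-1, verbose=1)
grid.fit(X_train, y_train)
print(grid.best_params_, grid.best_score_)
```

The same pattern works with RandomizedSearchCV if the per-model grids get large.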
Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)``` More Categorical Encodings**1.** The article **[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)** mentions 4 encodings:- **"Categorical Encoding":** This means using the raw categorical values as-is, not encoded. Scikit-learn doesn't support this, but some tree algorithm implementations do. For example, [Catboost](https://catboost.ai/), or R's [rpart](https://cran.r-project.org/web/packages/rpart/index.html) package.- **Numeric Encoding:** Synonymous with Label Encoding, or "Ordinal" Encoding with random order. We can use [category_encoders.OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html).- **One-Hot Encoding:** We can use [category_encoders.OneHotEncoder](http://contrib.scikit-learn.org/categorical-encoding/onehot.html).- **Binary Encoding:** We can use [category_encoders.BinaryEncoder](http://contrib.scikit-learn.org/categorical-encoding/binary.html).**2.** The short video **[Coursera โ How to Win a Data Science Competition: Learn from Top Kagglers โ Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)** introduces an interesting idea: use both X _and_ y to encode categoricals.Category Encoders has multiple implementations of this general concept:- [CatBoost Encoder](http://contrib.scikit-learn.org/categorical-encoding/catboost.html)- [James-Stein Encoder](http://contrib.scikit-learn.org/categorical-encoding/jamesstein.html)- [Leave One Out](http://contrib.scikit-learn.org/categorical-encoding/leaveoneout.html)- [M-estimate](http://contrib.scikit-learn.org/categorical-encoding/mestimate.html)- [Target Encoder](http://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)- [Weight of Evidence](http://contrib.scikit-learn.org/categorical-encoding/woe.html)Category Encoder's mean encoding implementations work for regression problems or binary classification problems. For multi-class classification problems, you will need to temporarily reformulate it as binary classification. For example:```pythonencoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) Both parameters > 1 to avoid overfittingX_train_encoded = encoder.fit_transform(X_train, y_train=='functional')X_val_encoded = encoder.transform(X_train, y_val=='functional')```**3.** The **[dirty_cat](https://dirty-cat.github.io/stable/)** library has a Target Encoder implementation that works with multi-class classification.```python dirty_cat.TargetEncoder(clf_type='multiclass-clf')```It also implements an interesting idea called ["Similarity Encoder" for dirty categories](https://www.slideshare.net/GaelVaroquaux/machine-learning-on-non-curated-data-154905090).However, it seems like dirty_cat doesn't handle missing values or unknown categories as well as category_encoders does. 
And you may need to use it with one column at a time, instead of with your whole dataframe.**4. [Embeddings](https://www.kaggle.com/learn/embeddings)** can work well with sparse / high cardinality categoricals._**I hope it's not too frustrating or confusing that there's not one "canonical" way to encode categoricals. It's an active area of research and experimentation! Maybe you can make your own contributions!**_
###Code
import os, sys
in_colab = 'google.colab' in sys.modules
# If you're in Colab...
if in_colab:
# Pull files from Github repo
os.chdir('/content')
!git init .
!git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
!git pull origin master
# Install required python packages
!pip install -r requirements.txt
# Change into directory for module
os.chdir('module4')
import pandas as pd
from sklearn.model_selection import train_test_split
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'),
pd.read_csv('../data/tanzania/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
# Split train into train & val. Make val the same size as test.
target = 'status_group'
train, val = train_test_split(train, test_size=len(test),
stratify=train[target], random_state=42)
target = 'status_group'
train_features = train.drop(columns=[target, 'id'])
numeric_features = train_features.select_dtypes(include='number').columns.tolist()
cardinality = train_features.select_dtypes(exclude='number').nunique()
categorical_features = cardinality[cardinality <= 50].index.tolist()
features = numeric_features + categorical_features
print(features)
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
import category_encoders as ce
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.tree import DecisionTreeClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
pipeline = make_pipeline(
ce.OneHotEncoder(use_cat_names=True),
SimpleImputer(),
StandardScaler(),
RandomForestClassifier(n_estimators=5, random_state=42, n_jobs=-1)
)
pipeline.fit(X_train, y_train)
print('Validation Accuracy ', pipeline.score(X_val, y_val))
y_pred_test = pipeline.predict(X_test)  # predictions for the Kaggle test set
y_pred = pipeline.predict(X_val)        # predict on the validation set so the report below compares matching rows
from sklearn.metrics import classification_report
print(classification_report(y_val, y_pred))
###Output
precision recall f1-score support
functional 0.54 0.57 0.56 7798
functional needs repair 0.08 0.06 0.07 1043
non functional 0.38 0.37 0.37 5517
accuracy 0.45 14358
macro avg 0.33 0.33 0.33 14358
weighted avg 0.44 0.45 0.45 14358
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Catch up, if needed- [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2/portfolio-project/ds6), then choose your dataset, and [submit this form](https://forms.gle/nyWURUg65x1UTRNV9), due yesterday at 3:59pm Pacific.- Submit predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file.) The competition closes today at 3:59pm. Every student should make at least one submission that scores at least 60% accuracy (above the majority class baseline). Assignment- [X] Continue to participate in our Kaggle challenge. - [?] Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.- [X] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_ The competition closes today at 3:59pm.- [X] Add comments and Markdown to your notebook. Clean up your code.- [X] Commit your notebook to your fork of the GitHub repo. Stretch Goals Reading- Jake VanderPlas, [Python Data Science Handbook, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation- Jake VanderPlas, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107)- Ron Zacharski, [A Programmer's Guide to Data Mining, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation- Sebastian Raschka, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb)- Peter Worcester, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85) Doing- Try combining xgboost early stopping, cross-validation, & hyperparameter optimization, with [the "original" (non scikit-learn) xgboost API](https://xgboost.readthedocs.io/en/latest/python/python_api.htmlxgboost.cv).- In additon to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). Experiment with these alternatives.- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? 
Try stacking multiple submissions!Here's some code you can use:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)```
###Code
# If you're in Colab...
import os, sys
in_colab = 'google.colab' in sys.modules
if in_colab:
# Install required python packages:
# category_encoders, version >= 2.0
# pandas-profiling, version >= 2.0
# plotly, version >= 4.0
!pip install --upgrade category_encoders pandas-profiling plotly
# Pull files from Github repo
os.chdir('/content')
!git init .
!git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
!git pull origin master
# Change into directory for module
os.chdir('module2')
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'),
pd.read_csv('../data/tanzania/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
# train/val split
train, val = train_test_split(train, train_size=0.80, test_size=0.20,
stratify=train['status_group'], random_state=42)
train.shape, val.shape, test.shape
# wrangle data
def wrangle(X):
"""Wrangle train, validate, and test sets in the same way"""
# Prevent SettingWithCopyWarning
X = X.copy()
# About 3% of the time, latitude has small values near zero,
# outside Tanzania, so we'll treat these values like zero.
X['latitude'] = X['latitude'].replace(-2e-08, 0)
# replace 0s in lat, long, and construction year columns
cols_with_zeros = ['longitude', 'latitude', 'construction_year', 'population']
for col in cols_with_zeros:
X[col] = X[col].replace(0, np.nan)
# drop quantity_group, waterpoint_type_group, payment_type, extraction_type_group
X = X.drop(columns=['quantity_group', 'waterpoint_type_group', 'payment_type', 'extraction_type_group'])
# return the wrangled dataframe
return X
# apply wrangling to train, val, and test data
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
from scipy.stats import randint, uniform
from sklearn.model_selection import RandomizedSearchCV
from sklearn.pipeline import make_pipeline
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.feature_selection import f_classif, SelectKBest
from sklearn.linear_model import LogisticRegression
target = 'status_group'
features = train.columns.drop([target])
X_train = train[features]
y_train = train[target]
pipeline = make_pipeline(
ce.OneHotEncoder(use_cat_names=True),
SimpleImputer(),
    SelectKBest(f_classif),  # f_classif because status_group is a categorical target
    LogisticRegression(max_iter=1000)  # Ridge can't fit string class labels; logistic regression is its classification analogue
)
param_distributions = {
'simpleimputer__strategy': ['mean', 'median'],
'selectkbest__k': randint(1, len(X_train.columns)+1),
    'logisticregression__C': uniform(0.1, 10),
}
search = RandomizedSearchCV(
pipeline,
param_distributions=param_distributions,
n_iter=100,
cv=5,
    scoring='accuracy',
verbose=10,
return_train_score=True,
n_jobs=-1
)
search.fit(X_train, y_train);
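# Hedged follow-up, assuming the fitted `search` above: report the winning
# hyperparameters and the best cross-validated score.
print('Best hyperparameters:', search.best_params_)
print('Best cross-validation score:', search.best_score_)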
###Output
Fitting 5 folds for each of 100 candidates, totalling 500 fits
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See below)- [ ] Stacking Ensemble. (See below)- [ ] More Categorical Encoding. (See below) RandomizedSearchCV / GridSearchCV, for model selection- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? 
Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)``` More Categorical Encodings**1.** The article **[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)** mentions 4 encodings:- **"Categorical Encoding":** This means using the raw categorical values as-is, not encoded. Scikit-learn doesn't support this, but some tree algorithm implementations do. For example, [Catboost](https://catboost.ai/), or R's [rpart](https://cran.r-project.org/web/packages/rpart/index.html) package.- **Numeric Encoding:** Synonymous with Label Encoding, or "Ordinal" Encoding with random order. We can use [category_encoders.OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html).- **One-Hot Encoding:** We can use [category_encoders.OneHotEncoder](http://contrib.scikit-learn.org/categorical-encoding/onehot.html).- **Binary Encoding:** We can use [category_encoders.BinaryEncoder](http://contrib.scikit-learn.org/categorical-encoding/binary.html).**2.** The short video **[Coursera โ How to Win a Data Science Competition: Learn from Top Kagglers โ Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)** introduces an interesting idea: use both X _and_ y to encode categoricals.Category Encoders has multiple implementations of this general concept:- [CatBoost Encoder](http://contrib.scikit-learn.org/categorical-encoding/catboost.html)- [James-Stein Encoder](http://contrib.scikit-learn.org/categorical-encoding/jamesstein.html)- [Leave One Out](http://contrib.scikit-learn.org/categorical-encoding/leaveoneout.html)- [M-estimate](http://contrib.scikit-learn.org/categorical-encoding/mestimate.html)- [Target Encoder](http://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)- [Weight of Evidence](http://contrib.scikit-learn.org/categorical-encoding/woe.html)Category Encoder's mean encoding implementations work for regression problems or binary classification problems. For multi-class classification problems, you will need to temporarily reformulate it as binary classification. For example:```pythonencoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) Both parameters > 1 to avoid overfittingX_train_encoded = encoder.fit_transform(X_train, y_train=='functional')X_val_encoded = encoder.transform(X_train, y_val=='functional')```**3.** The **[dirty_cat](https://dirty-cat.github.io/stable/)** library has a Target Encoder implementation that works with multi-class classification.```python dirty_cat.TargetEncoder(clf_type='multiclass-clf')```It also implements an interesting idea called ["Similarity Encoder" for dirty categories](https://www.slideshare.net/GaelVaroquaux/machine-learning-on-non-curated-data-154905090).However, it seems like dirty_cat doesn't handle missing values or unknown categories as well as category_encoders does. 
And you may need to use it with one column at a time, instead of with your whole dataframe.**4. [Embeddings](https://www.kaggle.com/learn/embeddings)** can work well with sparse / high cardinality categoricals._**I hope it's not too frustrating or confusing that there's not one "canonical" way to encode categoricals. It's an active area of research and experimentation! Maybe you can make your own contributions!**_
###Code
import os, sys
in_colab = 'google.colab' in sys.modules
# If you're in Colab...
if in_colab:
# Pull files from Github repo
os.chdir('/content')
!git init .
!git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
!git pull origin master
# Install required python packages
!pip install -r requirements.txt
# Change into directory for module
os.chdir('module4')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from scipy.stats import randint, uniform
%matplotlib inline
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'),
pd.read_csv('../data/tanzania/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
# Split train into train & val. Make val the same size as test.
target = 'status_group'
train, val = train_test_split(train, test_size=len(test),
stratify=train[target], random_state=42)
def wrangle(X):
'''Wrangle train, validate and test sets'''
# this will prevent SettingWithCopyWarning
X = X.copy()
# latitude and longitude have some missing values coded as 0s (or very small near-zeros.)
# I'm going to replace those with nulls for now and later decide how to impute them
X['latitude'] = X['latitude'].replace(-2e-08, 0)
cols_with_zeros = ['latitude', 'longitude', 'construction_year', 'gps_height',
]
for col in cols_with_zeros:
X[col] = X[col].replace(0, np.nan)
# quantity and quantity_group are the same data, so we don't need both
# recorded_by only has one value, which gives us literally no variance
# num_private doesnt seem to help the model
# wpt_name has very high cardinality
# extraction_type_group and extraction_type are both similar to extraction_type_class
# payment_type is similar to payment
# water_quality is similar to quality_group
# basin and scheme_management don't help the model
# waterpoint_type_gruop is similar to waterpoint_type
# quantity_group is a duplicate of quantity
# installer has very high cardinality
X = X.drop(columns=['quantity_group', 'recorded_by', 'num_private', 'wpt_name',
'extraction_type_group', 'extraction_type', 'payment_type', 'water_quality',
'basin', 'scheme_management', 'waterpoint_type_group', 'installer'])
return X
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
target = 'status_group'
X_train = train.drop(columns=target)
y_train = train[target]
X_val = val.drop(columns=target)
y_val = val[target]
X_test = test
# Uncommented so the fit below has a pipeline to use
pipeline = make_pipeline(
    # ce.TargetEncoder(min_samples_leaf=1, smoothing=1),
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='median'),
    RandomForestClassifier(n_estimators=76, random_state=42, n_jobs=-1, max_depth=None, min_samples_split=2,
                           min_samples_leaf=2, max_features=0.14002100940103568)
)
# Optional cross-validation check:
# k = 3
# scores = cross_val_score(pipeline, X_train, y_train, cv=k,
#                          scoring='accuracy')
# print(f'Accuracy for {k} folds', scores)
pipeline.fit(X_train, y_train)
print('Validation accuracy:', pipeline.score(X_val, y_val))
y_pred = pipeline.predict(X_val)
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
def plot_confusion_matrix(y_true, y_pred):
    labels = unique_labels(y_true, y_pred)  # use both so no class is missing from the matrix
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred), columns=columns, index=index)
return sns.heatmap(table, annot=True, fmt='d')
plot_confusion_matrix(y_val, y_pred)
train.columns
pd.set_option("display.max_columns", 500)
train.head()
categorical_features = [
'date_recorded', 'funder', 'subvillage', 'region', 'lga', 'ward', 'public_meeting',
'scheme_name', 'permit', 'extraction_type_class', 'management', 'management_group',
'payment', 'quality_group', 'quantity', 'source', 'source_type', 'source_class', 'waterpoint_type'
]
numerical_features = [x for x in train.columns if x not in categorical_features and x != 'status_group']
numerical_features = numerical_features[1:]  # drop the leading 'id' column, which isn't a meaningful numeric feature
numeric_df = train[numerical_features]
cat_df = train[categorical_features]
imputer = SimpleImputer()
numeric_df = pd.DataFrame(imputer.fit_transform(numeric_df), columns=numerical_features)
numeric_df.head()
import xgboost as xgb
import random
from sklearn.metrics import accuracy_score  # needed for the scoring inside the loop below
random.seed(208)
# Encode categoricals and impute missing values so xgboost receives numeric input
encoder = ce.OrdinalEncoder()
X_train = encoder.fit_transform(X_train)
X_val = encoder.transform(X_val)
imputer = SimpleImputer(strategy='most_frequent')
X_train = pd.DataFrame(imputer.fit_transform(X_train), columns=X_train.columns)
X_val = pd.DataFrame(imputer.transform(X_val), columns=X_val.columns)
count = 0
score = 0
scores = 0
prediction_votes=pd.DataFrame()
for num in range(1,12):
seed = random.randint(1,9000)
    model = xgb.XGBClassifier(max_depth=14, n_estimators=50, learning_rate=0.2, colsample_bytree=0.4,
                              booster='gbtree', random_state=seed, objective='multi:softmax',
                              n_jobs=-1).fit(X_train, y_train)
    predictions = model.predict(X_val)  # X_val_no_id was never defined; predict on the encoded validation set
score = accuracy_score(y_val, predictions)
prediction_votes[num] = predictions
scores += score
count += 1
print(score)
print(f"The average accuracy score using these hyperparams over {count} iterations is {scores/count}")
majority_votes = prediction_votes.mode(axis=1)[0]
score = accuracy_score(y_val, majority_votes)
score
assert len(majority_votes) == sample_submission.shape[0]
submission = sample_submission.copy()
submission['status_group'] = majority_votes
def format_submission(n):
if n == 1:
return('functional')
if n == 2:
return('functional needs repair')
else:
return('non functional')
submission['status_group'] = submission['status_group'].apply(lambda x: format_submission(x))
submission.to_csv('submission.csv', index=False)
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See below)- [ ] Stacking Ensemble. (See below)- [ ] More Categorical Encoding. (See below) RandomizedSearchCV / GridSearchCV, for model selection- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? 
Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)``` More Categorical Encodings**1.** The article **[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)** mentions 4 encodings:- **"Categorical Encoding":** This means using the raw categorical values as-is, not encoded. Scikit-learn doesn't support this, but some tree algorithm implementations do. For example, [Catboost](https://catboost.ai/), or R's [rpart](https://cran.r-project.org/web/packages/rpart/index.html) package.- **Numeric Encoding:** Synonymous with Label Encoding, or "Ordinal" Encoding with random order. We can use [category_encoders.OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html).- **One-Hot Encoding:** We can use [category_encoders.OneHotEncoder](http://contrib.scikit-learn.org/categorical-encoding/onehot.html).- **Binary Encoding:** We can use [category_encoders.BinaryEncoder](http://contrib.scikit-learn.org/categorical-encoding/binary.html).**2.** The short video **[Coursera โ How to Win a Data Science Competition: Learn from Top Kagglers โ Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)** introduces an interesting idea: use both X _and_ y to encode categoricals.Category Encoders has multiple implementations of this general concept:- [CatBoost Encoder](http://contrib.scikit-learn.org/categorical-encoding/catboost.html)- [James-Stein Encoder](http://contrib.scikit-learn.org/categorical-encoding/jamesstein.html)- [Leave One Out](http://contrib.scikit-learn.org/categorical-encoding/leaveoneout.html)- [M-estimate](http://contrib.scikit-learn.org/categorical-encoding/mestimate.html)- [Target Encoder](http://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)- [Weight of Evidence](http://contrib.scikit-learn.org/categorical-encoding/woe.html)Category Encoder's mean encoding implementations work for regression problems or binary classification problems. For multi-class classification problems, you will need to temporarily reformulate it as binary classification. For example:```pythonencoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) Both parameters > 1 to avoid overfittingX_train_encoded = encoder.fit_transform(X_train, y_train=='functional')X_val_encoded = encoder.transform(X_train, y_val=='functional')```**3.** The **[dirty_cat](https://dirty-cat.github.io/stable/)** library has a Target Encoder implementation that works with multi-class classification.```python dirty_cat.TargetEncoder(clf_type='multiclass-clf')```It also implements an interesting idea called ["Similarity Encoder" for dirty categories](https://www.slideshare.net/GaelVaroquaux/machine-learning-on-non-curated-data-154905090).However, it seems like dirty_cat doesn't handle missing values or unknown categories as well as category_encoders does. 
And you may need to use it with one column at a time, instead of with your whole dataframe.**4. [Embeddings](https://www.kaggle.com/learn/embeddings)** can work well with sparse / high cardinality categoricals._**I hope it's not too frustrating or confusing that there's not one "canonical" way to encode categoricals. It's an active area of research and experimentation! Maybe you can make your own contributions!**_
###Code
import os, sys
in_colab = 'google.colab' in sys.modules
# If you're in Colab...
if in_colab:
# Pull files from Github repo
os.chdir('/content')
!git init .
!git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
!git pull origin master
# Install required python packages
!pip install -r requirements.txt
# Change into directory for module
os.chdir('module4')
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'),
pd.read_csv('../data/tanzania/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
# Split train into train & val. Make val the same size as test.
target = 'status_group'
train, val = train_test_split(train, test_size=len(test),
stratify=train[target], random_state=42)
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Catch up, if needed- [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2/portfolio-project/ds6), then choose your dataset, and [submit this form](https://forms.gle/nyWURUg65x1UTRNV9), due yesterday at 3:59pm Pacific.- Submit predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file.) The competition closes today at 3:59pm. Every student should make at least one submission that scores at least 60% accuracy (above the majority class baseline). Assignment- [ ] Continue to participate in our Kaggle challenge. - [ ] Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_ The competition closes today at 3:59pm.- [ ] Add comments and Markdown to your notebook. Clean up your code.- [ ] Commit your notebook to your fork of the GitHub repo. Stretch Goals Reading- Jake VanderPlas, [Python Data Science Handbook, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation- Jake VanderPlas, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107)- Ron Zacharski, [A Programmer's Guide to Data Mining, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation- Sebastian Raschka, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb)- Peter Worcester, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85) Doing- Try combining xgboost early stopping, cross-validation, & hyperparameter optimization, with [the "original" (non scikit-learn) xgboost API](https://xgboost.readthedocs.io/en/latest/python/python_api.htmlxgboost.cv).- In additon to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). Experiment with these alternatives.- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? 
Try stacking multiple submissions!Here's some code you can use:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)```
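The "Grid-Searching Which Model To Use" idea mentioned above (searching over which estimator the pipeline ends with) can be sketched roughly as follows. This is a minimal illustration rather than a tuned setup: it assumes `X_train` and `y_train` as built in the cells below, and the hyperparameter values are arbitrary.

```python
import category_encoders as ce
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV

pipe = Pipeline([
    ('encoder', ce.OrdinalEncoder()),
    ('imputer', SimpleImputer()),
    ('classifier', RandomForestClassifier())
])

# Each dict is a separate sub-grid, so each classifier gets its own hyperparameters
param_grid = [
    {'classifier': [RandomForestClassifier(n_jobs=-1)],
     'classifier__n_estimators': [100, 300],
     'classifier__max_depth': [20, None]},
    {'classifier': [LogisticRegression(max_iter=1000)],
     'classifier__C': [0.1, 1.0, 10.0]},
]

grid = GridSearchCV(pipe, param_grid, cv=3, scoring='accuracy', n_jobs=-1)
grid.fit(X_train, y_train)  # may take several minutes
print(grid.best_params_)
```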
###Code
import pandas as pd
import numpy as np
# Import train features and labels as train data
train = pd.merge(pd.read_csv('C://Users/ajaco/Desktop/train_features.csv'),
pd.read_csv('C://Users/ajaco/Desktop/train_labels.csv'))
# Import test features
test = pd.read_csv('C:/Users/ajaco/Desktop/test_features.csv')
# Import sample_submission file for creating submission later
sample_submission = pd.read_csv('C://Users/ajaco/Desktop/sample_submission.csv')
train.shape, test.shape
def wrangle(X):
# Prevent SettingWithCopyWarning
X = X.copy()
# Treat zeros as nulls, convert near zeros to zero
X['latitude'] = X['latitude'].replace(-2e-08, 0)
# Replace suspicious zeroes with nans, then create new
# column with binary variable for whether it was missing
# will impute values later
cols_with_zeros = ['longitude', 'latitude', 'construction_year',
'gps_height', 'population']
for col in cols_with_zeros:
X[col] = X[col].replace(0, np.nan)
X[col+'_MISSING'] = X[col].isnull()
# Convert date_recorded to datetime object
X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
# Extract year, month, and day from date_recorded and drop date_recorded
X['year_recorded'] = X['date_recorded'].dt.year
X['month_recorded'] = X['date_recorded'].dt.month
X['day_recorded'] = X['date_recorded'].dt.day
X = X.drop(columns='date_recorded')
# New column time_from_construction_to_inspection
X['years_til_inspection'] = X['year_recorded'] - X['construction_year']
X['years_missing'] = X['years_til_inspection'].isnull()
# quantity group is a duplicate of quantity so drop
X = X.drop(columns=['quantity_group', 'scheme_management', 'num_private',
'recorded_by', 'payment', 'waterpoint_type'])
return X
# Wrangle train and test data
train = wrangle(train)
test = wrangle(test)
import category_encoders as ce
from sklearn.feature_selection import SelectKBest
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
# Select features and create X matrix and y vector
target = 'status_group'
features = train.columns.drop(['id', target]).tolist()
X_train = train[features]
y_train = train[target]
# Create pipeline; ordinal encoding,
# simple imputing w median,
# RFClass model n_estimators=500
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='median'),
RandomForestClassifier(n_estimators=500, max_depth=9, random_state=42, n_jobs=-1)
)
# Get scores and print
scores = cross_val_score(pipeline, X_train, y_train, cv=5)
print(scores)
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn.model_selection import validation_curve
depth = range(1, 40, 2)
train_scores, val_scores = validation_curve(
pipeline, X_train, y_train,
param_name='randomforestclassifier__max_depth',
param_range=depth, scoring='accuracy',
cv=3,
n_jobs=-1
)
plt.plot(depth, np.mean(train_scores, axis=1), color='b', label='training accuracy')
plt.plot(depth, np.mean(val_scores, axis=1), color='r', label='validation accuracy')
plt.xlabel('Depth of Trees')
plt.ylabel('Accuracy Score')
plt.legend();
from sklearn.model_selection import RandomizedSearchCV
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(),
#SelectKBest(f_regression),
RandomForestClassifier(n_estimators=100, n_jobs=-1)
)
pipeline.fit(X_train, y_train)
pipeline.score(X_train, y_train)
X_train = train[features]
y_train = train[target]
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(),
SelectKBest(),
RandomForestClassifier(n_jobs=-1)
)
param_distributions = {
'simpleimputer__strategy': ['mean', 'median'],
'selectkbest__k': range(1, len(X_train.columns)+1),
'randomforestclassifier__n_estimators': range(50, 500),
}
search = RandomizedSearchCV(
pipeline,
param_distributions=param_distributions,
n_iter=5,
cv=5,
scoring='accuracy',
verbose=10,
return_train_score=True,
n_jobs=-1
)
search.fit(X_train, y_train);
search.best_params_
search.best_score_
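# Follow-up sketch using only the fitted `search` object above: RandomizedSearchCV
# refits the best hyperparameters on all of X_train by default (refit=True), so the
# tuned pipeline is available directly as search.best_estimator_.
best_pipeline = search.best_estimator_
print('Training accuracy of refit best pipeline:', best_pipeline.score(X_train, y_train))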
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='median'),
RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
cross_val_score(pipeline, X_train, y_train, cv=2, scoring='accuracy')
X_train = train[features]
y_train = train[target]
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(),
SelectKBest(),
RandomForestClassifier(n_jobs=-1)
)
param_distributions = {
'simpleimputer__strategy': ['mean', 'median'],
'selectkbest__k': range(1, len(X_train.columns)+1),
'randomforestclassifier__n_estimators': range(50, 500),
'randomforestclassifier__max_depth': [5, 10, 20, 50, None],
'randomforestclassifier__min_samples_leaf': range(1, 50, 5)
}
search = RandomizedSearchCV(
pipeline,
param_distributions=param_distributions,
n_iter=20,
cv=5,
scoring='accuracy',
verbose=10,
return_train_score=True,
n_jobs=-1
)
search.fit(X_train, y_train);
search.best_params_
search.best_score_
import xgboost as xgb
X_train = train[features]
y_train = train[target]
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(),
SelectKBest(),
xgb.XGBClassifier(n_jobs=-1)
)
param_distributions = {
'simpleimputer__strategy': ['mean', 'median'],
'selectkbest__k': range(1, len(X_train.columns)+1),
'xgbclassifier__n_estimators': range(100, 500),
'xgbclassifier__max_depth': range(1,10),
'xgbclassifier__learning_rate': [0.001, 0.01, 0.1, 0.2, 0.3]
}
search = RandomizedSearchCV(
pipeline,
param_distributions=param_distributions,
n_iter=20,
cv=5,
scoring='accuracy',
verbose=10,
return_train_score=True,
n_jobs=-1
)
search.fit(X_train, y_train);
print('Best Score: ', search.best_score_)
print('Best Parameters: ', search.best_params_)
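# Optional sketch of the stretch goal above: xgboost's native API with xgb.cv and
# early stopping. Illustrative only -- it reuses the same encoder/imputer idea as the
# pipeline, and the parameter values are not tuned.
from sklearn.preprocessing import LabelEncoder

encoder = ce.OrdinalEncoder()
imputer = SimpleImputer(strategy='median')
X_train_encoded = imputer.fit_transform(encoder.fit_transform(X_train))

# xgboost's native API needs integer class labels
label_encoder = LabelEncoder()
y_train_encoded = label_encoder.fit_transform(y_train)

dtrain = xgb.DMatrix(X_train_encoded, label=y_train_encoded)
params = {
    'objective': 'multi:softmax',
    'num_class': len(label_encoder.classes_),
    'eval_metric': 'mlogloss',
    'max_depth': 8,
    'learning_rate': 0.1,
}
cv_results = xgb.cv(
    params, dtrain,
    num_boost_round=500,
    nfold=3,
    early_stopping_rounds=20,  # stop once the CV metric stops improving
    seed=42,
)
# With early stopping, the returned DataFrame is truncated at the best iteration
print('Best number of boosting rounds:', len(cv_results))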
###Output
Fitting 5 folds for each of 20 candidates, totalling 100 fits
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See module 3 assignment notebook)- [ ] More Categorical Encoding. (See module 2 assignment notebook)- [ ] Stacking Ensemble. (See below) Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)```
###Code
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
# (This is from a previous version of the assignment notebook)
target = 'status_group'
train, val = train_test_split(train, test_size=len(test),
stratify=train[target], random_state=42)
# Copying my earlier code
def remove_zeroes(X):
X = X.copy()
X['latitude'] = X['latitude'].replace(-2e-08, 0)
zeroes = ['gps_height', 'longitude', 'latitude', 'population', 'construction_year']
for col in zeroes:
X[col] = X[col].replace(0, np.nan)
return X
def datetime_features(X):
X = X.copy()
X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
X['year_recorded'] = X['date_recorded'].dt.year
X['construction_year'] = X['construction_year'].fillna(np.around(np.mean(X['construction_year']), decimals=0))
X['time_to_inspection'] = X['year_recorded'] - X['construction_year']
return X
def drop_redundant(X):
X = X.copy()
redundant_cols = ['recorded_by', 'payment_type', 'region_code', 'date_recorded', 'id']
for col in redundant_cols:
X = X.drop(col, axis=1)
return X
def wrangle(X):
X = X.copy()
X = remove_zeroes(X)
X = datetime_features(X)
X = drop_redundant(X)
return X
X_train = wrangle(train).drop(target, axis=1)
y_train = train[target]
X_val = wrangle(val).drop(target, axis=1)
y_val = val[target]
X_test = wrangle(test)
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline, Pipeline
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='mean'),
RandomForestClassifier(max_depth=20, max_features=0.7, n_estimators=200, random_state=99)
)
pipeline.fit(X_train, y_train)
print(pipeline.score(X_val, y_val))
from sklearn.model_selection import GridSearchCV
pipeline = Pipeline([
('encoder', ce.BinaryEncoder()),
('imputer', SimpleImputer()),
('classifier', RandomForestClassifier())
])
param_grid = {
'encoder': [ce.BinaryEncoder(), ce.OrdinalEncoder()],
'imputer__strategy': ['mean', 'median', 'most_frequent'],
'classifier__n_estimators': [200],
'classifier__max_depth': [20],
'classifier__max_features': [0.7]
}
grid = GridSearchCV(pipeline, param_grid=param_grid, scoring='accuracy', cv=5, n_jobs=-1)
grid.fit(X_train, y_train);
print('Best hyperparameters', grid.best_params_)
print('Accuracy', grid.best_score_)
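# GridSearchCV refits the best combination on all of X_train (refit=True by default),
# so the tuned pipeline can be scored directly on the held-out validation set.
print('Validation accuracy of best pipeline:', grid.best_estimator_.score(X_val, y_val))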
pipeline = make_pipeline(
ce.BinaryEncoder(cols=None, drop_invariant=False, handle_missing='value', handle_unknown='value',
mapping=None, return_df=True, verbose=0),
SimpleImputer(strategy='most_frequent'),
RandomForestClassifier(max_depth=20, max_features=0.7, n_estimators=200, random_state=99)
)
pipeline.fit(X_train, y_train)
print(pipeline.score(X_val, y_val))
y_pred = pd.DataFrame(pipeline.predict(X_test), columns=['status_group'])
submission1 = pd.concat([test['id'], y_pred], axis=1)
submission1.to_csv('water-submission-14.csv', index=None, header=True)
pipeline = make_pipeline(
ce.BinaryEncoder(cols=None, drop_invariant=False, handle_missing='value', handle_unknown='value',
mapping=None, return_df=True, verbose=0),
SimpleImputer(strategy='median'),
RandomForestClassifier(n_estimators=200, random_state=99)
)
pipeline.fit(X_train, y_train)
print(pipeline.score(X_val, y_val))
y_pred = pd.DataFrame(pipeline.predict(X_test), columns=['status_group'])
submission2 = pd.concat([test['id'], y_pred], axis=1)
submission2.to_csv('water-submission-15.csv', index=None, header=True)
from sklearn.metrics import confusion_matrix
# Compare validation labels with predictions on the validation set
# (the y_pred above holds test-set predictions, which have no true labels here)
y_pred = pipeline.predict(X_val)
confusion_matrix(y_val, y_pred)
%matplotlib inline
from sklearn.utils.multiclass import unique_labels
import seaborn as sns
labels = unique_labels(y_val)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_val, y_pred), columns=columns, index=index)
sns.heatmap(table, annot=True, fmt='d', cmap='viridis');
files = ['water-submission-13.csv', 'water-submission-14.csv', 'water-submission-15.csv']
target = 'status_group'
submissions = (pd.read_csv(file)[[target]] for file in files)
ensemble = pd.concat(submissions, axis='columns')
majority_vote = ensemble.mode(axis='columns')[0]
submission = sample_submission.copy()
submission[target] = majority_vote
submission.to_csv('water-submission-16.csv', index=False)
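# Quick sanity check on the stacked submission, using only the objects built above:
# how often does each individual submission agree with the majority vote?
agreement = ensemble.eq(majority_vote, axis=0).mean()
print('Per-submission agreement with the majority vote:')
print(agreement.values)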
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Catch up, if needed- [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2/portfolio-project/ds6), then choose your dataset, and [submit this form](https://forms.gle/nyWURUg65x1UTRNV9), due yesterday at 3:59pm Pacific.- Submit predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file.) The competition closes today at 3:59pm. Every student should make at least one submission that scores at least 60% accuracy (above the majority class baseline). Assignment- [X] Continue to participate in our Kaggle challenge. - [X] Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.- [X] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_ The competition closes today at 3:59pm.- [X] Add comments and Markdown to your notebook. Clean up your code.- [X] Commit your notebook to your fork of the GitHub repo. Stretch Goals Reading- Jake VanderPlas, [Python Data Science Handbook, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation- Jake VanderPlas, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107)- Ron Zacharski, [A Programmer's Guide to Data Mining, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation- Sebastian Raschka, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb)- Peter Worcester, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85) Doing- Try combining xgboost early stopping, cross-validation, & hyperparameter optimization, with [the "original" (non scikit-learn) xgboost API](https://xgboost.readthedocs.io/en/latest/python/python_api.htmlxgboost.cv).- In additon to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). Experiment with these alternatives.- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? 
Try stacking multiple submissions!Here's some code you can use:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)```
###Code
# If you're in Colab...
import os, sys
in_colab = 'google.colab' in sys.modules
if in_colab:
# Install required python packages:
# category_encoders, version >= 2.0
# eli5, version >= 0.9
# pandas-profiling, version >= 2.0
# plotly, version >= 4.0
!pip install --upgrade category_encoders eli5 pandas-profiling plotly
# Pull files from Github repo
os.chdir('/content')
!git init .
!git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
!git pull origin master
# Change into directory for module
os.chdir('module3')
import pandas as pd
from sklearn.model_selection import train_test_split
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'),
pd.read_csv('../data/tanzania/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
X_train, X_val, y_train, y_val = train_test_split(train.drop('status_group', axis = 'columns'),train.status_group, test_size = .25, stratify = train.status_group)
# Defining feature engineering function
import datetime
import numpy as np
def engineer(df):
df = df.copy()
# Making age variable
df.date_recorded = pd.to_datetime(df.date_recorded)
df.construction_year.replace(0,np.NaN, inplace = True)
mean_year = np.nanmean(df.construction_year)
df.construction_year.replace(np.NaN,mean_year, inplace = True)
df['age'] = df.date_recorded.dt.year - df.construction_year
# Adding day, month, year, and day of week features
df['day_recorded'] = df.date_recorded.dt.day
df['month_recorded'] = df.date_recorded.dt.month
df['year_recorded'] = df.date_recorded.dt.year
df['day_of_week_recorded'] = df.date_recorded.dt.dayofweek
df.drop('date_recorded', axis = 'columns', inplace = True)
# putting nan values where zeros are but shouldn't be
cols_with_zeros = ['longitude', 'latitude',
'gps_height', 'population']
for col in cols_with_zeros:
df[col] = df[col].replace(0, np.nan)
df[col+'_MISSING'] = df[col].isnull()
# Drop duplicate columns
duplicates = ['quantity_group', 'payment_type']
df = df.drop(columns=duplicates)
# Drop recorded_by (never varies) and id (always varies, random)
unusable_variance = ['recorded_by', 'id']
df = df.drop(columns=unusable_variance)
# Making region code categorical instead of numeric
df['region_code'] = pd.Categorical(df.region_code)
return df
# applying engineer function
X_train = engineer(X_train)
X_val = engineer(X_val)
import category_encoders as ce
from sklearn.pipeline import make_pipeline
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.ensemble import RandomForestClassifier
# pipeline = make_pipeline(
# ce.OrdinalEncoder(),
# SimpleImputer(strategy='median'),
# RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
# )
# # Fit on train, score on val
# pipeline.fit(X_train, y_train)
# print('Validation Accuracy', pipeline.score(X_val, y_val))
pipe = make_pipeline(
ce.OrdinalEncoder()
, IterativeImputer()
, RandomForestClassifier(n_estimators = 2000, min_samples_leaf = 9, max_depth = 33, n_jobs = -1)
)
pipe.fit(X_train, y_train)
from sklearn.metrics import accuracy_score
y_pred = pipe.predict(X_val)
print('Validation Accuracy', accuracy_score(y_val, y_pred))
from sklearn.model_selection import RandomizedSearchCV
param_dist = {
'randomforestclassifier__n_estimators':range(500,5000,500)
, 'randomforestclassifier__max_depth':range(10,50,5)
, 'randomforestclassifier__max_features':range(5, len(X_train.columns)+1)
, 'randomforestclassifier__min_samples_leaf':range(1,50,5)
# , 'targetencoder_smoothing':range(0,20,3)
}
search = RandomizedSearchCV(pipe
,param_distributions = param_dist
,n_iter = 100
,cv = 3
,scoring = 'accuracy'
,verbose = 10
,return_train_score = True
,n_jobs = -1
)
search.fit(X_train, y_train)
print('Best parameters: ', search.best_params_)
print('Best score: ',search.best_score_)
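# The full set of tried hyperparameter combinations can be inspected as a DataFrame,
# ranked by mean cross-validated accuracy (uses only the fitted `search` object above).
cv_results = pd.DataFrame(search.cv_results_).sort_values('rank_test_score')
print(cv_results[['params', 'mean_test_score', 'std_test_score']].head())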
from sklearn import metrics
sorted(metrics.SCORERS.keys())
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [x] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [x] Plot a confusion matrix for your Tanzania Waterpumps model.- [x] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [x] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [x] Commit your notebook to your fork of the GitHub repo.- [x] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See module 3 assignment notebook)- [ ] More Categorical Encoding. (See module 2 assignment notebook)- [ ] Stacking Ensemble. (See below) Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)```
###Code
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
import pandas as pd
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
%matplotlib inline
import category_encoders as ce
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
def wrangle(X):
"""Wrangles train, validate, and test sets in the same way"""
X = X.copy()
# Convert date_recorded to datetime
X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
# Extract components from date_recorded, then drop the original column
X['year_recorded'] = X['date_recorded'].dt.year
X['month_recorded'] = X['date_recorded'].dt.month
X['day_recorded'] = X['date_recorded'].dt.day
X = X.drop(columns='date_recorded')
# Engineer feature: how many years from construction_year to date_recorded
X['years'] = X['year_recorded'] - X['construction_year']
# Drop recorded_by (never varies) and id (always varies, random)
unusable_variance = ['recorded_by', 'id']
X = X.drop(columns=unusable_variance)
# Drop duplicate columns
duplicate_columns = ['quantity_group']
X = X.drop(columns=duplicate_columns)
# About 3% of the time, latitude has small values near zero,
# outside Tanzania, so we'll treat these like null values
X['latitude'] = X['latitude'].replace(-2e-08, np.nan)
# When columns have zeros and shouldn't, they are like null values
cols_with_zeros = ['construction_year', 'longitude', 'latitude', 'gps_height', 'population']
for col in cols_with_zeros:
X[col] = X[col].replace(0, np.nan)
return X
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
# Split train into train & val. Make val the same size as test.
target = 'status_group'
train, val = train_test_split(train, test_size=len(test),
stratify=train[target], random_state=42)
# Wrangle train, validate, and test sets in the same way
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
# Arrange data into X features matrix and y target vector
X_train = train.drop(columns=target)
y_train = train[target]
X_val = val.drop(columns=target)
y_val = val[target]
X_test = test
# Make pipeline!
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='mean'),
RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
# Fit on train, score on val
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
print('Validation Accuracy', accuracy_score(y_val, y_pred))
###Output
Validation Accuracy 0.8140409527789386
###Markdown
confusion matrix
###Code
y_val.value_counts()
y_pred
from sklearn.metrics import confusion_matrix
confusion_matrix(y_val, y_pred)
# We need to get labels
from sklearn.utils.multiclass import unique_labels
unique_labels(y_val)
# Let's write the function iteratively
# 1. Check that our labels are correct
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
return columns, index
plot_confusion_matrix(y_val, y_pred)
# 2. Make it a pandas dataframe
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred),
columns=columns, index=index)
return table
plot_confusion_matrix(y_val, y_pred)
# 3. Plot a heatmap
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred),
columns=columns, index=index)
return sns.heatmap(table, annot=True, fmt='d', cmap='GnBu')
plot_confusion_matrix(y_val, y_pred);
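# The confusion matrix can also be turned into per-class precision and recall
# directly; this sketch only reuses objects already defined above.
cm = confusion_matrix(y_val, y_pred)
labels = unique_labels(y_val)
recall = np.diag(cm) / cm.sum(axis=1)     # per actual class: fraction predicted correctly
precision = np.diag(cm) / cm.sum(axis=0)  # per predicted class: fraction actually correct
for label, p, r in zip(labels, precision, recall):
    print(f'{label}: precision={p:.3f}, recall={r:.3f}')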
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See below)- [ ] Stacking Ensemble. (See below)- [ ] More Categorical Encoding. (See below) RandomizedSearchCV / GridSearchCV, for model selection- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? 
Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)``` More Categorical Encodings**1.** The article **[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)** mentions 4 encodings:- **"Categorical Encoding":** This means using the raw categorical values as-is, not encoded. Scikit-learn doesn't support this, but some tree algorithm implementations do. For example, [Catboost](https://catboost.ai/), or R's [rpart](https://cran.r-project.org/web/packages/rpart/index.html) package.- **Numeric Encoding:** Synonymous with Label Encoding, or "Ordinal" Encoding with random order. We can use [category_encoders.OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html).- **One-Hot Encoding:** We can use [category_encoders.OneHotEncoder](http://contrib.scikit-learn.org/categorical-encoding/onehot.html).- **Binary Encoding:** We can use [category_encoders.BinaryEncoder](http://contrib.scikit-learn.org/categorical-encoding/binary.html).**2.** The short video **[Coursera โ How to Win a Data Science Competition: Learn from Top Kagglers โ Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)** introduces an interesting idea: use both X _and_ y to encode categoricals.Category Encoders has multiple implementations of this general concept:- [CatBoost Encoder](http://contrib.scikit-learn.org/categorical-encoding/catboost.html)- [James-Stein Encoder](http://contrib.scikit-learn.org/categorical-encoding/jamesstein.html)- [Leave One Out](http://contrib.scikit-learn.org/categorical-encoding/leaveoneout.html)- [M-estimate](http://contrib.scikit-learn.org/categorical-encoding/mestimate.html)- [Target Encoder](http://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)- [Weight of Evidence](http://contrib.scikit-learn.org/categorical-encoding/woe.html)Category Encoder's mean encoding implementations work for regression problems or binary classification problems. For multi-class classification problems, you will need to temporarily reformulate it as binary classification. For example:```pythonencoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) Both parameters > 1 to avoid overfittingX_train_encoded = encoder.fit_transform(X_train, y_train=='functional')X_val_encoded = encoder.transform(X_train, y_val=='functional')```**3.** The **[dirty_cat](https://dirty-cat.github.io/stable/)** library has a Target Encoder implementation that works with multi-class classification.```python dirty_cat.TargetEncoder(clf_type='multiclass-clf')```It also implements an interesting idea called ["Similarity Encoder" for dirty categories](https://www.slideshare.net/GaelVaroquaux/machine-learning-on-non-curated-data-154905090).However, it seems like dirty_cat doesn't handle missing values or unknown categories as well as category_encoders does. 
And you may need to use it with one column at a time, instead of with your whole dataframe.**4. [Embeddings](https://www.kaggle.com/learn/embeddings)** can work well with sparse / high cardinality categoricals._**I hope it's not too frustrating or confusing that there's not one "canonical" way to encode categoricals. It's an active area of research and experimentation! Maybe you can make your own contributions!**_
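As a concrete sketch of the binary reformulation described above (one target encoding per class, results concatenated column-wise), here is a minimal example. It assumes `X_train`, `X_val`, and `y_train` as built in the cells below; the smoothing values are illustrative, and in practice you would likely pass only the categorical columns via the encoder's `cols` argument.

```python
import pandas as pd
import category_encoders as ce

encoded_parts_train, encoded_parts_val = [], []
for klass in y_train.unique():
    encoder = ce.TargetEncoder(min_samples_leaf=2, smoothing=2)
    # Fit against a binary "is this class?" target, as described above
    part_train = encoder.fit_transform(X_train, y_train == klass)
    part_val = encoder.transform(X_val)
    encoded_parts_train.append(part_train.add_suffix(f'__{klass}'))
    encoded_parts_val.append(part_val.add_suffix(f'__{klass}'))

X_train_encoded = pd.concat(encoded_parts_train, axis='columns')
X_val_encoded = pd.concat(encoded_parts_val, axis='columns')
```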
###Code
import os, sys
in_colab = 'google.colab' in sys.modules
# If you're in Colab...
if in_colab:
# Pull files from Github repo
os.chdir('/content')
!git init .
!git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
!git pull origin master
# Install required python packages
!pip install -r requirements.txt
# Change into directory for module
os.chdir('module4')
import pandas as pd
from sklearn.model_selection import train_test_split
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'),
pd.read_csv('../data/tanzania/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
# # Split train into train & val. Make val the same size as test.
# target = 'status_group'
# train, val = train_test_split(train, test_size=len(test),
# stratify=train[target], random_state=42)
train_labels = train['status_group']
train_features = train.drop(['status_group'], axis=1)
X_train, X_val, y_train, y_val = train_test_split(train_features, train_labels)
import category_encoders as ce
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import randint, uniform
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
pipeline = make_pipeline(
ce.OrdinalEncoder(),
RandomForestClassifier(max_depth=40, max_features=.5, min_samples_leaf=10, n_estimators=1000)
)
def datacleaning(frame):
frame = frame.copy()
frame = frame.drop(['quantity_group', 'source_type', 'quality_group',
'management_group', 'id', 'num_private', 'funder',
'gps_height', 'subvillage', 'wpt_name', 'scheme_name',
'recorded_by', 'region_code','district_code',
'extraction_type_group', 'waterpoint_type_group',
'amount_tsh'], axis=1)
    frame['latitude'] = frame['latitude'].replace(-2e-08, 0)
cols_with_zeroes = ['latitude', 'longitude', 'construction_year']
for col in cols_with_zeroes:
frame[col] = frame[col].replace(0, frame[col].mean())
high_cardinality = ['installer', 'lga', 'ward']
for col in high_cardinality:
top10 = frame[col].value_counts()[:10].index
frame.loc[~frame[col].isin(top10), col] = 'OTHER'
frame['date_recorded'] = pd.to_datetime(frame['date_recorded'],
infer_datetime_format=True)
frame['date_recorded'] = frame['date_recorded'].dt.year
frame['age'] = frame['date_recorded'] - frame['construction_year']
return frame
X_train = datacleaning(X_train)
X_val = datacleaning(X_val)
X_test = datacleaning(test)
# train_features = datacleaning(train_features) #
pipeline.fit(X_train, y_train)
print(pipeline.score(X_train, y_train))
print(pipeline.score(X_val, y_val))
# param_distributions = {
# 'randomforestclassifier__max_depth': [30,35,40,45, None],
# 'randomforestclassifier__n_estimators': randint(5,500),
# 'randomforestclassifier__max_features': uniform(0,1),
# 'randomforestclassifier__min_samples_leaf': randint(1,100)
# }
# search = RandomizedSearchCV(pipeline,
# param_distributions=param_distributions,
# n_iter = 10,
# cv = 3,
# )
# search.fit(train_features,train_labels)
# print(search.best_params_)
submission_array = pipeline.predict(X_test)
sub_df = pd.Series(submission_array)
sub_id = test['id']
submission_df = pd.concat([sub_id, sub_df], axis=1, ignore_index=True)
submission_df = submission_df.rename(mapper={0:'id',1:'status_group'}, axis=1)
!pwd
submission_df.to_csv('submission2.csv', index=False)
# def plot_confusion_matrix(y_true, y_pred):
# labels = unique_labels(y_true)
# columns = [f'Predicted {label}' for label in labels]
# index = [f'Actual {label}' for label in labels]
# table = pd.DataFrame(confusion_matrix(y_true, y_pred),
# columns=columns, index=index)
# return table
# plot_confusion_matrix(y_val, y_pred)
def plot_confusion_matrix(y_true,y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred),
columns = columns, index = index)
return table
plot_confusion_matrix(y_val, pipeline.predict(X_val))
###Output
_____no_output_____
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 4 Assignment- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_- [ ] Commit your notebook to your fork of the GitHub repo.- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. Stretch Goals Reading- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) Doing- [ ] Share visualizations in our Slack channel!- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See below)- [ ] Stacking Ensemble. (See below)- [ ] More Categorical Encoding. (See below) RandomizedSearchCV / GridSearchCV, for model selection- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? 
Stacking EnsembleHere's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)``` More Categorical Encodings**1.** The article **[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)** mentions 4 encodings:- **"Categorical Encoding":** This means using the raw categorical values as-is, not encoded. Scikit-learn doesn't support this, but some tree algorithm implementations do. For example, [Catboost](https://catboost.ai/), or R's [rpart](https://cran.r-project.org/web/packages/rpart/index.html) package.- **Numeric Encoding:** Synonymous with Label Encoding, or "Ordinal" Encoding with random order. We can use [category_encoders.OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html).- **One-Hot Encoding:** We can use [category_encoders.OneHotEncoder](http://contrib.scikit-learn.org/categorical-encoding/onehot.html).- **Binary Encoding:** We can use [category_encoders.BinaryEncoder](http://contrib.scikit-learn.org/categorical-encoding/binary.html).**2.** The short video **[Coursera โ How to Win a Data Science Competition: Learn from Top Kagglers โ Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)** introduces an interesting idea: use both X _and_ y to encode categoricals.Category Encoders has multiple implementations of this general concept:- [CatBoost Encoder](http://contrib.scikit-learn.org/categorical-encoding/catboost.html)- [James-Stein Encoder](http://contrib.scikit-learn.org/categorical-encoding/jamesstein.html)- [Leave One Out](http://contrib.scikit-learn.org/categorical-encoding/leaveoneout.html)- [M-estimate](http://contrib.scikit-learn.org/categorical-encoding/mestimate.html)- [Target Encoder](http://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)- [Weight of Evidence](http://contrib.scikit-learn.org/categorical-encoding/woe.html)Category Encoder's mean encoding implementations work for regression problems or binary classification problems. For multi-class classification problems, you will need to temporarily reformulate it as binary classification. For example:```pythonencoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) Both parameters > 1 to avoid overfittingX_train_encoded = encoder.fit_transform(X_train, y_train=='functional')X_val_encoded = encoder.transform(X_train, y_val=='functional')```**3.** The **[dirty_cat](https://dirty-cat.github.io/stable/)** library has a Target Encoder implementation that works with multi-class classification.```python dirty_cat.TargetEncoder(clf_type='multiclass-clf')```It also implements an interesting idea called ["Similarity Encoder" for dirty categories](https://www.slideshare.net/GaelVaroquaux/machine-learning-on-non-curated-data-154905090).However, it seems like dirty_cat doesn't handle missing values or unknown categories as well as category_encoders does. 
And you may need to use it with one column at a time, instead of with your whole dataframe.**4. [Embeddings](https://www.kaggle.com/learn/embeddings)** can work well with sparse / high cardinality categoricals._**I hope it's not too frustrating or confusing that there's not one "canonical" way to encode categoricals. It's an active area of research and experimentation! Maybe you can make your own contributions!**_
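The encoders listed above are easy to compare empirically by swapping them into the same pipeline and cross-validating each one. This is a minimal sketch, not a tuned benchmark: it assumes `X_train` and `y_train` as built later in this notebook, the model settings are arbitrary, and one-hot encoding can be slow on the high-cardinality columns in this dataset.

```python
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline

encoders = {
    'ordinal': ce.OrdinalEncoder(),
    'one-hot': ce.OneHotEncoder(use_cat_names=True),
    'binary': ce.BinaryEncoder(),
}

for name, encoder in encoders.items():
    pipe = make_pipeline(
        encoder,
        SimpleImputer(strategy='median'),
        RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=42),
    )
    scores = cross_val_score(pipe, X_train, y_train, cv=3, scoring='accuracy')
    print(f'{name}: mean CV accuracy = {scores.mean():.3f}')
```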
###Code
import os, sys
in_colab = 'google.colab' in sys.modules
# If you're in Colab...
if in_colab:
# Pull files from Github repo
os.chdir('/content')
!git init .
!git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
!git pull origin master
# Install required python packages
!pip install -r requirements.txt
# Change into directory for module
os.chdir('module4')
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
# Merge train_features.csv & train_labels.csv
import pandas as pd
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
pd.set_option('display.max_columns', None)
numeric_cols = ['amount_tsh', 'gps_height', 'longitude', 'latitude',
'num_private', 'region_code', 'district_code',
'population', 'construction_year']
cat_cols = ['basin', 'region', 'lga', 'public_meeting', 'scheme_management',
'permit', 'extraction_type', 'management', 'payment',
'water_quality', 'quantity', 'source', 'waterpoint_type', 'status_group']
features = numeric_cols + cat_cols
target = 'status_group'
import numpy as np
def clean(data):
data = data.copy()
data['latitude'] = data['latitude'].replace(-2e-8, 0)
replace_zeroes = ['latitude', 'longitude', 'population', 'construction_year']
for col in replace_zeroes:
data[col] = data[col].replace(0, np.nan)
return data
train[numeric_cols] = clean(train[numeric_cols])
train['date_recorded'] = pd.to_datetime(train['date_recorded'])
slim_train = train[features].copy()  # copy so adding the date columns below doesn't raise SettingWithCopyWarning
target_train = train[target]
slim_train['month'] = train['date_recorded'].dt.month
slim_train['year'] = train['date_recorded'].dt.year
slim_train['day'] = train['date_recorded'].dt.day
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(),
RandomForestClassifier(n_estimators=154, max_depth=68, random_state=42,
n_jobs=-1, criterion='entropy',
min_samples_leaf=0.003)
)
X_train, X_val = train_test_split(slim_train, random_state=42)
y_train = X_train[target]
X_train = X_train.drop(target, axis=1)
y_val = X_val[target]
X_val = X_val.drop(target, axis=1)
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
print('Train accuracy: ', pipeline.score(X_train, y_train))
print('Validation accuracy: ', pipeline.score(X_val, y_val))
from IPython.display import FileLink, FileLinks
# Predictions for the submission must come from the test set, prepared the same way
# as the training data (same feature subset, cleaning, and date features).
X_test = test[[col for col in features if col != target]].copy()
X_test[numeric_cols] = clean(X_test[numeric_cols])
test_dates = pd.to_datetime(test['date_recorded'])
X_test['month'] = test_dates.dt.month
X_test['year'] = test_dates.dt.year
X_test['day'] = test_dates.dt.day
submission = pd.DataFrame()
submission['id'] = test['id']
submission['status_group'] = pipeline.predict(X_test)
submission.to_csv('last_hurrah.csv', index=False)
# If you're working locally, the csv file is saved in the same directory as your notebook.
# If you're using Google Colab, you can use this code to download your submission csv file.
from google.colab import files
files.download('last_hurrah.csv')
from sklearn.metrics import confusion_matrix
confusion_matrix(y_val, y_pred)
from sklearn.utils.multiclass import unique_labels
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
    index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred),
columns=columns, index=index)
return table
plot_confusion_matrix(y_val, y_pred)
import seaborn as sns
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred),
columns=columns, index=index)
return sns.heatmap(table, annot=True, fmt='d', cmap='viridis')
plot_confusion_matrix(y_val, y_pred);
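# A row-normalized variant of the same heatmap can be easier to read when the
# classes are imbalanced: each row then shows recall for that actual class.
# This sketch reuses only the objects already defined above.
def plot_normalized_confusion_matrix(y_true, y_pred):
    labels = unique_labels(y_true)
    columns = [f'Predicted {label}' for label in labels]
    index = [f'Actual {label}' for label in labels]
    cm = confusion_matrix(y_true, y_pred)
    cm_normalized = cm / cm.sum(axis=1, keepdims=True)
    table = pd.DataFrame(cm_normalized, columns=columns, index=index)
    return sns.heatmap(table, annot=True, fmt='.2f', cmap='viridis')
plot_normalized_confusion_matrix(y_val, y_pred);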
train['month'] = train['date_recorded'].dt.month
train['year'] = train['date_recorded'].dt.year
train['day'] = train['date_recorded'].dt.day
train = train.drop('date_recorded', axis=1)
search_y_train = train[target]
search_train = train.drop('status_group', axis=1)
train.head()
pipeline_x = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(),
RandomForestClassifier()
)
from scipy.stats import randint, uniform
param_distributions = {
'simpleimputer__strategy': ['mean', 'median'],
'randomforestclassifier__n_estimators': randint(1, 500),
'randomforestclassifier__max_depth': randint(1, 100),
'randomforestclassifier__criterion': ['gini', 'entropy'],
'randomforestclassifier__max_features': randint(1, len(features)),
'randomforestclassifier__min_samples_leaf': uniform(0, 0.5),
}
from sklearn.model_selection import RandomizedSearchCV
search = RandomizedSearchCV(
pipeline_x,
param_distributions=param_distributions,
n_iter=30,
cv=3,
# scoring='accuracy',
verbose=10,
return_train_score=True,
n_jobs=-1
)
search.fit(search_train, search_y_train);
print('Best Hyperparameters:', search.best_params_)
print('Cross_validation accuracy:', search.best_score_)
test.shape
###Output
_____no_output_____ |
2.describe-data/0.describe-data.ipynb | ###Markdown
Describing Data by Batch
###Code
import os
import pathlib
import numpy as np
import pandas as pd
import plotnine as gg
from pycytominer.cyto_utils import infer_cp_features
from scripts.processing_utils import load_data
def get_count_per_batch(df, batch_name):
result = (
df
.Metadata_Plate
.value_counts()
.reset_index()
.rename({
"index": "Metadata_Plate",
"Metadata_Plate": "profile_count"
}, axis="columns")
.assign(batch=batch_name)
)
return result
def count_treatments_per_plate(df, batch_name):
group_cols = ["Metadata_clone_number", "Metadata_treatment", "Metadata_Plate"]
result = (
df
.groupby(group_cols)
["Metadata_Well"]
.count()
.reset_index()
.rename({
"Metadata_Well": "profile_count",
group_cols[0]: "Metadata_clone"
}, axis="columns")
.assign(batch=batch_name)
)
return result
def process_counts(batch_name, profile_dir="profiles"):
df = load_data(
batch=batch_name,
plates="all",
profile_dir=profile_dir,
suffix="normalized_feature_selected.csv.gz",
combine_dfs=True,
harmonize_cols=True,
add_cell_count=False,
)
batch_count = get_count_per_batch(df, batch_name)
treatment_count = count_treatments_per_plate(df, batch_name)
return df, batch_count, treatment_count
profile_dir = pathlib.Path("../0.generate-profiles/profiles")
batches = sorted([x for x in os.listdir(profile_dir) if x != ".DS_Store"])
batches
profile_dir
batch_data = {}
profile_count_list = list()
for batch in batches:
print("Now processing... {}".format(batch))
df, batch_count, treatment_count = process_counts(batch, profile_dir=profile_dir)
batch_data[batch] = {
"dataframe": df,
"metafeatures": infer_cp_features(df, metadata=True),
"batch_count": batch_count,
"treatment_count": treatment_count
}
profile_count_list.append(
treatment_count.loc[:, ["Metadata_clone", "Metadata_treatment", "profile_count"]]
)
sample_count_df = (
pd.DataFrame(
pd.concat(profile_count_list, axis="rows")
.fillna("DMSO")
.reset_index(drop=True)
.groupby(["Metadata_clone", "Metadata_treatment"])
["profile_count"]
.sum()
)
.sort_values("profile_count", ascending=False)
.reset_index()
)
sample_treatment_count_df = (
sample_count_df
.pivot_table(
values="profile_count",
index="Metadata_clone",
columns="Metadata_treatment",
aggfunc=lambda x: x.sum()
)
.fillna(0)
.astype(int)
)
sample_treatment_count_df.to_csv(
pathlib.Path("results/sample_summary_profile_counts.tsv"), sep="\t", index=True
)
sample_treatment_count_df
plot_ready_df = (
sample_treatment_count_df
.reset_index()
.melt(
id_vars="Metadata_clone",
value_vars=sample_count_df.Metadata_treatment.unique(),
value_name="profile_count"
)
)
clone_order = (
plot_ready_df
.groupby("Metadata_clone")
.sum()
.reset_index()
.sort_values(by="profile_count")
.Metadata_clone
)
plot_ready_df.Metadata_clone = pd.Categorical(
plot_ready_df.Metadata_clone,
categories=clone_order
)
plot_ready_df.head()
total_count = plot_ready_df.profile_count.sum()
total_label = "Total Profile Count: {}".format(total_count)
treatment_count_gg = (
gg.ggplot(plot_ready_df, gg.aes(y="profile_count", x="Metadata_clone")) +
gg.geom_bar(gg.aes(fill="Metadata_treatment"), position="stack", stat="identity") +
gg.coord_flip() +
gg.theme_bw() +
gg.theme(axis_text_y=gg.element_text(size=5)) +
gg.ylab("Profile Count") +
gg.xlab("Clone") +
gg.ggtitle(total_label)
)
output_figure = pathlib.Path("figures", "treatment_count.png")
treatment_count_gg.save(output_figure, height=4, width=5.5, dpi=400, verbose=False)
treatment_count_gg
# How many unique clones
len(sample_treatment_count_df.index.unique())
all_profile_counts = []
for key, value in batch_data.items():
all_profile_counts.append(batch_data[key]["batch_count"])
profile_counts_df = pd.concat(all_profile_counts, axis="rows").reset_index(drop=True)
profile_counts_df
all_treatment_counts = []
for key, value in batch_data.items():
all_treatment_counts.append(batch_data[key]["treatment_count"])
treatment_counts_df = pd.concat(all_treatment_counts, axis="rows", sort=True).reset_index(drop=True)
treatment_counts_df.head()
clone_counts_df = (
treatment_counts_df
.groupby(["Metadata_clone", "Metadata_treatment"])
["profile_count"]
.sum()
.reset_index()
.sort_values(by=["Metadata_clone", "Metadata_treatment"])
)
output_file = pathlib.Path("tables/clone_counts_bortezomib.csv")
clone_counts_df.to_csv(output_file, sep=',', index=False)
clone_counts_df
###Output
_____no_output_____
###Markdown
Visualize Counts
###Code
total_count = profile_counts_df.profile_count.sum()
total_label = "Total Profile Count: {}".format(total_count)
profile_counts_df.Metadata_Plate = profile_counts_df.Metadata_Plate.astype(str)
batch_count_gg = (
gg.ggplot(profile_counts_df, gg.aes(y="profile_count", x="batch")) +
gg.geom_bar(gg.aes(fill="Metadata_Plate"), stat="identity") +
gg.coord_flip() +
gg.theme_bw() +
gg.ylab("Profile Count") +
gg.xlab("Batch") +
gg.ggtitle(total_label)
)
output_figure = pathlib.Path("figures/batch_count.png")
batch_count_gg.save(output_figure, height=4, width=6.5, dpi=400, verbose=False)
batch_count_gg
###Output
_____no_output_____
###Markdown
Output Metadata Counts for Each BatchFor quick description
###Code
suspect_batches = [
"2019_06_25_Batch3", # Too confluent, not even DMSO control
"2019_11_11_Batch4", # Too confluent
"2019_11_19_Batch5", # Too confluent
]
non_suspect_counts = treatment_counts_df.loc[~treatment_counts_df.batch.isin(suspect_batches), :]
treatment_counts_df.Metadata_clone = pd.Categorical(
treatment_counts_df.Metadata_clone,
categories=clone_order
)
total_count = non_suspect_counts.profile_count.sum()
total_label = "Total Usable Profile Count: {}".format(total_count)
treatment_count_by_batch_gg = (
gg.ggplot(treatment_counts_df, gg.aes(y="profile_count", x="Metadata_clone")) +
gg.geom_bar(gg.aes(fill="Metadata_treatment"), position="stack", stat="identity") +
gg.coord_flip() +
gg.facet_wrap("~batch") +
gg.theme_bw() +
gg.theme(
axis_text_y=gg.element_text(size=3.5),
axis_text_x=gg.element_text(size=6),
strip_text=gg.element_text(size=6, color="black"),
strip_background=gg.element_rect(colour="black", fill="#fdfff4")
) +
gg.ylab("Profile Count") +
gg.xlab("Clones") +
gg.ggtitle(total_label)
)
output_figure = pathlib.Path("figures/treatment_count_by_batch.png")
treatment_count_by_batch_gg.save(output_figure, height=8, width=5.5, dpi=400, verbose=False)
treatment_count_by_batch_gg
batch1_40x_df = treatment_counts_df.query("batch == '2019_02_15_Batch1_40X'").dropna(axis="columns")
batch1_40x_df
batch1_20x_df = treatment_counts_df.query("batch == '2019_02_15_Batch1_20X'").dropna(axis="columns")
batch1_20x_df
batch2_df = treatment_counts_df.query("batch == '2019_03_20_Batch2'").dropna(axis="columns")
batch2_df
batch3_df = treatment_counts_df.query("batch == '2019_06_25_Batch3'").dropna(axis="columns")
batch3_df
batch4_df = treatment_counts_df.query("batch == '2019_11_11_Batch4'").dropna(axis="columns")
batch4_df
batch5_df = treatment_counts_df.query("batch == '2019_11_19_Batch5'").dropna(axis="columns")
batch5_df
batch6_df = treatment_counts_df.query("batch == '2019_11_20_Batch6'").dropna(axis="columns")
batch6_df
batch7_df = treatment_counts_df.query("batch == '2019_11_22_Batch7'").dropna(axis="columns")
batch7_df
batch8_df = treatment_counts_df.query("batch == '2020_07_02_Batch8'").dropna(axis="columns")
batch8_df
batch9_df = treatment_counts_df.query("batch == '2020_08_24_Batch9'").dropna(axis="columns")
batch9_df
batch10_df = treatment_counts_df.query("batch == '2020_09_08_Batch10'").dropna(axis="columns")
batch10_df
batch11_df = treatment_counts_df.query("batch == '2021_02_08_Batch11'").dropna(axis="columns")
batch11_df
batch12_df = treatment_counts_df.query("batch == '2021_03_03_Batch12'").dropna(axis="columns")
batch12_df
batch13_df = treatment_counts_df.query("batch == '2021_03_03_Batch13'").dropna(axis="columns")
batch13_df
batch14_df = treatment_counts_df.query("batch == '2021_03_03_Batch14'").dropna(axis="columns")
batch14_df
batch15_df = treatment_counts_df.query("batch == '2021_03_03_Batch15'").dropna(axis="columns")
batch15_df
batch16_df = treatment_counts_df.query("batch == '2021_03_05_Batch16'").dropna(axis="columns")
batch16_df
batch17_df = treatment_counts_df.query("batch == '2021_03_05_Batch17'").dropna(axis="columns")
batch17_df
batch18_df = treatment_counts_df.query("batch == '2021_03_12_Batch18'").dropna(axis="columns")
batch18_df
batch19_df = treatment_counts_df.query("batch == '2021_03_12_Batch19'").dropna(axis="columns")
batch19_df
batch20_df = treatment_counts_df.query("batch == '2021_06_25_Batch20'").dropna(axis="columns")
batch20_df
batch21_df = treatment_counts_df.query("batch == '2021_06_25_Batch21'").dropna(axis="columns")
batch21_df
batch22_df = treatment_counts_df.query("batch == '2021_07_21_Batch22'").dropna(axis="columns")
batch22_df
batch23_df = treatment_counts_df.query("batch == '2021_07_21_Batch23'").dropna(axis="columns")
batch23_df
batch24_df = treatment_counts_df.query("batch == '2021_08_02_Batch24'").dropna(axis="columns")
batch24_df
batch25_df = treatment_counts_df.query("batch == '2021_08_02_Batch25'").dropna(axis="columns")
batch25_df
batch26_df = treatment_counts_df.query("batch == '2021_08_03_Batch26'").dropna(axis="columns")
batch26_df
batch27_df = treatment_counts_df.query("batch == '2021_08_03_Batch27'").dropna(axis="columns")
batch27_df
###Output
_____no_output_____
###Markdown
Describing Data by Batch
###Code
import os
import numpy as np
import pandas as pd
import plotnine as gg
from pycytominer.cyto_utils import infer_cp_features
from scripts.processing_utils import load_data
def get_count_per_batch(df, batch_name):
result = (
df
.Metadata_Plate
.value_counts()
.reset_index()
.rename({
"index": "Metadata_Plate",
"Metadata_Plate": "profile_count"
}, axis="columns")
.assign(batch=batch_name)
)
return result
def count_treatments_per_plate(df, batch_name):
if batch_name in ["2019_02_15_Batch1_20X", "2019_02_15_Batch1_40X", "2019_03_20_Batch2"]:
group_cols = ["Metadata_CellLine", "Metadata_Dosage", "Metadata_Plate"]
elif batch_name in ["2019_06_25_Batch3"]:
group_cols = ["Metadata_clone_number", "Metadata_Plate"]
else:
group_cols = ["Metadata_clone_number", "Metadata_treatment", "Metadata_Plate"]
result = (
df
.groupby(group_cols)
["Metadata_Well"]
.count()
.reset_index()
.rename({
"Metadata_Well": "profile_count",
group_cols[0]: "Metadata_clone"
}, axis="columns")
.assign(batch=batch_name)
)
if batch_name not in ["2019_06_25_Batch3"]:
result = (
result.rename({
group_cols[1]: "Metadata_treatment"
}, axis="columns")
)
return result
def process_counts(batch_name, profile_dir="profiles"):
df = load_data(batch_name, profile_dir, combine_dfs=True)
batch_count = get_count_per_batch(df, batch_name)
treatment_count = count_treatments_per_plate(df, batch_name)
return df, batch_count, treatment_count
profile_dir = os.path.join("..", "0.generate-profiles", "profiles")
batches = [x for x in os.listdir(profile_dir) if x != ".DS_Store"]
batches
batch_data = {}
all_clones = list()
profile_count_list = list()
for batch in batches:
print("Now processing... {}".format(batch))
df, batch_count, treatment_count = process_counts(batch, profile_dir=profile_dir)
batch_data[batch] = {
"dataframe": df,
"metafeatures": infer_cp_features(df, metadata=True),
"batch_count": batch_count,
"treatment_count": treatment_count
}
all_clones += treatment_count.Metadata_clone.unique().tolist()
profile_count_list.append(
treatment_count.loc[:, ["Metadata_clone", "Metadata_treatment", "profile_count"]]
)
sample_count_df = (
pd.DataFrame(
pd.concat(profile_count_list, axis="rows")
.fillna("DMSO")
.reset_index(drop=True)
.groupby(["Metadata_clone", "Metadata_treatment"])
["profile_count"]
.sum()
)
.sort_values("profile_count", ascending=False)
.reset_index()
)
sample_count_df
sample_treatment_count_df = (
sample_count_df
.pivot_table(values="profile_count", index="Metadata_clone", columns="Metadata_treatment")
.fillna(0)
.astype(int)
)
sample_treatment_count_df
len(set(all_clones))
all_profile_counts = []
for key, value in batch_data.items():
all_profile_counts.append(batch_data[key]["batch_count"])
profile_counts_df = pd.concat(all_profile_counts, axis="rows")
profile_counts_df
all_treatment_counts = []
for key, value in batch_data.items():
all_treatment_counts.append(batch_data[key]["treatment_count"])
treatment_counts_df = pd.concat(all_treatment_counts, axis="rows", sort=True)
treatment_counts_df.head()
clone_counts_df = (
treatment_counts_df
.groupby(["Metadata_clone", "Metadata_treatment"])
["profile_count"]
.sum()
.reset_index()
)
output_file = os.path.join("tables", "clone_counts_bortezomib.csv")
clone_counts_df.to_csv(output_file, sep=',', index=False)
clone_counts_df
###Output
_____no_output_____
###Markdown
Visualize Counts
###Code
total_count = profile_counts_df.profile_count.sum()
total_label = "Total Profile Count: {}".format(total_count)
batch_count_gg = (
gg.ggplot(profile_counts_df, gg.aes(y="profile_count", x="batch")) +
gg.geom_bar(gg.aes(fill="Metadata_Plate"), stat="identity") +
gg.coord_flip() +
gg.theme_bw() +
gg.ylab("Profile Count") +
gg.xlab("Batch") +
gg.ggtitle(total_label)
)
output_figure = os.path.join("figures", "batch_count.png")
batch_count_gg.save(output_figure, height=4, width=5.5, dpi=400, verbose=False)
batch_count_gg
###Output
_____no_output_____
###Markdown
Output Metadata Counts for Each BatchFor quick description
###Code
batch1_40x_df = treatment_counts_df.query("batch == '2019_02_15_Batch1_40X'").dropna(axis="columns")
batch1_40x_df
batch1_20x_df = treatment_counts_df.query("batch == '2019_02_15_Batch1_20X'").dropna(axis="columns")
batch1_20x_df
batch2_df = treatment_counts_df.query("batch == '2019_03_20_Batch2'").dropna(axis="columns")
batch2_df
batch3_df = treatment_counts_df.query("batch == '2019_06_25_Batch3'").dropna(axis="columns")
batch3_df
batch4_df = treatment_counts_df.query("batch == '2019_11_11_Batch4'").dropna(axis="columns")
batch4_df
batch5_df = treatment_counts_df.query("batch == '2019_11_19_Batch5'").dropna(axis="columns")
batch5_df
batch6_df = treatment_counts_df.query("batch == '2019_11_20_Batch6'").dropna(axis="columns")
batch6_df
batch7_df = treatment_counts_df.query("batch == '2019_11_22_Batch7'").dropna(axis="columns")
batch7_df
###Output
_____no_output_____
###Markdown
Describing Data by Batch
###Code
import os
import pathlib
import numpy as np
import pandas as pd
import plotnine as gg
from pycytominer.cyto_utils import infer_cp_features
from scripts.processing_utils import load_data
def get_count_per_batch(df, batch_name):
result = (
df
.Metadata_Plate
.value_counts()
.reset_index()
.rename({
"index": "Metadata_Plate",
"Metadata_Plate": "profile_count"
}, axis="columns")
.assign(batch=batch_name)
)
return result
def count_treatments_per_plate(df, batch_name):
group_cols = ["Metadata_clone_number", "Metadata_treatment", "Metadata_Plate"]
result = (
df
.groupby(group_cols)
["Metadata_Well"]
.count()
.reset_index()
.rename({
"Metadata_Well": "profile_count",
group_cols[0]: "Metadata_clone"
}, axis="columns")
.assign(batch=batch_name)
)
return result
def process_counts(batch_name, profile_dir="profiles"):
df = load_data(
batch=batch_name,
plates="all",
profile_dir=profile_dir,
suffix="normalized_feature_selected.csv.gz",
combine_dfs=True,
harmonize_cols=True,
add_cell_count=False,
)
batch_count = get_count_per_batch(df, batch_name)
treatment_count = count_treatments_per_plate(df, batch_name)
return df, batch_count, treatment_count
profile_dir = pathlib.Path("../0.generate-profiles/profiles")
batches = sorted([x for x in os.listdir(profile_dir) if x != ".DS_Store"])
batches
profile_dir
batch_data = {}
profile_count_list = list()
for batch in batches:
print("Now processing... {}".format(batch))
df, batch_count, treatment_count = process_counts(batch, profile_dir=profile_dir)
batch_data[batch] = {
"dataframe": df,
"metafeatures": infer_cp_features(df, metadata=True),
"batch_count": batch_count,
"treatment_count": treatment_count
}
profile_count_list.append(
treatment_count.loc[:, ["Metadata_clone", "Metadata_treatment", "profile_count"]]
)
sample_count_df = (
pd.DataFrame(
pd.concat(profile_count_list, axis="rows")
.fillna("DMSO")
.reset_index(drop=True)
.groupby(["Metadata_clone", "Metadata_treatment"])
["profile_count"]
.sum()
)
.sort_values("profile_count", ascending=False)
.reset_index()
)
sample_treatment_count_df = (
sample_count_df
.pivot_table(
values="profile_count",
index="Metadata_clone",
columns="Metadata_treatment",
aggfunc=lambda x: x.sum()
)
.fillna(0)
.astype(int)
)
sample_treatment_count_df.to_csv(
pathlib.Path("results/sample_summary_profile_counts.tsv"), sep="\t", index=True
)
sample_treatment_count_df
plot_ready_df = (
sample_treatment_count_df
.reset_index()
.melt(
id_vars="Metadata_clone",
value_vars=sample_count_df.Metadata_treatment.unique(),
value_name="profile_count"
)
)
clone_order = (
plot_ready_df
.groupby("Metadata_clone")
.sum()
.reset_index()
.sort_values(by="profile_count")
.Metadata_clone
)
plot_ready_df.Metadata_clone = pd.Categorical(
plot_ready_df.Metadata_clone,
categories=clone_order
)
plot_ready_df.head()
total_count = plot_ready_df.profile_count.sum()
total_label = "Total Profile Count: {}".format(total_count)
treatment_count_gg = (
gg.ggplot(plot_ready_df, gg.aes(y="profile_count", x="Metadata_clone")) +
gg.geom_bar(gg.aes(fill="Metadata_treatment"), position="stack", stat="identity") +
gg.coord_flip() +
gg.theme_bw() +
gg.theme(axis_text_y=gg.element_text(size=5)) +
gg.ylab("Profile Count") +
gg.xlab("Clone") +
gg.ggtitle(total_label)
)
output_figure = pathlib.Path("figures", "treatment_count.png")
treatment_count_gg.save(output_figure, height=4, width=5.5, dpi=400, verbose=False)
treatment_count_gg
# How many unique clones
len(sample_treatment_count_df.index.unique())
all_profile_counts = []
for key, value in batch_data.items():
all_profile_counts.append(batch_data[key]["batch_count"])
profile_counts_df = pd.concat(all_profile_counts, axis="rows").reset_index(drop=True)
profile_counts_df
all_treatment_counts = []
for key, value in batch_data.items():
all_treatment_counts.append(batch_data[key]["treatment_count"])
treatment_counts_df = pd.concat(all_treatment_counts, axis="rows", sort=True).reset_index(drop=True)
treatment_counts_df.head()
clone_counts_df = (
treatment_counts_df
.groupby(["Metadata_clone", "Metadata_treatment"])
["profile_count"]
.sum()
.reset_index()
.sort_values(by=["Metadata_clone", "Metadata_treatment"])
)
output_file = pathlib.Path("tables/clone_counts_bortezomib.csv")
clone_counts_df.to_csv(output_file, sep=',', index=False)
clone_counts_df
###Output
_____no_output_____
###Markdown
Visualize Counts
###Code
total_count = profile_counts_df.profile_count.sum()
total_label = "Total Profile Count: {}".format(total_count)
profile_counts_df.Metadata_Plate = profile_counts_df.Metadata_Plate.astype(str)
batch_count_gg = (
gg.ggplot(profile_counts_df, gg.aes(y="profile_count", x="batch")) +
gg.geom_bar(gg.aes(fill="Metadata_Plate"), stat="identity") +
gg.coord_flip() +
gg.theme_bw() +
gg.ylab("Profile Count") +
gg.xlab("Batch") +
gg.ggtitle(total_label)
)
output_figure = pathlib.Path("figures/batch_count.png")
batch_count_gg.save(output_figure, height=4, width=6.5, dpi=400, verbose=False)
batch_count_gg
###Output
_____no_output_____
###Markdown
Output Metadata Counts for Each BatchFor quick description
###Code
suspect_batches = [
"2019_06_25_Batch3", # Too confluent, not even DMSO control
"2019_11_11_Batch4", # Too confluent
"2019_11_19_Batch5", # Too confluent
]
non_suspect_counts = treatment_counts_df.loc[~treatment_counts_df.batch.isin(suspect_batches), :]
treatment_counts_df.Metadata_clone = pd.Categorical(
treatment_counts_df.Metadata_clone,
categories=clone_order
)
total_count = non_suspect_counts.profile_count.sum()
total_label = "Total Usable Profile Count: {}".format(total_count)
treatment_count_by_batch_gg = (
gg.ggplot(treatment_counts_df, gg.aes(y="profile_count", x="Metadata_clone")) +
gg.geom_bar(gg.aes(fill="Metadata_treatment"), position="stack", stat="identity") +
gg.coord_flip() +
gg.facet_wrap("~batch") +
gg.theme_bw() +
gg.theme(
axis_text_y=gg.element_text(size=3.5),
axis_text_x=gg.element_text(size=6),
strip_text=gg.element_text(size=6, color="black"),
strip_background=gg.element_rect(colour="black", fill="#fdfff4")
) +
gg.ylab("Profile Count") +
gg.xlab("Clones") +
gg.ggtitle(total_label)
)
output_figure = pathlib.Path("figures/treatment_count_by_batch.png")
treatment_count_by_batch_gg.save(output_figure, height=8, width=5.5, dpi=400, verbose=False)
treatment_count_by_batch_gg
batch1_40x_df = treatment_counts_df.query("batch == '2019_02_15_Batch1_40X'").dropna(axis="columns")
batch1_40x_df
batch1_20x_df = treatment_counts_df.query("batch == '2019_02_15_Batch1_20X'").dropna(axis="columns")
batch1_20x_df
batch2_df = treatment_counts_df.query("batch == '2019_03_20_Batch2'").dropna(axis="columns")
batch2_df
batch3_df = treatment_counts_df.query("batch == '2019_06_25_Batch3'").dropna(axis="columns")
batch3_df
batch4_df = treatment_counts_df.query("batch == '2019_11_11_Batch4'").dropna(axis="columns")
batch4_df
batch5_df = treatment_counts_df.query("batch == '2019_11_19_Batch5'").dropna(axis="columns")
batch5_df
batch6_df = treatment_counts_df.query("batch == '2019_11_20_Batch6'").dropna(axis="columns")
batch6_df
batch7_df = treatment_counts_df.query("batch == '2019_11_22_Batch7'").dropna(axis="columns")
batch7_df
batch8_df = treatment_counts_df.query("batch == '2020_07_02_Batch8'").dropna(axis="columns")
batch8_df
batch9_df = treatment_counts_df.query("batch == '2020_08_24_Batch9'").dropna(axis="columns")
batch9_df
batch10_df = treatment_counts_df.query("batch == '2020_09_08_Batch10'").dropna(axis="columns")
batch10_df
batch11_df = treatment_counts_df.query("batch == '2021_02_08_Batch11'").dropna(axis="columns")
batch11_df
batch12_df = treatment_counts_df.query("batch == '2021_03_03_Batch12'").dropna(axis="columns")
batch12_df
batch13_df = treatment_counts_df.query("batch == '2021_03_03_Batch13'").dropna(axis="columns")
batch13_df
batch14_df = treatment_counts_df.query("batch == '2021_03_03_Batch14'").dropna(axis="columns")
batch14_df
batch15_df = treatment_counts_df.query("batch == '2021_03_03_Batch15'").dropna(axis="columns")
batch15_df
batch16_df = treatment_counts_df.query("batch == '2021_03_05_Batch16'").dropna(axis="columns")
batch16_df
batch17_df = treatment_counts_df.query("batch == '2021_03_05_Batch17'").dropna(axis="columns")
batch17_df
batch18_df = treatment_counts_df.query("batch == '2021_03_12_Batch18'").dropna(axis="columns")
batch18_df
batch19_df = treatment_counts_df.query("batch == '2021_03_12_Batch19'").dropna(axis="columns")
batch19_df
batch20_df = treatment_counts_df.query("batch == '2021_06_25_Batch20'").dropna(axis="columns")
batch20_df
batch21_df = treatment_counts_df.query("batch == '2021_06_25_Batch21'").dropna(axis="columns")
batch21_df
batch22_df = treatment_counts_df.query("batch == '2021_07_21_Batch22'").dropna(axis="columns")
batch22_df
batch23_df = treatment_counts_df.query("batch == '2021_07_21_Batch23'").dropna(axis="columns")
batch23_df
batch24_df = treatment_counts_df.query("batch == '2021_08_02_Batch24'").dropna(axis="columns")
batch24_df
batch25_df = treatment_counts_df.query("batch == '2021_08_02_Batch25'").dropna(axis="columns")
batch25_df
batch26_df = treatment_counts_df.query("batch == '2021_08_03_Batch26'").dropna(axis="columns")
batch26_df
batch27_df = treatment_counts_df.query("batch == '2021_08_03_Batch27'").dropna(axis="columns")
batch27_df
###Output
_____no_output_____ |
cada_vae/gating_cada_vae_model_training.ipynb | ###Markdown
Temperature Scaling
###Code
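# Temperature-scaled softmax applied row-wise: larger T flattens the class
# probabilities; the small 1e-12 offset guards against all-zero feature rows.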
def temp_scale(seen_features, T):
return np.array([np.exp(i)/np.sum(np.exp(i)) for i in (seen_features + 1e-12)/T])
for f in [10]:
print(f)
for t in [4]:
print(t)
fin_val_acc = 0
fin_train_acc = 0
for run in range(1):
prob_unseen_zs = unseen_zs
# prob_unseen_zs = np.array([np.exp(i)/np.sum(np.exp(i)) for i in unseen_zs])
# prob_noun_unseen_zs = np.array([np.exp(i)/np.sum(np.exp(i)) for i in noun_unseen_zs])
# prob_verb_unseen_zs = np.array([np.exp(i)/np.sum(np.exp(i)) for i in verb_unseen_zs])
prob_unseen_train = temp_scale(unseen_train, t)
# prob_unseen_train = np.array([np.exp(i)/np.sum(np.exp(i)) for i in unseen_train])
# np.array([np.exp(i)/np.sum(np.exp(i)) for i in unseen_train])
prob_seen_zs = seen_zs
# prob_seen_zs = np.array([np.exp(i)/np.sum(np.exp(i)) for i in seen_zs])
# prob_noun_seen_zs = np.array([np.exp(i)/np.sum(np.exp(i)) for i in noun_seen_zs])
# prob_verb_seen_zs = np.array([np.exp(i)/np.sum(np.exp(i)) for i in verb_seen_zs])
prob_seen_train = temp_scale(seen_train, t)
# prob_seen_train = np.array([np.exp(i)/np.sum(np.exp(i)) for i in seen_train])
# np.array([np.exp(i)/np.sum(np.exp(i)) for i in seen_train])
feat_unseen_zs = np.sort(prob_unseen_zs, 1)[:,::-1][:,:f]
# feat_noun_unseen_zs = np.sort(prob_noun_unseen_zs, 1)[:,::-1]
# feat_verb_unseen_zs = np.sort(prob_verb_unseen_zs, 1)[:,::-1]
feat_unseen_train = np.sort(prob_unseen_train, 1)[:,::-1][:,:f]
feat_seen_zs = np.sort(prob_seen_zs, 1)[:,::-1][:,:f]
# feat_noun_seen_zs = np.sort(prob_noun_seen_zs, 1)[:,::-1]
# feat_verb_seen_zs = np.sort(prob_verb_seen_zs, 1)[:,::-1]
feat_seen_train = np.sort(prob_seen_train, 1)[:,::-1][:,:f]
val_unseen_inds = np.random.choice(np.arange(feat_unseen_train.shape[0]), 300, replace=False)
val_seen_inds = np.random.choice(np.arange(feat_seen_train.shape[0]), 400, replace=False)
train_unseen_inds = np.array(list(set(list(np.arange(feat_unseen_train.shape[0]))) - set(list(val_unseen_inds))))
train_seen_inds = np.array(list(set(list(np.arange(feat_seen_train.shape[0]))) - set(list(val_seen_inds))))
# mod_unseen_zs_feat = np.concatenate([feat_noun_unseen_zs, feat_verb_unseen_zs], 1)
# mod_seen_zs_feat = np.concatenate([feat_noun_seen_zs, feat_verb_seen_zs], 1)
gating_train_x = np.concatenate([np.concatenate([feat_unseen_zs[train_unseen_inds, :], feat_unseen_train[train_unseen_inds, :]], 1), np.concatenate([feat_seen_zs[train_seen_inds, :], feat_seen_train[train_seen_inds, :]], 1)], 0)
gating_train_y = [0]*len(train_unseen_inds) + [1]*len(train_seen_inds)
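            # Gating labels: 0 = unseen-class sample, 1 = seen-class sample.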
gating_val_x = np.concatenate([np.concatenate([feat_unseen_zs[val_unseen_inds, :], feat_unseen_train[val_unseen_inds, :]], 1), np.concatenate([feat_seen_zs[val_seen_inds, :], feat_seen_train[val_seen_inds, :]], 1)], 0)
gating_val_y = [0]*len(val_unseen_inds) + [1]*len(val_seen_inds)
train_inds = np.arange(gating_train_x.shape[0])
np.random.shuffle(train_inds)
# val_inds = np.arange(gating_val_x.shape[0])
# np.random.shuffle(val_inds)
model = LogisticRegression(random_state=0, C=1, solver='lbfgs', n_jobs=-1,
multi_class='multinomial', verbose=1, max_iter=5000,
).fit(gating_train_x[train_inds, :], np.array(gating_train_y)[train_inds])
prob = model.predict_proba(gating_val_x)
best = 0
bestT = 0
for t in range(25, 75, 1):
y = prob[:, 0] > t/100
acc = np.sum((1 - y) == gating_val_y)/len(gating_val_y)
# print(acc)
if acc > best:
best = acc
bestT = t/100
fin_val_acc += best
pred_train = model.predict(gating_train_x)
train_acc = np.sum(pred_train == gating_train_y)/len(gating_train_y)
fin_train_acc += train_acc
print('thresh', bestT)
print(fin_val_acc/1)
print(fin_train_acc/1)
import pickle as pkl
with open('/ssd_scratch/cvit/pranay.gupta/language_modelling/cada_vae_shift_10_r_latent_100/gating_model_t4_thresh0.62_seen.pkl', 'wb') as f:
pkl.dump(model, f)
prob = model.predict_proba(gating_val_x)
best = 0
bestT = 0
for t in range(25, 100, 1):
y = prob[:, 0] > t/100
acc = np.sum((1 - y) == gating_val_y)/len(gating_val_y)
print(acc)
if acc > best:
best = acc
bestT = t/100
bestT
best
###Output
_____no_output_____ |
Implementations/ShuffleNet/ShuffleNet_implementation.ipynb | ###Markdown
Implementation of ShuffleNetWe will use the [tensorflow.keras Functional API](https://www.tensorflow.org/guide/keras/functional) to build ShuffleNet from the original paper: "[ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices](https://arxiv.org/abs/1707.01083)" by Xiangyu Zhang, Xinyu Zhou, Mengxiao Lin, Jian Sun.---In the paper we can read:>**[i]** "The first building block in each stage is applied with stride = 2. Other hyper-parameters within a stage stay the same, and for the next stage the output channels are doubled".>>**[ii]** "Similar to [9], we set the number of bottleneck channels to 1/4 of the output channels for each ShuffleNet unit">>**[iii]** "we add a Batch Normalization layer [15] after each of the convolutions to make end-to-end training easier.">>**[iv]** "Note that for Stage 2, we do not apply group convolution on the first pointwise layer because the number of input channels is relatively small."We will also make use of the following Table **[v]**:as well as the following Diagrams **[vi]**Figure 2. ShuffleNet Units. a) bottleneck unit [9] with depthwise convolution (DWConv) [3, 12]; b) ShuffleNet unit with pointwise group convolution (GConv) and channel shuffle; c) ShuffleNet unit with stride = 2.and **[vii]**Figure 1. Channel shuffle with two stacked group convolutions. GConv stands for group convolution. a) two stacked convolution layers with the same number of groups. Each output channel only relates to the input channels within the group. No cross talk; b) input and output channels are fully related when GConv2 takes data from different groups after GConv1; c) an equivalent implementation to b) using channel shuffle.--- Network architectureBased on **[v]** the model starts with a stem of Convolution-Max Pool and continues with a number of **Stages** before the final Global Pool-Fully Connected layers.Each **Stage** consists of two parts:1. One **Shufflenet block** with strides 22. a number of repeated **Shufflenet blocks** with strides 1Each one of the right most columns of **[v]** corresponds to a model architecture with a different number of internal groups (g). In our case we are going to implement the "*g = 8*" model, however the code will be general enough to support any other combination of number of:- groups- stages- repetitions per stage Shufflenet blockThe Shufflenet block is the building block of this network. Similar to the ResNet block there are two variations of the block based on whether the spatial dimensions of the input tensor change (strides = 2) or not (strides = 1).In the first case we apply a 3x3 Average Pool with strides 2 at the shortcut connection as depicted at **[vi]**.The main branch of the block consists of:1. 1x1 **Group Convolution** with 1/4 filters (GConv) followed by Batch Normalization and ReLU2. **Channel Shuffle** operation (**[ii]**)3. 3x3 DepthWise Convolution (with or w/o strides=2) followed by Batch Normalization4. 1x1 **Group Convolution** followed by Batch NormalizationThe tensors of the main branch and the shortcut connection are then concatenated and a ReLU activation is applied to the output. Group ConvolutionThe idea of *Group Convolution* is to separate the input tensor into g sub-tensors, each one with $1/g$ distinct channels of the initial tensor. Then we apply a 1x1 Convolution to each sub-tensor and finally we concatenate all the sub-tensors together (**[vii]**).
Channel ShuffleChannel shuffle is an operation of shuffling the channels of the input tensor as shown at **[7b,c]**.In order to shuffle the channels we1. reshape the input tensor:>from: `width x height x channels`>>to: `width x height x groups x (channels/groups)`2. permute the last two dimensions3. reshape the tensor to the original shapeA simple example of the results of this operation can be seen at the following application of the operation on a 6-element array$$\begin{matrix} 1 & 2 & 3 & 4 & 5 & 6\end{matrix}$$1. reshape to $groups \times \frac{n}{groups} (groups=2)$$$\begin{matrix} 1 & 2 & 3 \\4 & 5 & 6\end{matrix}$$2. permute the dimensions$$\begin{matrix} 1 & 4 \\2 & 5 \\3 & 6\end{matrix}$$3. reshape to the original shape$$\begin{matrix} 1 & 4 & 2 & 5 & 3 & 6\end{matrix}$$--- WorkflowWe will:1. import the necessary layers2. write a helper function for the **Stage**3. write a helper function for the **Shufflenet block**4. write a helper function for the **Group Convolution**5. write a helper function for the **Channel Shuffle**6. write the stem of the model7. use the helper function to write the main part of the model8. write the last part of the model and build it--- 1. Imports
###Code
from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, \
Dense, Concatenate, Add, ReLU, BatchNormalization, AvgPool2D, \
MaxPool2D, GlobalAvgPool2D, Reshape, Permute, Lambda
###Output
_____no_output_____
###Markdown
2. StageThe Stage function will:- take as inputs: - a tensor (**`x`**) - the number of channels (also called filters) (**`channels`**) - the number of repetitions of the second part of the stage (**`repetitions`**) - the number of groups for the Group Convolution blocks (**`groups`**)- run: - apply a Shufflenet block with strides=2 - apply **`repetitions`** times a Shufflenet block with strides=1- return the tensor
###Code
def stage(x, channels, repetitions, groups):
x = shufflenet_block(x, channels=channels, strides=2, groups=groups)
for i in range(repetitions):
x = shufflenet_block(x, channels=channels, strides=1, groups=groups)
return x
###Output
_____no_output_____
###Markdown
3. Shufflenet blockThe Shufflenet block will:- take as inputs: - a tensor (**`tensor`**) - the number of channels (**`channels`**) - the strides (**`strides`**) - the number of groups for the Group Convolution blocks (**`groups`**)- run: - apply a Group Convolution block with 1/4 **`channels`** channels followed by *Batch Normalization-ReLU* - apply **`Channel Shuffle`** to this tensor - apply a *Depthwise Convolution* layer followed by *Batch Normalization* - if **`strides`** is 2: - subtract from **`channels`** the number of channels of **`tensor`** so that after the concatenation the output tensor will have **`channels`** channels - apply a Group Convolution block with **`channels`** channels followed by *Batch Normalization* - if **`strides`** is 1: - *add* this tensor to the input **`tensor`** - else: - apply a 3x3 *Average Pool* with strides 2 (**[vi]**) to the input **`tensor`** and *concatenate* it with this tensor - apply *ReLU* activation to the tensor- return the tensorNote that according to **[iv]** we should not apply Group Convolution to the first input (24 channels) and should apply only the Convolution operation instead, which we can code with a simple `if-else` statement. However, for the sake of clarity of the code we omit it.
###Code
def shufflenet_block(tensor, channels, strides, groups):
x = gconv(tensor, channels=channels // 4, groups=groups)
x = BatchNormalization()(x)
x = ReLU()(x)
x = channel_shuffle(x, groups)
x = DepthwiseConv2D(kernel_size=3, strides=strides, padding='same')(x)
x = BatchNormalization()(x)
if strides == 2:
channels = channels - tensor.get_shape().as_list()[-1]
x = gconv(x, channels=channels, groups=groups)
x = BatchNormalization()(x)
if strides == 1:
x = Add()([tensor, x])
else:
avg = AvgPool2D(pool_size=3, strides=2, padding='same')(tensor)
x = Concatenate()([avg, x])
output = ReLU()(x)
return output
###Output
_____no_output_____
###Markdown
4. Group ConvolutionThe Group Convolution function will:- take as inputs: - a tensor (**`tensor`**) - the number of channels of the output tensor (**`channels`**) - the number of groups (**`groups`**)- run: - get the number of channels (**`input_ch`**) of the input tensor using the get_shape() method - calculate the number of channels per group (**`group_ch`**) by dividing **`input_ch`** by **`groups`** - calculate how many channels each group will have after the Convolution layer (**`output_ch`**). It will be equal to **`channels`** divided by **`groups`** - for every group: - get the **`group_tensor`** which will be a sub-tensor of **`tensor`** with specific channels - apply a 1x1 Convolution layer with **`output_ch`** channels - add the tensor to a list (**`groups_list`**) - *Concatenate* all the tensors of **`groups_list`** to one tensor- return the tensorNote that there is a commented line in the code below. One can get a slice of a tensor by using the simple slicing notation `a[:, b:c, d:e]`, but the code takes too long to run (as it does in the case of tensorflow.slice()). By using a Lambda layer and applying it on the tensor we get the same result but much faster.
###Code
def gconv(tensor, channels, groups):
input_ch = tensor.get_shape().as_list()[-1]
group_ch = input_ch // groups
output_ch = channels // groups
groups_list = []
for i in range(groups):
group_tensor = tensor[:, :, :, i * group_ch: (i+1) * group_ch]
# group_tensor = Lambda(lambda x: x[:, :, :, i * group_ch: (i+1) * group_ch])(tensor)
group_tensor = Conv2D(output_ch, 1)(group_tensor)
groups_list.append(group_tensor)
output = Concatenate()(groups_list)
return output
###Output
_____no_output_____
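A quick shape check makes the grouping concrete. This is only an illustrative sketch with made-up shapes; it assumes the `gconv` helper defined above (and the Keras `Input` layer) is in scope:

```python
from tensorflow.keras.layers import Input

t = Input([56, 56, 16])                # 16 input channels -> 4 groups of 4 channels each
out = gconv(t, channels=32, groups=4)  # every group gets a 1x1 Conv2D with 32/4 = 8 filters
print(out.shape)                       # 32 channels after concatenating the 4 group outputs
```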
###Markdown
5. Channel ShuffleThe Channel Shuffle function will:- take as inputs: - a tensor (**`x`**) - the number of groups (**`groups`**)- run: - get the dimensions (**`width, height, channels`**) of the input tensor. Note that the first number of `x.get_shape().as_list()` will be the batch size. - calculate the number of channels per group (**`group_ch`**) - reshape **`x`** to **`width`** x **`height`** x **`group_ch`** x **`groups`** - permute the last two dimensions of the tensor (**`group_ch`** x **`groups`** -> **`groups`** x **`group_ch`**) - reshape **`x`** to its original shape (**`width`** x **`height`** x **`channels`**)- return the tensor
###Code
def channel_shuffle(x, groups):
_, width, height, channels = x.get_shape().as_list()
group_ch = channels // groups
x = Reshape([width, height, group_ch, groups])(x)
x = Permute([1, 2, 4, 3])(x)
x = Reshape([width, height, channels])(x)
return x
###Output
_____no_output_____
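To see the shuffle pattern without any Keras machinery, the same reshape/transpose/reshape trick can be checked in plain NumPy on the 6-element example from the introduction (illustrative only):

```python
import numpy as np

a = np.arange(1, 7)                         # [1 2 3 4 5 6]
groups = 2
shuffled = a.reshape(groups, -1).T.ravel()  # reshape to 2x3, transpose, flatten
print(shuffled)                             # [1 4 2 5 3 6]
```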
###Markdown
6. Stem of the modelNow we can start coding the model. We will start with the model's stem. According to **[v]** the first layer of the model is a 3x3 Convolution layer with 24 filters followed by (**[iii]**) a BatchNormalization and a ReLU activation.The next layer is a 3x3 Max Pool with strides 2.
###Code
input = Input([224, 224, 3])
x = Conv2D(filters=24, kernel_size=3, strides=2, padding='same')(input)
x = BatchNormalization()(x)
x = ReLU()(x)
x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)
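# x now has shape (None, 56, 56, 24): the stride-2 convolution and the stride-2
# max pool each halve the 224x224 spatial dimensions.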
###Output
_____no_output_____
###Markdown
7. Main part of the modelThe main part of the model consists of **`Stage`** blocks. We first define the hyperparameters **`repetitions`**, **`initial_channels`** according to **[v]** and **`groups`**. Then for each number of repetitions we calculate the number of channels according to **[i]** and apply the `stage()` function on the tensor.
###Code
repetitions = 3, 7, 3
initial_channels = 384
groups = 8
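# With initial_channels = 384 and g = 8, the three stages use 384, 768 and 1536
# output channels, doubling at each stage as described in [i].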
for i, reps in enumerate(repetitions):
channels = initial_channels * (2**i)
x = stage(x, channels, reps, groups)
###Output
_____no_output_____
###Markdown
8. Rest of the modelThe model closes with a Global Pool layer and a Fully Connected one with 1000 classes (**[v]**).
###Code
x = GlobalAvgPool2D()(x)
output = Dense(1000, activation='softmax')(x)
from tensorflow.keras import Model
model = Model(input, output)
from tensorflow.keras.utils import plot_model
plot_model(model, show_shapes=True)
###Output
_____no_output_____
###Markdown
Final code
###Code
from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, \
Dense, Concatenate, Add, ReLU, BatchNormalization, AvgPool2D, \
MaxPool2D, GlobalAvgPool2D, Reshape, Permute, Lambda
def stage(x, channels, repetitions, groups):
x = shufflenet_block(x, channels=channels, strides=2, groups=groups)
for i in range(repetitions):
x = shufflenet_block(x, channels=channels, strides=1, groups=groups)
return x
def shufflenet_block(tensor, channels, strides, groups):
x = gconv(tensor, channels=channels // 4, groups=groups)
x = BatchNormalization()(x)
x = ReLU()(x)
x = channel_shuffle(x, groups)
x = DepthwiseConv2D(kernel_size=3, strides=strides, padding='same')(x)
x = BatchNormalization()(x)
if strides == 2:
channels = channels - tensor.get_shape().as_list()[-1]
x = gconv(x, channels=channels, groups=groups)
x = BatchNormalization()(x)
if strides == 1:
x = Add()([tensor, x])
else:
avg = AvgPool2D(pool_size=3, strides=2, padding='same')(tensor)
x = Concatenate()([avg, x])
output = ReLU()(x)
return output
def gconv(tensor, channels, groups):
input_ch = tensor.get_shape().as_list()[-1]
group_ch = input_ch // groups
output_ch = channels // groups
groups_list = []
for i in range(groups):
# group_tensor = tensor[:, :, :, i * group_ch: (i+1) * group_ch]
group_tensor = Lambda(lambda x: x[:, :, :, i * group_ch: (i+1) * group_ch])(tensor)
group_tensor = Conv2D(output_ch, 1)(group_tensor)
groups_list.append(group_tensor)
output = Concatenate()(groups_list)
return output
def channel_shuffle(x, groups):
_, width, height, channels = x.get_shape().as_list()
group_ch = channels // groups
x = Reshape([width, height, group_ch, groups])(x)
x = Permute([1, 2, 4, 3])(x)
x = Reshape([width, height, channels])(x)
return x
input = Input([224, 224, 3])
x = Conv2D(filters=24, kernel_size=3, strides=2, padding='same')(input)
x = BatchNormalization()(x)
x = ReLU()(x)
x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)
repetitions = 3, 7, 3
initial_channels = 384
groups = 8
for i, reps in enumerate(repetitions):
channels = initial_channels * (2**i)
x = stage(x, channels, reps, groups)
x = GlobalAvgPool2D()(x)
output = Dense(1000, activation='softmax')(x)
from tensorflow.keras import Model
model = Model(input, output)
###Output
_____no_output_____ |
Analysis_software/Figures_2_S2/Figures_S2C.ipynb | ###Markdown
Born to slide: mobile origin licensing factors confer resistance to transcription conflicts Jupyter notebooks Figures:+ S2C Archives:+ Photobleaching_stalled_RNAP.yama Additional data:+ none Initialize ImageJ
###Code
import imagej
ij = imagej.init('/Applications/Fiji.app')
ij.getVersion()
###Output
_____no_output_____
###Markdown
Imports
###Code
import sys
sys.path.insert(0, '..')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
import awesome_data
from marspy.convert.archive import DnaMoleculeArchive, instantiate_archive, describe_archives
from marspy.stats import bootstrap, calc_ci
from tqdm.notebook import tqdm
from functools import reduce
###Output
marspy initialized.
###Markdown
Select archives
###Code
selected_archives = ['Photobleaching_stalled_RNAP.yama']
###Output
_____no_output_____
###Markdown
Figure style and output directory
###Code
from figure_style import *
set_style_paper()
dir_out = '/Volumes/pool-duderstadt/Matthias/Manuscript/Figures/SVGs/Figure2/'
dir_out
###Output
_____no_output_____
###Markdown
Instantiate selected archives
###Code
for archive in tqdm(selected_archives):
instantiate_archive(archive, awesome_data.datasets)
###Output
_____no_output_____
###Markdown
Archives Overview
###Code
describe_archives(DnaMoleculeArchive.instances)
###Output
_____no_output_____
###Markdown
Load segment tables, apply filter & calculate bleaching steps
###Code
for index,archive in enumerate(tqdm(DnaMoleculeArchive.instances)):
archive.add_segments_tables()
for i in range(5):
archive.filter_segments()
archive.calc_bleaching_steps()
###Output
_____no_output_____
###Markdown
Generate main df
###Code
def setup_pandas_df(archive_instances):
'''Sets up a 2-level df with all relevant information'''
#indices will be UIDs
#outside: set comprehension to cover all possible prefixes in all instances
    col_1 = sorted(list({prefix for archive in archive_instances for prefix in archive.prefixes}))
#inside
col_2 = 'position_on_dna bleaching_steps initial_intensity lifetime'.split()
hier_index = pd.MultiIndex.from_product([col_1,col_2],names=['molecule','properties'])
return pd.DataFrame(columns=hier_index)
df = setup_pandas_df(DnaMoleculeArchive.instances)
# sort MultiIndexCols for performance
df.sort_index(axis=1, inplace=True)
df.head()
# fill dataframe
for archive in tqdm(DnaMoleculeArchive.instances):
for molecule in archive.molecules:
#general columns first
#this could be generalized with mol prefixes
df.loc[molecule.uid,'number_t7'] = molecule.params['Number_T7']
df.loc[molecule.uid,'tags'] = reduce(lambda tag1,tag2: tag1+','+tag2, molecule.tags)
df.loc[molecule.uid,'nucleotide'] = archive.nucleotide
df.loc[molecule.uid,'dna_length'] = molecule.calc_length_dna()
for prefix in molecule.prefixes:
#mean position
df.loc[molecule.uid,(prefix,'position_on_dna')] = molecule.df[prefix+'Position_on_DNA'].mean()
#sometimes bleaching is rejected (reject_bleach_prefix)
if not 'reject_bleach_'+prefix in molecule.tags:
#from attribute
df.loc[molecule.uid,(prefix,'bleaching_steps')] = molecule.bleaching_steps[prefix]
#take intensity from corrected segment table (intensity in first segment)
df.loc[molecule.uid,(prefix,'initial_intensity')] = \
list(filter(lambda seg_df: seg_df.prefix == prefix, molecule.seg_dfs))[0].df.loc[0,'y1']
#lifetime (in frames) defined on tracking length
df.loc[molecule.uid,(prefix,'lifetime')] = len(molecule.df[prefix+'y'].dropna())
# convert tags back to list
df['tags'] = df['tags'].apply(lambda tags: tags.split(','))
# again sort MultiIndexCols for performance
df.sort_index(axis=1, inplace=True)
# fix data types
df = df.infer_objects()
df.head()
###Output
_____no_output_____
###Markdown
Figures Figure S2C: T7 RNAP bleaching steps
###Code
# estimate standard error by bootstrapping
_plot_df = pd.DataFrame(columns=['bleaching_steps'])
protein = 'T7'
_temp_df = pd.DataFrame(data=(pd.melt(df.filter(regex=(protein+"_"),axis=1)
.xs('bleaching_steps',level='properties',axis=1))
['value'].dropna().value_counts(normalize=True).rename('probability').
reset_index().sort_values('index')))
_temp_df.columns = ['bleaching_steps','probability']
_plot_df = pd.concat([_plot_df,_temp_df])
_plot_df = _plot_df.convert_dtypes()
_plot_df.reset_index(drop=True,inplace=True)
_plot_df
for row in _plot_df.index:
bootstrap_data = pd.DataFrame(data=(pd.melt(df.filter(regex=(protein+"_"),axis=1)
.xs('bleaching_steps',level='properties',axis=1))['value'].dropna()))
bootstrap_data['value'] = bootstrap_data['value'].apply(lambda value: 1 if value == _plot_df.loc[row,'bleaching_steps'] else 0)
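    # Indicator variable: 1 if a molecule bleaches in exactly this many steps, 0 otherwise;
    # bootstrapping its mean gives the standard error of the plotted probability.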
bootstrap_means = bootstrap(bootstrap_data['value'],n_boot=10000)
_plot_df.loc[row,'bs_mean'] = np.mean(bootstrap_means)
_plot_df.loc[row,'standard_error'] = np.std(bootstrap_means)
_plot_df.loc[row,'68_ci_lower'], _plot_df.loc[row,'68_ci_upper'] = calc_ci(bootstrap_means,ci=68)
_plot_df.loc[row,'95_ci_lower'], _plot_df.loc[row,'95_ci_upper'] = calc_ci(bootstrap_means,ci=95)
_plot_df
fig,axes = plt.subplots()
protein = 'T7'
sns.barplot(x='bleaching_steps',y='probability',data=_plot_df, lw=0.5, edgecolor='.15',
palette=palettes[protein][1:], ax=axes)
axes.errorbar(x=_plot_df.index,y='probability',data=_plot_df, yerr='standard_error',fmt='none',
ecolor='.15', elinewidth=0.5, capsize=2, capthick=0.5, capstyle='round')
axes.set_xlim(-0.5,8)
axes.set_ylim(0,1)
axes.set_xlabel('RNAP bleaching steps')
axes.set_ylabel('Probability')
axes.text(0.05, 0.95, f"n = {len(pd.melt(df.filter(regex=(protein+'_'),axis=1).xs('bleaching_steps',level='properties',axis=1))['value'].dropna())}",
verticalalignment='top', horizontalalignment='left',
transform=axes.transAxes)
#fig.tight_layout()
sns.despine()
# save
plt.savefig(dir_out+'S2C_GAC_loaded_T7_bleaching.svg',transparent=True)
###Output
_____no_output_____
###Markdown
Born to slide: mobile origin licensing factors confer resistance to conflicts with RNA polymerase Jupyter notebooks Figures:+ S2C Archives:+ Photobleaching_stalled_RNAP.yama Additional data:+ none Initialize ImageJ
###Code
import imagej
ij = imagej.init('/Applications/Fiji.app')
ij.getVersion()
###Output
_____no_output_____
###Markdown
Imports
###Code
import sys
sys.path.insert(0, '..')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
import awesome_data
from marspy.convert.archive import DnaMoleculeArchive, instantiate_archive, describe_archives
from marspy.stats import bootstrap, calc_ci
from tqdm.notebook import tqdm
from functools import reduce
###Output
marspy initialized.
###Markdown
Select archives
###Code
selected_archives = ['Photobleaching_stalled_RNAP.yama']
###Output
_____no_output_____
###Markdown
Figure style and output directory
###Code
from figure_style import *
set_style_paper()
dir_out = '/Volumes/pool-duderstadt/Matthias/Manuscript/Figures/SVGs/Figure2/'
dir_out
###Output
_____no_output_____
###Markdown
Instantiate selected archives
###Code
for archive in tqdm(selected_archives):
instantiate_archive(archive, awesome_data.datasets)
###Output
_____no_output_____
###Markdown
Archives Overview
###Code
describe_archives(DnaMoleculeArchive.instances)
###Output
_____no_output_____
###Markdown
Load segment tables, apply filter & calculate bleaching steps
###Code
for index,archive in enumerate(tqdm(DnaMoleculeArchive.instances)):
archive.add_segments_tables()
for i in range(5):
archive.filter_segments()
archive.calc_bleaching_steps()
###Output
_____no_output_____
###Markdown
Generate main df
###Code
def setup_pandas_df(archive_instances):
'''Sets up a 2-level df with all relevant information'''
#indices will be UIDs
#outside: set comprehension to cover all possible prefixes in all instances
    col_1 = sorted(list({prefix for archive in archive_instances for prefix in archive.prefixes}))
#inside
col_2 = 'position_on_dna bleaching_steps initial_intensity lifetime'.split()
hier_index = pd.MultiIndex.from_product([col_1,col_2],names=['molecule','properties'])
return pd.DataFrame(columns=hier_index)
df = setup_pandas_df(DnaMoleculeArchive.instances)
# sort MultiIndexCols for performance
df.sort_index(axis=1, inplace=True)
df.head()
# fill dataframe
for archive in tqdm(DnaMoleculeArchive.instances):
for molecule in archive.molecules:
#general columns first
#this could be generalized with mol prefixes
df.loc[molecule.uid,'number_t7'] = molecule.params['Number_T7']
df.loc[molecule.uid,'tags'] = reduce(lambda tag1,tag2: tag1+','+tag2, molecule.tags)
df.loc[molecule.uid,'nucleotide'] = archive.nucleotide
df.loc[molecule.uid,'dna_length'] = molecule.calc_length_dna()
for prefix in molecule.prefixes:
#mean position
df.loc[molecule.uid,(prefix,'position_on_dna')] = molecule.df[prefix+'Position_on_DNA'].mean()
#sometimes bleaching is rejected (reject_bleach_prefix)
if not 'reject_bleach_'+prefix in molecule.tags:
#from attribute
df.loc[molecule.uid,(prefix,'bleaching_steps')] = molecule.bleaching_steps[prefix]
#take intensity from corrected segment table (intensity in first segment)
df.loc[molecule.uid,(prefix,'initial_intensity')] = \
list(filter(lambda seg_df: seg_df.prefix == prefix, molecule.seg_dfs))[0].df.loc[0,'y1']
#lifetime (in frames) defined on tracking length
df.loc[molecule.uid,(prefix,'lifetime')] = len(molecule.df[prefix+'y'].dropna())
# convert tags back to list
df['tags'] = df['tags'].apply(lambda tags: tags.split(','))
# again sort MultiIndexCols for performance
df.sort_index(axis=1, inplace=True)
# fix data types
df = df.infer_objects()
df.head()
###Output
_____no_output_____
###Markdown
Figures Figure S2C: T7 RNAP bleaching steps
###Code
# estimate standard error by bootstrapping
_plot_df = pd.DataFrame(columns=['bleaching_steps'])
protein = 'T7'
_temp_df = pd.DataFrame(data=(pd.melt(df.filter(regex=(protein+"_"),axis=1)
.xs('bleaching_steps',level='properties',axis=1))
['value'].dropna().value_counts(normalize=True).rename('probability').
reset_index().sort_values('index')))
_temp_df.columns = ['bleaching_steps','probability']
_plot_df = pd.concat([_plot_df,_temp_df])
_plot_df = _plot_df.convert_dtypes()
_plot_df.reset_index(drop=True,inplace=True)
_plot_df
for row in _plot_df.index:
bootstrap_data = pd.DataFrame(data=(pd.melt(df.filter(regex=(protein+"_"),axis=1)
.xs('bleaching_steps',level='properties',axis=1))['value'].dropna()))
bootstrap_data['value'] = bootstrap_data['value'].apply(lambda value: 1 if value == _plot_df.loc[row,'bleaching_steps'] else 0)
bootstrap_means = bootstrap(bootstrap_data['value'],n_boot=10000)
_plot_df.loc[row,'bs_mean'] = np.mean(bootstrap_means)
_plot_df.loc[row,'standard_error'] = np.std(bootstrap_means)
_plot_df.loc[row,'68_ci_lower'], _plot_df.loc[row,'68_ci_upper'] = calc_ci(bootstrap_means,ci=68)
_plot_df.loc[row,'95_ci_lower'], _plot_df.loc[row,'95_ci_upper'] = calc_ci(bootstrap_means,ci=95)
_plot_df
fig,axes = plt.subplots()
protein = 'T7'
sns.barplot(x='bleaching_steps',y='probability',data=_plot_df, lw=0.5, edgecolor='.15',
palette=palettes[protein][::2], ax=axes)
axes.errorbar(x=_plot_df.index,y='probability',data=_plot_df, yerr='standard_error',fmt='none',
ecolor='.15', elinewidth=0.5, capsize=2, capthick=0.5, capstyle='round')
axes.set_xlim(-0.5,8)
axes.set_ylim(0,1)
axes.set_xlabel('RNAP bleaching steps')
axes.set_ylabel('Probability')
axes.text(0.05, 0.95, f"n = {len(pd.melt(df.filter(regex=(protein+'_'),axis=1).xs('bleaching_steps',level='properties',axis=1))['value'].dropna())}",
verticalalignment='top', horizontalalignment='left',
transform=axes.transAxes)
#fig.tight_layout()
sns.despine()
# save
plt.savefig(dir_out+'S2C_GAC_loaded_T7_bleaching.svg',transparent=True)
###Output
_____no_output_____ |
notebooks/tensorflow_lattice/00_test_install.ipynb | ###Markdown
Use shift+enter to run each cell
###Code
import tensorflow as tf
import tensorflow_lattice as tfl
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Check TensorFlow version. We expect 1.3.0.
###Code
print("You have version %s" % tf.__version__)
# Reset the default graph to clean up things.
tf.reset_default_graph()
# placeholder is a way to feed a data from python to a computational graph.
# x is a 2d tensor with shape [?, 2].
#
# In TensorFlow, the first dimension is usually reserved for a batch size.
# Often we want to tune the batch size for SGD to maximize the througput during
# the training, batch_size is usually set to "None" to let TensorFlow know the
# size is unknown when we create the graph.
x = tf.placeholder(tf.float32, shape=(None, 2))
# Here we use lattice_layer to define a lattice model.
# lattice_layer expects 2d tensor [batch_size, input_dim] as an input.
# In this case, since x's shape is [?, 2], batch_size is unknown, but
# input_dim == 2.
# Here we set lattice_sizes = (2, 2) which means this lattice_layer defines
# 2 x 2 lattice.
# lattice_layer returns 4 elements:
# 1. output tensor
# 2. lookup param tensor
# 3. Projection operator
# 4. regularization loss (scalar tensor)
#
# We will cover other three components later, so let's focus on the output
# tensor.
# The output tensor is the output of this layer. Its shape is
# [batch_size, output_dim], where the default output_dim == 1.
# So in this case, y's shape is [?, 1].
(y, _, _, _) = tfl.lattice_layer(x, lattice_sizes=(2, 2))
# Run Session to get the value. Feel free to feed different values other than
# [[0.0, 0.0]].
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
result = sess.run(y, feed_dict={x: [[0.0, 0.0]]})
# We expect -0.5 as an output
assert (result[0][0] + 0.5) < 1e-7
print(result)
###Output
_____no_output_____
###Markdown
Check whether matplotlib is working
###Code
%matplotlib inline
# create some data using numpy: here simply y = x (the noise term is generated but not added)
x = np.random.rand(100).astype(np.float32)
noise = np.random.normal(scale=0.01, size=len(x))
y = x
# plot it
plt.plot(x, y, '.')
###Output
_____no_output_____
###Markdown
Use shift+enter to run each cell
###Code
import tensorflow as tf
import tensorflow_lattice as tfl
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Check TensorFlow version. We expect >= 1.4.0.
###Code
print("You have version %s" % tf.__version__)
# Reset the default graph to clean up things.
tf.reset_default_graph()
# placeholder is a way to feed a data from python to a computational graph.
# x is a 2d tensor with shape [?, 2].
#
# In TensorFlow, the first dimension is usually reserved for a batch size.
# Often we want to tune the batch size for SGD to maximize the throughput during
# the training, so batch_size is usually set to "None" to let TensorFlow know the
# size is unknown when we create the graph.
x = tf.placeholder(tf.float32, shape=(None, 2))
# Here we use lattice_layer to define a lattice model.
# lattice_layer expects 2d tensor [batch_size, input_dim] as an input.
# In this case, since x's shape is [?, 2], batch_size is unknown, but
# input_dim == 2.
# Here we set lattice_sizes = (2, 2) which means this lattice_layer defines
# 2 x 2 lattice.
# lattice_layer returns 4 elements:
# 1. output tensor
# 2. lookup param tensor
# 3. Projection operator
# 4. regularization loss (scalar tensor)
#
# We will cover other three components later, so let's focus on the output
# tensor.
# The output tensor is the output of this layer. Its shape is
# [batch_size, output_dim], where the default output_dim == 1.
# So in this case, y's shape is [?, 1].
(y, _, _, _) = tfl.lattice_layer(x, lattice_sizes=(2, 2))
# Run Session to get the value. Feel free to feed different values other than
# [[0.0, 0.0]].
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
result = sess.run(y, feed_dict={x: [[0.0, 0.0]]})
# We expect 0.0 as an output
assert (result[0][0]) < 1e-7
print(result)
###Output
_____no_output_____
###Markdown
Check whether matplotlib is working
###Code
%matplotlib inline
# create some data using numpy: here simply y = x (the noise term is generated but not added)
x = np.random.rand(100).astype(np.float32)
noise = np.random.normal(scale=0.01, size=len(x))
y = x
# plot it
plt.plot(x, y, '.')
###Output
_____no_output_____ |
EVC_p_alpha.ipynb | ###Markdown
EVC for p-alpha scattering, addressing non-local potential and Coulomb potential. Here we do tests of eigenvector continuation (EC) for proton-alpha scattering in the $S_{1/2}$ and $P_{3/2}$ channels based on a non-local potential from [Rev. Mod. Phys. 57, 923 (1985)](https://journals.aps.org/rmp/abstract/10.1103/RevModPhys.57.923). The codes are the same as those in the NN scattering notebook (`EVC_NN.ipynb`). The inputs are different here. Figures S3 and 5 from [arXiv:2007.03635](https://arxiv.org/abs/2007.03635), "Efficient emulators for scattering using eigenvector continuation" by R.J. Furnstahl, P.J. Millican, and Xilin Zhang are generated here. Table of contents1. [A code for testing the covex hull of basis points](convex_hull_code)1. [Set up the ouput directory](output_dir)1. [Code for generating results](main_code)1. [Proton-alpha scattering](Main_calculations) 1. [Set the global parameters and define the potential](global_para_setup) 1. [S-wave: test EC using a sample of test points in a 2-dim parameter space](test_EC_2dim) 1. [P-wave (3/2): test EC using a sample of test points in a 2-dim parameter space](test_EC_2dim_p3/2)1. [Collect the results and make plots for both channels (Figs. S3 and 5)](plots)
###Code
%load_ext autoreload
%autoreload 2
import numpy as np
from scipy.integrate import simps
from scipy.optimize import linprog
import matplotlib.pyplot as plt
from pyDOE import lhs # https://pythonhosted.org/pyDOE/
import pickle
# local imports
from Constants import *
from two_body_pot import two_body_pot, rmat_ini
from evc_two_body import EigenvectorContinuationScattering
###Output
_____no_output_____
###Markdown
A code for testing the convex hull of basis points. This checks within a parameter space whether a point is inside or outside the convex hull of a given set of basis points.
###Code
def in_hull(points, x):
'''points are basis points that define the convex hull,
while x is the point to be tested.
'''
n_points = len(points)
n_dim = len(x)
c = np.zeros(n_points)
A = np.r_[points.T,np.ones((1,n_points))]
b = np.r_[x, np.ones(1)]
lp = linprog(c, A_eq=A, b_eq=b)
return lp.success
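# Quick sanity check with hypothetical basis points: a point inside the triangle
# spanned by the basis should be flagged True, a point outside it False.
example_basis = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
assert in_hull(example_basis, np.array([0.2, 0.2]))      # inside the hull
assert not in_hull(example_basis, np.array([1.0, 1.0]))  # outside the hull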
###Output
_____no_output_____
###Markdown
Set up the output directory Set up a dictionary to save the calculation results. The pkl file will be saved in the output directory.
###Code
outdir='./Notebook_output/p_alpha/'
res_dict={}
###Output
_____no_output_____
###Markdown
Code for generating results As in the NN notebook (`EVC_NN.ipynb`):
###Code
def collect_results(outdir='./', channel='1S0', nuggetsize=1.e-10, num_basis_list=range(3,10),
basis_potpara_array_list=np.array([]), test_potpara_list=np.array([]),
local_pot_func= lambda r : 0, nonlocal_pot_func=lambda rvec:0 ):
test_pc_list=[ ]
num_test, dim_para=np.shape(test_potpara_list)
for i, test_potpara in enumerate(test_potpara_list):
if i%10==0: print(f'setting up test potential class list: {i, num_test }')
test_local_pot_func= lambda r : local_pot_func(r, test_potpara)
test_nonlocal_pot_func= lambda rvec : nonlocal_pot_func(rvec, test_potpara)
test_pc = two_body_pot(pot_func=test_nonlocal_pot_func, local_pot_func= test_local_pot_func,
mu=mu, hbar=hbarc, E_mesh=E_pts, r_c=r_c, r_max=r_max, r_in_mesh_pts=nr, r_out_mesh_pts=nr,
angL=angL, z_t=z_t, z_p= z_p)
test_pc_list.append(test_pc)
test_pc_list=np.array(test_pc_list)
for i, num_basis in enumerate(num_basis_list):
        # num_basis is the number of basis potentials used to build the emulator
print('looping over basis array: ' + f'{num_basis}')
output_file_prefix=outdir +channel + '_para-dim-' + f'{dim_para}' \
+ '_basis-' + f'{num_basis}'+'_full-sample-' + f'{num_test}' + '_'
# output-file prefix, which is used as key for the result dictionary.
basis_potpara_array = basis_potpara_array_list[i]
testinouthaull= np.apply_along_axis(lambda x : in_hull(basis_potpara_array, x), 1, test_potpara_list)
        # testinouthaull: a boolean array indicating whether each test point is inside or outside the convex hull of the basis points
local_pot_func_array = [(lambda V0var:
(lambda r : local_pot_func(r, V0var ) ) )(V0val) for V0val in basis_potpara_array ]
nonlocal_pot_func_array = [(lambda V0var:
(lambda rvec : nonlocal_pot_func(rvec, V0var ) ) )(V0val) for V0val in basis_potpara_array ]
pc_array = [two_body_pot( pot_func=nonlocal_pot_func_array[ii], local_pot_func= local_pot_func, mu=mu, hbar=hbarc,
E_mesh=E_pts, r_c=r_c, r_max=r_max, r_in_mesh_pts=nr, r_out_mesh_pts=nr,
angL=angL, z_t=z_t, z_p= z_p) for ii, local_pot_func in enumerate(local_pot_func_array)]
tau_var_mesh_list= []
ere_var_mesh_list=[]
c_vec_mesh_list=[]
lag_mesh_list=[]
delta_tilde_U_condition_mesh_list=[]
test_pc_list_E_mesh_list=[]
test_pc_list_delta_mesh_list=[]
test_pc_list_tau_mesh_list=[]
test_pc_list_ere_mesh_list=[]
for j, test_potpara in enumerate(test_potpara_list):
test_local_pot_func= lambda r : local_pot_func(r, test_potpara)
test_nonlocal_pot_func= lambda rvec : nonlocal_pot_func(rvec, test_potpara)
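            # Build the EC emulator from the basis potentials and emulate this test
            # potential: find_EVC_scattering returns the variational tau and
            # effective-range-expansion meshes, the basis-coefficient vectors, the
            # Lagrange multipliers, and a conditioning diagnostic of the
            # delta_tilde_U matrix (regularised here with a small nugget).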
my_evc = EigenvectorContinuationScattering(pc_array)
tau_var_mesh, ere_var_mesh, c_vec_mesh, lag_mesh, delta_tilde_U_condition_mesh = \
my_evc.find_EVC_scattering(new_pot_func=test_nonlocal_pot_func, new_local_pot_func=test_local_pot_func,
pinv=False, nugget=True, cond=nuggetsize)
tau_var_mesh_list.append(tau_var_mesh )
ere_var_mesh_list.append(ere_var_mesh)
c_vec_mesh_list.append(c_vec_mesh)
lag_mesh_list.append(lag_mesh)
delta_tilde_U_condition_mesh_list.append(delta_tilde_U_condition_mesh)
test_pc_list_E_mesh_list.append(test_pc_list[j].E_mesh)
test_pc_list_delta_mesh_list.append(test_pc_list[j].delta_mesh)
test_pc_list_tau_mesh_list.append(test_pc_list[j].tau_mesh)
test_pc_list_ere_mesh_list.append(test_pc_list[j].ere_mesh)
res_dict[output_file_prefix] = {"test_potpara_list":test_potpara_list, "basis_potpara_array":basis_potpara_array,
"test_pc_list_E_mesh_list": np.array(test_pc_list_E_mesh_list),
"test_pc_list_delta_mesh_list": np.array(test_pc_list_delta_mesh_list),
"test_pc_list_tau_mesh_list": np.array(test_pc_list_tau_mesh_list),
"test_pc_list_ere_mesh_list": np.array(test_pc_list_ere_mesh_list),
"testinouthaull": testinouthaull,
"tau_var_mesh_list": np.array(tau_var_mesh_list),
"ere_var_mesh_list": np.array(ere_var_mesh_list),
"c_vec_mesh_list ": np.array(c_vec_mesh_list) , "lag_mesh_list" : np.array(lag_mesh_list),
"delta_tilde_U_condition_mesh_list" : np.array(delta_tilde_U_condition_mesh_list) }
return test_pc_list
###Output
_____no_output_____
###Markdown
Proton-alpha scattering Set the global parameters and define the potential Note: for faster evaluation, choose `nr=50`, `r_c=12`. Explanations of variables can be found in the NN notebook (`EVC_NN.ipynb`).
###Code
mu = 4*M_N/5; hbar = hbarc; r_c = 12.; r_max = 20; z_p = 1; z_t = 2;
nr = 50;
E_pts = np.linspace(0.01, 30, 100)
def f(r, angL, beta):
return r**angL * np.exp(-beta*r)
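# Separable (rank-1) non-local potential V(r, r') = V0 * f(r) * f(r'),
# with form factor f(r, L, beta) = r^L * exp(-beta * r) defined above.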
def nonlocal_pot_func_p_alpha(rvec, angL, V0, beta):
r = rvec[0]
rp = rvec[1]
return V0 * f(r, angL, beta ) * f(rp, angL, beta)
V_shalf = -6.5 * hbarc**2 / 2 / mu; beta_shalf = 0.8;
V_p3half = -11.25 * hbarc**2 / 2 / mu; beta_p3half = 1.25;
V_shalf, V_p3half
###Output
_____no_output_____
###Markdown
S-wave: test EC using a sample of test points in a 2-dim parameter space Vary both the potential strength and width
###Code
angL=0
channel='Shalf'
deltaV= 100 # MeV, the plus and minus range of potential-strength's variation
deltawidth_relative=0.5
num_full=200 # number of test-potential points
dim_para=2 # dim of the parameter space
nuggetsize=1.e-8
np.random.seed(8209)
test_potpara_list= np.apply_along_axis(lambda v : np.array( [ V_shalf+ (v[0] - 0.5)*2*deltaV ,
beta_shalf*( 1 + (v[1] - 0.5)/0.5*deltawidth_relative) ] ) ,
1, np.random.rand(num_full, dim_para))
num_basis_list= np.arange(3,10)
np.random.seed(754)
basis_potpara_array_list= [np.apply_along_axis(lambda v : np.array( [ V_shalf+ (v[0] - 0.5)*2*deltaV ,
beta_shalf*( 1 + (v[1] - 0.5)/0.5*deltawidth_relative) ] ) ,
1, lhs(dim_para,num_basis)) for num_basis in num_basis_list]
local_pot_func= lambda r, potpara_vec : 0
nonlocal_pot_func= lambda rvec, potpara_vec: nonlocal_pot_func_p_alpha(rvec, angL, V0=potpara_vec[0], beta=potpara_vec[1])
test_pc_list_Shalf = collect_results(outdir=outdir, channel=channel, nuggetsize=nuggetsize, num_basis_list=num_basis_list,
basis_potpara_array_list=basis_potpara_array_list, test_potpara_list=test_potpara_list,
local_pot_func= local_pot_func, nonlocal_pot_func=nonlocal_pot_func)
### save the results to a file
with open(outdir + 'res_dict.pkl', 'wb') as output:
pickle.dump(res_dict, output, pickle.HIGHEST_PROTOCOL)
###Output
_____no_output_____
###Markdown
P-wave (3/2): test EC using a sample of test points in a 2-dim parameter space Vary both potential strength and width
###Code
angL=1
channel='P3half'
deltaV = 100 # MeV, the plus and minus range of potential-strength's variation
deltawidth_relative = 0.5
num_full = 200 # number of test-potential points
dim_para = 2 # dim of the parameter space
nuggetsize = 1.e-8
np.random.seed(82349)
test_potpara_list = np.apply_along_axis(lambda v : np.array( [ V_p3half+ (v[0] - 0.5)*2*deltaV ,
beta_p3half*( 1 + (v[1] - 0.5)/0.5*deltawidth_relative)] ) ,
1, np.random.rand(num_full, dim_para))
num_basis_list = np.arange(3,10)
np.random.seed(65324)
basis_potpara_array_list = [np.apply_along_axis(lambda v : np.array( [ V_p3half+ (v[0] - 0.5)*2*deltaV ,
beta_p3half*( 1 + (v[1] - 0.5)/0.5*deltawidth_relative) ] ) ,
1, lhs(dim_para,num_basis)) for num_basis in num_basis_list]
local_pot_func = lambda r, potpara_vec : 0
nonlocal_pot_func = lambda rvec, potpara_vec: nonlocal_pot_func_p_alpha(rvec, angL, V0=potpara_vec[0], beta=potpara_vec[1])
test_pc_list_P3half = collect_results(outdir=outdir, channel=channel, nuggetsize=nuggetsize, num_basis_list=num_basis_list,
basis_potpara_array_list=basis_potpara_array_list, test_potpara_list=test_potpara_list,
local_pot_func= local_pot_func, nonlocal_pot_func=nonlocal_pot_func)
### save the accumulated results (now including both channels) to a file
with open(outdir+'res_dict.pkl', 'wb') as output:
pickle.dump(res_dict, output, pickle.HIGHEST_PROTOCOL)
###Output
_____no_output_____
###Markdown
Collect the results and make plots for both channels
###Code
with open(outdir+'res_dict.pkl', 'rb') as output:
res_dict=pickle.load(output)
# for shalf
channel , dim_para, num_full = 'Shalf', 2, 200
color_list= ['red', 'blue', 'black']
linestyle_list= ['--', '-', ':']
fig, ax = plt.subplots(ncols=2, nrows=2, figsize=(3.4*2, 3.4*2*2/3), sharex=True, sharey=True)
fig.suptitle('Relative error for '+r'$p$-$\alpha$ ($S_{1/2}$)'+' scattering; varying '+r'$V_{s}$, $\beta_{s}$' , x=0.5, y= 1.02, fontsize=12)
ax[0,0].set_yscale('log')
ax[0,0].tick_params(bottom=True, top=True, left=True, right=True)
ax[0,0].set_title('Mean for interpolation', fontsize= 10)
ax[0,1].set_yscale('log')
ax[0,1].tick_params(bottom=True, top=True, left=True, right=True)
ax[0,1].set_title('Mean for extrapolation', fontsize= 10)
ax[1,0].set_yscale('log')
ax[1,0].set_xlabel('E (MeV)', fontsize=10)
ax[1,0].tick_params(bottom=True, top=True, left=True, right=True)
ax[1,0].set_title('Std for interpolation', fontsize= 10)
ax[1,1].set_yscale('log')
ax[1,1].set_xlabel('E (MeV)', fontsize=10)
ax[1,1].tick_params(bottom=True, top=True, left=True, right=True)
ax[1,1].set_title('Std for extrapolation', fontsize= 10)
########### with num_basis = [5, 7, 9]
for i, num_basis in enumerate([5,7,9]) :
output_file_prefix=outdir + channel + '_para-dim-' + f'{dim_para}' \
+ '_basis-' + f'{num_basis}'+'_full-sample-' + f'{num_full}' + '_'
test_pc_list_tau_mesh_list, testinouthaull, tau_var_mesh_list = \
map(res_dict[output_file_prefix].get, ['test_pc_list_tau_mesh_list', 'testinouthaull', 'tau_var_mesh_list'] )
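    # Split the test points into "interpolation" (inside the convex hull of the
    # basis points) and "extrapolation" (outside it) before averaging the
    # relative errors of the emulated tau over the test sample.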
tau_mesh_list_1 = test_pc_list_tau_mesh_list[testinouthaull]
tau_mesh_list_2 = test_pc_list_tau_mesh_list[np.logical_not(testinouthaull)]
tau_var_mesh_list_1=tau_var_mesh_list[testinouthaull]
tau_var_mesh_list_2=tau_var_mesh_list[np.logical_not(testinouthaull)]
ax[0,0].plot(E_pts[1:], np.mean( np.array( [ np.absolute(tau_mesh/tau_var_mesh_list_1[i]-1)
for i, tau_mesh in enumerate(tau_mesh_list_1) ]) , axis=0 )[1:], label = r"$N_b=$"+f'{num_basis}',
color=color_list[i], linestyle=linestyle_list[i])
ax[0,1].plot(E_pts[1:], np.mean( np.array( [ np.absolute(tau_mesh/tau_var_mesh_list_2[i]-1)
for i, tau_mesh in enumerate(tau_mesh_list_2) ]) , axis=0 )[1:], label = r"$N_b=$"+f'{num_basis}',
color=color_list[i], linestyle=linestyle_list[i])
ax[1,0].plot(E_pts[1:], np.std(np.array( [ np.absolute(tau_mesh/tau_var_mesh_list_1[i]-1)
for i, tau_mesh in enumerate(tau_mesh_list_1) ]) , axis=0 )[1:], label = r"$N_b=$"+f'{num_basis}',
color=color_list[i], linestyle=linestyle_list[i])
ax[1,1].plot(E_pts[1:], np.std(np.array( [ np.absolute(tau_mesh/tau_var_mesh_list_2[i]-1)
for i, tau_mesh in enumerate(tau_mesh_list_2) ]) , axis=0 )[1:], label = r"$N_b=$"+f'{num_basis}',
color=color_list[i], linestyle=linestyle_list[i])
ax[0,0].legend(loc='best', fontsize=10, ncol=3 , columnspacing=1, handlelength=1, handletextpad=0.5)
#ax[0,1].legend(loc='best', fontsize=10, ncol=3 , columnspacing=1, handlelength=1, handletextpad=0.5)
#ax[1,0].legend(loc='best', fontsize=10, ncol=3 , columnspacing=1, handlelength=1, handletextpad=0.5)
#ax[1,1].legend(loc='best', fontsize=10, ncol=3 , columnspacing=1, handlelength=1, handletextpad=0.5)
fig.tight_layout()
plt.savefig(outdir+'Shalf_'+'interpolation_vs_extrapolation_rel_error_tau.pdf', bbox_inches='tight')
# for P3half
channel , dim_para, num_full = 'P3half', 2, 200
color_list= ['red', 'blue', 'black']
linestyle_list= ['--', '-', ':']
fig, ax = plt.subplots(ncols=2, nrows=2, figsize=(3.4*2, 3.4*2*2/3), sharex=True, sharey=True)
fig.suptitle('Relative error for '+r'$p$-$\alpha$ ($P_{3/2}$)'+' scattering; varying '+r'$V_{p3/2}$, $\beta_{p3/2}$' ,
x=0.5, y= 1.02, fontsize=12)
ax[0,0].set_yscale('log')
ax[0,0].tick_params(bottom=True, top=True, left=True, right=True)
ax[0,0].set_title('Mean for interpolation', fontsize= 10)
ax[0,1].set_yscale('log')
ax[0,1].tick_params(bottom=True, top=True, left=True, right=True)
ax[0,1].set_title('Mean for extrapolation', fontsize= 10)
ax[1,0].set_yscale('log')
ax[1,0].set_xlabel('E (MeV)', fontsize=10)
ax[1,0].tick_params(bottom=True, top=True, left=True, right=True)
ax[1,0].set_title('Std for interpolation', fontsize= 10)
ax[1,1].set_yscale('log')
ax[1,1].set_xlabel('E (MeV)', fontsize=10)
ax[1,1].tick_params(bottom=True, top=True, left=True, right=True)
ax[1,1].set_title('Std for extrapolation', fontsize= 10)
########### with num_basis = [5, 7, 9]
for i, num_basis in enumerate([5,7,9]) :
output_file_prefix=outdir + channel + '_para-dim-' + f'{dim_para}' \
+ '_basis-' + f'{num_basis}'+'_full-sample-' + f'{num_full}' + '_'
test_pc_list_tau_mesh_list, testinouthaull, tau_var_mesh_list = \
map(res_dict[output_file_prefix].get, ['test_pc_list_tau_mesh_list', 'testinouthaull', 'tau_var_mesh_list'] )
tau_mesh_list_1 = test_pc_list_tau_mesh_list[testinouthaull]
tau_mesh_list_2 = test_pc_list_tau_mesh_list[np.logical_not(testinouthaull)]
tau_var_mesh_list_1=tau_var_mesh_list[testinouthaull]
tau_var_mesh_list_2=tau_var_mesh_list[np.logical_not(testinouthaull)]
ax[0,0].plot(E_pts[1:], np.mean( np.array( [ np.absolute(tau_mesh/tau_var_mesh_list_1[i]-1)
for i, tau_mesh in enumerate(tau_mesh_list_1) ]) , axis=0 )[1:], label = r"$N_b=$"+f'{num_basis}',
color=color_list[i], linestyle=linestyle_list[i])
ax[0,1].plot(E_pts[1:], np.mean( np.array( [ np.absolute(tau_mesh/tau_var_mesh_list_2[i]-1)
for i, tau_mesh in enumerate(tau_mesh_list_2) ]) , axis=0 )[1:], label = r"$N_b=$"+f'{num_basis}',
color=color_list[i], linestyle=linestyle_list[i])
ax[1,0].plot(E_pts[1:], np.std(np.array( [ np.absolute(tau_mesh/tau_var_mesh_list_1[i]-1)
for i, tau_mesh in enumerate(tau_mesh_list_1) ]) , axis=0 )[1:], label = r"$N_b=$"+f'{num_basis}',
color=color_list[i], linestyle=linestyle_list[i])
ax[1,1].plot(E_pts[1:], np.std(np.array( [ np.absolute(tau_mesh/tau_var_mesh_list_2[i]-1)
for i, tau_mesh in enumerate(tau_mesh_list_2) ]) , axis=0 )[1:], label = r"$N_b=$"+f'{num_basis}' ,
color=color_list[i], linestyle=linestyle_list[i])
ax[0,0].legend(loc='best', fontsize=10, ncol=3 , columnspacing=1, handlelength=1, handletextpad=0.5)
#ax[0,1].legend(loc='best', fontsize=10, ncol=3 , columnspacing=1, handlelength=1, handletextpad=0.5)
#ax[1,0].legend(loc='best', fontsize=10, ncol=3 , columnspacing=1, handlelength=1, handletextpad=0.5)
#ax[1,1].legend(loc='best', fontsize=10, ncol=3 , columnspacing=1, handlelength=1, handletextpad=0.5)
fig.tight_layout()
plt.savefig(outdir+'P3half_'+'interpolation_vs_extrapolation_rel_error_tau.pdf', bbox_inches='tight')
###Output
_____no_output_____ |
cross_asset_skewness.ipynb | ###Markdown
Cross-asset skewnessThis notebook analyses cross-asset cross-sectional skewness strategy. The strategy takes long positions on contracts with most negative historical skewness and short positions on ones with most positive skewness.
###Code
%matplotlib inline
from datetime import datetime
import logging
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
plt.style.use('bmh')
from vivace.backtest import signal
from vivace.backtest import processing
from vivace.backtest.contract import all_futures_baltas2019
from vivace.backtest.engine import BacktestEngine
from vivace.backtest.enums import Strategy
from vivace.backtest.stats import Performance
###Output
_____no_output_____
###Markdown
DataVarious futures contracts in commodity, currency, government bond futures and equity index futures are tested. Some contracts are missing in this data set due to data availability.
###Code
all_futures_baltas2019
all_futures_baltas2019.shape
###Output
_____no_output_____
###Markdown
Performance Run backtestFor each asset class, a simple portfolio is constructed using the trailing 1-year returns of each futures contract. Unlike studies in equities, the most recent month is included in the formation period. Positions are rebalanced on a monthly basis.
###Code
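# Conceptual sketch (plain pandas, not the vivace API) of the signal configured
# below: trailing 252-day skewness of daily returns, negated so that the most
# negatively skewed contracts get the largest long weights, with month-end
# values held constant to mimic monthly rebalancing. `prices` is a hypothetical
# DataFrame of futures prices indexed by date.
def xs_skewness_signal_sketch(prices: pd.DataFrame) -> pd.DataFrame:
    daily_returns = prices.pct_change()
    rolling_skew = daily_returns.rolling(252).skew()
    return (-rolling_skew).resample('M').last().reindex(prices.index, method='pad')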
engine_commodity = BacktestEngine(
strategy=Strategy.DELTA_ONE.value,
instrument=all_futures_baltas2019.query('asset_class == "commodity"').index,
signal=signal.XSSkewness(lookback=252,
post_process=processing.Pipeline([
processing.Negate(),
processing.AsFreq(freq='m', method='pad')
])),
log_level=logging.WARN,
)
engine_commodity.run()
commodity_portfolio_return = (engine_commodity.calculate_equity_curve(calculate_net=False)
.rename('Commodity skewness portfolio'))
engine_equity = BacktestEngine(
strategy=Strategy.DELTA_ONE.value,
instrument=all_futures_baltas2019.query('asset_class == "equity"').index,
signal=signal.XSSkewness(lookback=252,
post_process=processing.Pipeline([
processing.Negate(),
processing.AsFreq(freq='m', method='pad')
])),
log_level=logging.WARN,
)
engine_equity.run()
equity_portfolio_return = (engine_equity.calculate_equity_curve(calculate_net=False)
.rename('Equity skewness portfolio'))
engine_fixed_income = BacktestEngine(
strategy=Strategy.DELTA_ONE.value,
instrument=all_futures_baltas2019.query('asset_class == "fixed_income"').index,
signal=signal.XSSkewness(lookback=252,
post_process=processing.Pipeline([
processing.Negate(),
processing.AsFreq(freq='m', method='pad')
])),
log_level=logging.WARN,
)
engine_fixed_income.run()
fixed_income_portfolio_return = (engine_fixed_income.calculate_equity_curve(calculate_net=False)
.rename('Fixed income skewness portfolio'))
engine_currency = BacktestEngine(
strategy=Strategy.DELTA_ONE.value,
instrument=all_futures_baltas2019.query('asset_class == "currency"').index,
signal=signal.XSSkewness(lookback=252,
post_process=processing.Pipeline([
processing.Negate(),
processing.AsFreq(freq='m', method='pad')
])),
log_level=logging.WARN,
)
engine_currency.run()
currency_portfolio_return = (engine_currency.calculate_equity_curve(calculate_net=False)
.rename('Currency skewness portfolio'))
fig, ax = plt.subplots(2, 2, figsize=(14, 8), sharex=True)
commodity_portfolio_return.plot(ax=ax[0][0], logy=True)
equity_portfolio_return.plot(ax=ax[0][1], logy=True)
fixed_income_portfolio_return.plot(ax=ax[1][0], logy=True)
currency_portfolio_return.plot(ax=ax[1][1], logy=True)
ax[0][0].set_title('Commodity skewness portfolio')
ax[0][1].set_title('Equity skewness portfolio')
ax[1][0].set_title('Fixed income skewness portfolio')
ax[1][1].set_title('Currency skewness portfolio')
ax[0][0].set_ylabel('Cumulative returns');
ax[1][0].set_ylabel('Cumulative returns');
pd.concat((
commodity_portfolio_return.pipe(Performance).summary(),
equity_portfolio_return.pipe(Performance).summary(),
fixed_income_portfolio_return.pipe(Performance).summary(),
currency_portfolio_return.pipe(Performance).summary(),
), axis=1)
###Output
_____no_output_____
###Markdown
Performance since 1990In the original paper, performance since 1990 is reported. The result below confirms that all skewness-based portfolios exhibited positive performance over time. Interestingly, the equity portfolio performed somewhat weakly in the backtest. This could be due to the slightly different data set.
###Code
fig, ax = plt.subplots(2, 2, figsize=(14, 8), sharex=True)
commodity_portfolio_return['1990':].plot(ax=ax[0][0], logy=True)
equity_portfolio_return['1990':].plot(ax=ax[0][1], logy=True)
fixed_income_portfolio_return['1990':].plot(ax=ax[1][0], logy=True)
currency_portfolio_return['1990':].plot(ax=ax[1][1], logy=True)
ax[0][0].set_title('Commodity skewness portfolio')
ax[0][1].set_title('Equity skewness portfolio')
ax[1][0].set_title('Fixed income skewness portfolio')
ax[1][1].set_title('Currency skewness portfolio')
ax[0][0].set_ylabel('Cumulative returns');
ax[1][0].set_ylabel('Cumulative returns');
###Output
_____no_output_____
###Markdown
GSFThe authors define the global skewness factor (GSF) by combining the 4 asset classes with equal volatility weighting. Here, the 4 backtests are simply combined using each portfolio's ex-post realised volatility.
###Code
def get_leverage(equity_curve: pd.Series) -> float:
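    # Scale each sleeve so that its ex-post annualised volatility is 10%
    # (daily return std * sqrt(252)).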
return 0.1 / (equity_curve.pct_change().std() * (252 ** 0.5))
gsf = pd.concat((
commodity_portfolio_return.pct_change() * get_leverage(commodity_portfolio_return),
equity_portfolio_return.pct_change() * get_leverage(equity_portfolio_return),
fixed_income_portfolio_return.pct_change() * get_leverage(fixed_income_portfolio_return),
currency_portfolio_return.pct_change() * get_leverage(currency_portfolio_return),
), axis=1).mean(axis=1)
gsf = gsf.fillna(0).add(1).cumprod().rename('GSF')
fig, ax = plt.subplots(1, 2, figsize=(14, 4))
gsf.plot(ax=ax[0], logy=True);
gsf['1990':].plot(ax=ax[1], logy=True);
ax[0].set_title('GSF portfolio')
ax[1].set_title('Since 1990')
ax[0].set_ylabel('Cumulative returns');
pd.concat((
gsf.pipe(Performance).summary(),
gsf['1990':].pipe(Performance).summary().add_suffix(' (since 1990)')
), axis=1)
###Output
_____no_output_____
###Markdown
Post publication
###Code
publication_date = datetime(2019, 12, 16)
fig, ax = plt.subplots(1, 2, figsize=(14, 4))
gsf.plot(ax=ax[0], logy=True);
ax[0].set_title('GSF portfolio')
ax[0].set_ylabel('Cumulative returns');
ax[0].axvline(publication_date, lw=1, ls='--', color='black')
ax[0].text(publication_date, 0.6, 'Publication date ', ha='right')
gsf.loc[publication_date:].plot(ax=ax[1], logy=True);
ax[1].set_title('GSF portfolio (post publication)');
###Output
_____no_output_____
###Markdown
Recent performance
###Code
fig, ax = plt.subplots(figsize=(8, 4.5))
gsf.tail(252 * 2).plot(ax=ax, logy=True);
ax.set_title('GSF portfolio')
ax.set_ylabel('Cumulative returns');
###Output
_____no_output_____
###Markdown
Reference- Baltas, N. and Salinas, G., 2019. Cross-Asset Skew. Available at SSRN.
###Code
print(f'Updated: {datetime.utcnow().strftime("%d-%b-%Y %H:%M")}')
###Output
Updated: 28-Dec-2021 09:06
|
19 Geopandas/Install Geopandas.ipynb | ###Markdown
Install Geopandas We quickly add a few new modules. In the terminal, enter the following commands (make sure you are working in your working environment): * `brew install geos`* `brew install gdal`* `brew install spatialindex`* `pip install pillow`* `pip install pysal`* `pip install geopandas`* `pip install rtree`* `pip install descartes` Test And if that worked correctly, the following now runs without a hitch: Preparation
###Code
import pandas as pd
import geopandas as gpd
%matplotlib inline
###Output
_____no_output_____
###Markdown
File รffnen
###Code
path = 'shp/g1l17.shp'
gdf = gpd.read_file(path)
gdf
###Output
_____no_output_____
###Markdown
Plot
###Code
gdf.plot(edgecolor='grey', color='white', alpha=1, linewidth=2, figsize=(20,13))
###Output
_____no_output_____ |
python/training.ipynb | ###Markdown
Train model
###Code
from nn_model import NeuralNetworkModel, LSTMNeuralNetworkModel
model_path = './output/'+body_part+'_lstm'
if not os.path.exists(model_path):
os.makedirs(model_path)
model_name = model_path+'/best_model'
joblib.dump(sensors_scaler, model_path+'/scaler.pkl')
model = LSTMNeuralNetworkModel(name=body_part, hidden_size=100, look_back=look_back)
# model = NeuralNetworkModel(name=body_part, hidden_size=100)
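# Train for up to 500 iterations with early stopping (patience=100), validating
# on (test_in, test_out); checkpoints are written to `save_path`.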
model.fit(x=train_in, y=train_out, x_val=test_in, y_val=test_out, save_path=model_name,
iteration=500, patience=100, batch_size=1000)
###Output
/home/roboy/anaconda3/envs/roboy/lib/python3.8/site-packages/tensorflow/python/keras/legacy_tf_layers/core.py:171: UserWarning: `tf.layers.dense` is deprecated and will be removed in a future version. Please use `tf.keras.layers.Dense` instead.
warnings.warn('`tf.layers.dense` is deprecated and '
/home/roboy/anaconda3/envs/roboy/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer_v1.py:1692: UserWarning: `layer.apply` is deprecated and will be removed in a future version. Please use `layer.__call__` method instead.
warnings.warn('`layer.apply` is deprecated and '
|
interactive.ipynb | ###Markdown
First, import all the modules we need
###Code
%matplotlib widget
#import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mp
import numpy as np
from matplotlib.backend_bases import MouseButton
import cmath as c
print('imports done')
def on_click(event):
if event.button is MouseButton.LEFT:
if event.inaxes == axs[0]:
vector.set_visible(True)
wave.set_visible(True)
z = complex(event.xdata, event.ydata)
draw_complex_wave(z)
def on_motion(event):
if event.button is MouseButton.LEFT:
if event.inaxes == axs[0]:
z = complex(event.xdata, event.ydata)
draw_complex_wave(z)
def on_release(event):
pass
def draw_complex_wave(z):
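    # Convert the selected complex number to polar form: the arrow in the left
    # axes shows z itself, the right axes shows the sine wave amp*sin(x + phase).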
amp, phase = c.polar(z)
vector.set_positions((0,0),(z.real,z.imag))
wave.set_xdata(x)
wave.set_ydata(amp*np.sin(x+phase))
fig.canvas.draw()
fig, axs = plt.subplots(1,2)
fig.canvas.mpl_connect('button_press_event', on_click)
fig.canvas.mpl_connect('motion_notify_event', on_motion)
fig.canvas.mpl_connect('button_release_event', on_release)
for i, ax in enumerate(axs):
ax.axis('equal')
ax.set_xlim([-4, 4])
ax.set_ylim([-4, 4])
ax.spines[["left", "bottom"]].set_position(('data', 0))
ax.spines[["top", "right"]].set_visible(False)
ax.set_box_aspect(1)
#set initial amplitude and phase
z = complex(1,0)
amp, phase = c.polar(z)
x = np.linspace(-np.pi, np.pi, 100)
x_unit = np.cos(x)
y_unit = np.sin(x)
unit_circle, = axs[0].plot(x_unit, y_unit, linestyle='dotted', color='green')
unit_sine, = axs[1].plot(x, y_unit, linestyle='dotted', color='green')
unit_vector = mp.FancyArrowPatch((0, 0), (1, 0),mutation_scale=5, color='green')
vector = mp.FancyArrowPatch((0, 0), (1, 0),mutation_scale=5, color='red', visible=False)
wave, = axs[1].plot(x, amp*np.sin(x+phase), visible=False, color='red')
axs[0].add_patch(unit_vector)
axs[0].add_patch(vector)
fig.canvas.draw()
###Output
_____no_output_____
###Markdown
experiments

dataset | level | shape | acc
--- | --- | --- | ---
cora | 0 | | 81.1
citeseer | | 1402, 128 | 65.20
pubmed | | 7903, 128 | 79.8
###Code
# emb_cora_l1 = np.load('../graphzoom/embed_results/cora/cora_level_2.npy')
emb_cora_l1 = np.load('citeseer_emb_level_1.npy')
emb_cora_l1.shape
# emb_cora_l1
!pwd
from train_gnn import FACTORY, create_model, GCN
import numpy as np
import torch
from graphzoom.embed_methods.supervised.dgl_gcn import (GAT, GCN, _sample_mask,
load_data)
from dgl.data import RedditDataset
data = RedditDataset()
g = data.graph
model = GCN(g, data.features.shape[1], 128, 41,1).cuda()
dataset='reddit'
original_adj, labels, train_ids, test_ids, train_labels, test_labels, feats = load_data(
f'/mnt/yushi/dataset/{dataset}', dataset)
model.load_state_dict(torch.load('embeddings/GCN_reddit_emb_level_1_params.pth.tar'))
model
final_emb = np.load('graphzoom/embed_results/reddit_level_1.npy')
final_emb = torch.Tensor(final_emb).cuda()
# %timeit
res = model.layers[1](g, final_emb)
res = res.argmax(1)
res.shape
# res = res.argmax(1)
# res.shape
from sklearn.metrics import accuracy_score
accuracy_score(test_labels, res[test_ids].tolist())
from torch.distributions import Categorical
dist = torch.Tensor([[0.1,0.9],[0.2,0.8]])
Categorical(dist).entropy()
###Output
_____no_output_____
###Markdown
A Model of Prevalence for COVID-19**This worksheet presents hypothetical mathematical models of COVID -- and it is too early to draw conclusions about which represents the current epidemic. In all cases you should follow the advice of public health authorities to stay at home (for everyone who can) and self isolate if you have any symptoms. For authoritative advice see: https://www.nhs.uk/conditions/coronavirus-covid-19/**Author George Danezis University College London Twitter: @gdanezis Web: http://www0.cs.ucl.ac.uk/staff/G.Danezis/ Code and Data, as well as Jupyter notebook available here:https://github.com/gdanezis/COVID-Prevalence I use here the time series of reported outcomes from COVID-19, namely recoveries and deaths in different countries, to estimate the prevalence of the virus, as well as to project its growth. Under most scenarios a significant fraction of the population will be infected in the next 4-6 weeks, unless the latest public heath measures lower its growth. However, there are scenarios that explain the current apparent high association with fatalities, through association rather than causation. So it is possible that COVID-19 is highly infectious (particularly within hospitals) but does not cause significant fatalities (CFR < 0.1%). Of course other scenarios with a CFR of about 1% (high) are also possible.
###Code
import sys
from analytical import *
###Output
_____no_output_____
###Markdown
The modelWe are trying to infer the following key variables:* Prevalence $p$.* Testing rate $f$ of mild or asymptomatic carriers.* Total infected population $R_p$.We are given as data:* The number of deaths $D$ that tested positive with COVID.* The number of recoveries $R$ that tested positive with COVID.* The total population $P$.We have to make assumptions about:* The Case Fatality Rate (CFR) due to COVID $\text{CFR}_c$.* The CFR due to other reasons with similar symptoms as COVID $\text{CFR}_o$. We set it to $0.01/100$ (Annual mortality of 1% * 1/12 months * ~1/10 serious conditions may be confused.)* The increase in risk to be infected with COVID if in a serious condition $\mu$. We usually set $\mu=10$, namely being in a serious medical condition leading to death (for other reasons) also exposes a patient to the equivalent of x10 to catch COVID -- due to hospital infection rates.The system of equations we need to solve is:* The hospital prevalence $p_h$:\begin{equation}p_h = 1 - (1 - p)^{\mu}\end{equation}* The actual deaths due to COVID: all deaths minus the ones due to other causes that still tested positive for COVID:\begin{equation}D_a = \max{(D - p_h \cdot \text{CFR}_o \cdot P, 1)}\end{equation}* The prevalence is the total infected population, namely those actually infected plus the deaths due to COVID, divided by the population:\begin{equation}p = (R_p + D) / P\end{equation}* The observed recoveries are a fraction of the actual ones, based on the testing rate:\begin{equation}R = f \cdot R_p\end{equation}* The definition of the Case Fatality Rate is the number of deaths due to the virus, divided by the number of infections (the sum of all dead with the virus and those recovered):\begin{equation}\text{CFR}_c = \frac{D_a}{D + R_p}\end{equation}This system of equations is non-linear but we can solve it numerically using iterative methods. Limitations* The Testing rate $f$ assumes totally random testing in the population that is not a fatality. However most countries do use some symptoms or at least self-selection as a gate for testing. Therefore a lower rate of testing can justify the observed recovered cases, and the actual rate of testing may be a factor of 5-10 lower than the one reported here (depending on how well symptoms guide testing).* We assume all deaths either caused by or associated with COVID are tested, and reported in $D$. However, it is not clear that health authorities are testing dead people, and many cases resulting in fatalities may not have been reported. * The Case Fatality Rate $\text{CFR}_c$ measures the fatalities **caused** by COVID-19, rather than the ones merely associated with COVID. The raw data about recoveries and deaths can only be used directly to estimate the latter (association) since it is not clear whether a fatality is due to COVID or something else (but the patient also tested positive). As a result the CFR we estimate can be much lower than in other studies, since a lot of deaths may simply be due to other causes (evidenced by high comorbidity, and potentially already high prevalence in some places). Results for different $\text{CFR}_c$ Discussion for CFR=1%A CFR of around 1%-2% is an estimate that was feared early on from experiences in China and elsewhere. However, given this CFR the prevalence in South Korea is so small that the testing rate should be close to 31%. In fact we know that about 1-in-170 people have been tested there (huge, but not 31%), weakening the evidence for such a CFR. Other testing rates also seem an order of magnitude off. Comorbidity figures are much lower than those reported from Italy.
###Code
# Since we measure prevalence based on outcomes, the figures lag by about 20 days.
CFR_covid = 0.01 # CFR medium high: 1%
hospital_infection_mult = 5.0
make_table(populations, CFR_covid, hospital_infection_mult, flx=sys.stdout)
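# Minimal sketch (not the analytical module's actual implementation) of the
# fixed-point iteration described above; D, R, P are hypothetical inputs and the
# variable names mirror the equations in the markdown cell.
def solve_prevalence_sketch(D, R, P, CFR_c, CFR_o=0.01/100, mu=10.0, n_iter=200):
    p = 0.0                                      # initial guess for prevalence
    for _ in range(n_iter):
        p_h = 1.0 - (1.0 - p) ** mu              # in-hospital prevalence
        D_a = max(D - p_h * CFR_o * P, 1.0)      # deaths actually caused by COVID
        R_p = max(D_a / CFR_c - D, 1.0)          # infected, from the CFR definition
        p = (R_p + D) / P                        # updated prevalence
    f = R / R_p                                  # implied testing rate
    return p, f, R_p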
###Output
Assumptions: COVID CFR: 1.00% In Hospital factor: x5.0
Country Prev CFR Testing Comorb. Infected
--------------------------------------------------------------
Japan 0.00% 1.00% 5.94% 4.76% 3,958
USA 0.02% 1.00% 0.61% 4.76% 48,721
Germany 0.01% 1.00% 3.91% 4.76% 11,591
Italy 0.96% 1.00% 1.30% 4.68% 573,208
Spain 0.45% 1.00% 1.61% 4.72% 208,073
Belgium 0.07% 1.00% 4.84% 4.76% 8,293
Switzerland 0.13% 1.00% 1.18% 4.75% 11,121
Iran 0.21% 1.00% 4.90% 4.74% 170,794
Korea, South 0.02% 1.00% 30.27% 4.76% 10,460
United Kingdom 0.05% 1.00% 0.43% 4.76% 31,571
Netherlands 0.12% 1.00% 0.01% 4.75% 20,074
France 0.12% 1.00% 2.71% 4.75% 81,054
###Markdown
Discussion for CFR=0.1%A CFR of 0.1% is on the low side, and lower than one estimated by most studies. In fact it would put COVID-19 on par with seasonal viruses in terms of fatality rate. Such a CFR would require Italy and Spain to have had a single digit percentage of their populations infected in early March, which means that by now (end of March) about 50% of the population must have had COVID-19 (if the increase is at a similar rate, see projection section below). The testing rate for South Korea, and others, is still too large (1-in-20 rather than 1-in-170).
###Code
# Since we measure prevalence based on outcomes, the figures lag by about 20 days.
CFR_covid = 0.001 # CFR low: 0.1%
hospital_infection_mult = 5.0
make_table(populations, CFR_covid, hospital_infection_mult, flx=sys.stdout)
###Output
Assumptions: COVID CFR: 0.10% In Hospital factor: x5.0
Country Prev CFR Testing Comorb. Infected
--------------------------------------------------------------
Japan 0.02% 0.10% 0.84% 33.32% 27,962
USA 0.11% 0.10% 0.09% 33.29% 344,391
Germany 0.10% 0.10% 0.55% 33.29% 81,931
Italy 7.00% 0.10% 0.18% 30.30% 4,229,650
Spain 3.22% 0.10% 0.22% 31.92% 1,500,361
Belgium 0.52% 0.10% 0.68% 33.10% 58,780
Switzerland 0.92% 0.10% 0.17% 32.92% 79,031
Iran 1.50% 0.10% 0.69% 32.67% 1,218,232
Korea, South 0.14% 0.10% 4.28% 33.27% 73,959
United Kingdom 0.34% 0.10% 0.06% 33.18% 223,499
Netherlands 0.83% 0.10% 0.00% 32.96% 142,571
France 0.86% 0.10% 0.38% 32.95% 575,753
###Markdown
Discussion for CFR=0.01%This is a negligible Case Fatality Rate, and as a result most deaths with COVID are due to other causes rather than the COVID virus. Consequently the comorbidity rates are high (>90%), which is compatible with what was observed in Italy. This scenario would mean that Italy has by now long passed the >60% herd immunity threshold, and we should be seeing the tail end of the epidemic soon.
###Code
# Since we measure prevalence based on outcomes, the figures lag by about 20 days.
CFR_covid_low = 0.0001 # CFR very low: 0.01%
hospital_infection_mult = 10.0
make_table(populations, CFR_covid_low, hospital_infection_mult, flx=sys.stdout)
###Output
Assumptions: COVID CFR: 0.01% In Hospital factor: x10.0
Country Prev CFR Testing Comorb. Infected
--------------------------------------------------------------
Japan 0.03% 0.01% 0.62% 90.90% 38,186
USA 0.14% 0.01% 0.06% 90.86% 472,266
Germany 0.14% 0.01% 0.40% 90.86% 112,317
Italy 16.65% 0.01% 0.07% 83.43% 10,066,389
Spain 5.30% 0.01% 0.14% 88.79% 2,471,375
Belgium 0.72% 0.01% 0.49% 90.64% 82,301
Switzerland 1.32% 0.01% 0.12% 90.41% 113,058
Iran 2.22% 0.01% 0.47% 90.06% 1,799,738
Korea, South 0.20% 0.01% 3.12% 90.84% 101,616
United Kingdom 0.47% 0.01% 0.04% 90.73% 310,069
Netherlands 1.18% 0.01% 0.00% 90.46% 202,948
France 1.23% 0.01% 0.27% 90.44% 820,873
###Markdown
DriftMLP demonstration- This is a very quick and brief demonstration of the pathways which the driftmlp package finds.- The intended use of the application is to give a brief preview of the results which the driftmlp package and method supply. InstructionsSelect two locations using the four sliders below. Upon releasing the mouse a map will be shown displaying the most likely pathway from the transition matrix.The blue pathway shows the path going from (lon_from, lat_from) to (lon_to, lat_to); the red pathway shows the return.The two points are shown in the top plot; from in blue, to in red.Use the dropdown menu to select which drifter data subset to use to estimate the transition matrix.- Drogued drifters will give pathways corresponding to flows in the top 15 m. The drifters in this dataset are less affected by wind forcing.- Undrogued drifters will give pathways corresponding to near-surface flows, with a stronger influence from the surface stress winds.- Both is simply a mixture of the two datasets.Typically the undrogued and both options will give shorter travel times.
###Code
from ipywidgets import interact, widgets, interactive, fixed, interact_manual, FloatSlider
from app_backend import interactive_app
import zipfile
graph_zip = zipfile.ZipFile("graph_files.zip")
graph_zip.extractall()
p = widgets.Dropdown(
options=[('No Drogued Drifters', 'nodrg'), ('Drogued Drifters', 'drg'), ('Both Drogued and Undrogued', 'both')],
description='Drogued',
disabled=False,
)
# Set this finer for more precise locations
step_size=1e-3
# The application will only update on mouse release
continuous_update=False
loc_sliders=[FloatSlider(value=-158, min=-180, max=180, step=step_size, continuous_update=False),
FloatSlider(value=44, min=-80, max=80, step=step_size, continuous_update=False),
FloatSlider(value=-19.55, min=-180, max=180, step=step_size, continuous_update=False),
FloatSlider(value=-52, min=-80, max=80, step=step_size, continuous_update=False)
]
interact_inst = interactive_app()
w = interactive(interact_inst.__call__,
lon_from = loc_sliders[0],
lat_from = loc_sliders[1],
lon_to = loc_sliders[2],
lat_to = loc_sliders[3],
network_type=p
)
output_folium = w.children[-1]
output_folium.layout.height = '900px'
output_folium.layout.width = '600px'
display(w)
###Output
_____no_output_____
###Markdown
Defining and training the model Load the dataset - `A` (adjacency matrix) is a `scipy.sparse.csr_matrix` of size `[N, N]` - `X` (attribute matrix) is a `scipy.sparse.csr_matrix` of size `[N, D]` - `Z_gt` (binary community affiliation matrix) is a `np.ndarray` of size `[N, K]`
###Code
loader = nocd.data.load_dataset('data/mag_cs.npz')
A, X, Z_gt = loader['A'], loader['X'], loader['Z']
N, K = Z_gt.shape
###Output
_____no_output_____
###Markdown
Define the hyperparameters
###Code
hidden_sizes = [128] # hidden sizes of the GNN
weight_decay = 1e-2 # strength of L2 regularization on GNN weights
dropout = 0.5 # dropout probability
batch_norm = True # whether to use batch norm
lr = 1e-3 # learning rate
max_epochs = 500 # number of epochs to train
display_step = 25 # how often to compute validation loss
balance_loss = True # whether to use balanced loss
stochastic_loss = True # whether to use stochastic or full-batch training
batch_size = 20000 # batch size (only for stochastic training)
###Output
_____no_output_____
###Markdown
Select & normalize the feature matrixFor some datasets where the features are very informative / correlated with the community structure it's better to use `X` as input (e.g. co-authorship networks w/ keywords as node features). Otherwise, you should try using `A` or `[A, X]` as input.
###Code
x_norm = normalize(X) # node features
# x_norm = normalize(A) # adjacency matrix
# x_norm = sp.hstack([normalize(X), normalize(A)]) # concatenate A and X
x_norm = nocd.utils.to_sparse_tensor(x_norm).cuda()
###Output
_____no_output_____
###Markdown
Define the GNN model
###Code
sampler = nocd.sampler.get_edge_sampler(A, batch_size, batch_size, num_workers=5)
gnn = nocd.nn.GCN(x_norm.shape[1], hidden_sizes, K, batch_norm=batch_norm, dropout=dropout).cuda()
adj_norm = gnn.normalize_adj(A)
decoder = nocd.nn.BerpoDecoder(N, A.nnz, balance_loss=balance_loss)
opt = torch.optim.Adam(gnn.parameters(), lr=lr)
def get_nmi(thresh=0.5):
"""Compute Overlapping NMI of the communities predicted by the GNN."""
gnn.eval()
Z = F.relu(gnn(x_norm, adj_norm))
Z_pred = Z.cpu().detach().numpy() > thresh
nmi = nocd.metrics.overlapping_nmi(Z_pred, Z_gt)
return nmi
###Output
_____no_output_____
###Markdown
Training loop
###Code
val_loss = np.inf
validation_fn = lambda: val_loss
early_stopping = nocd.train.NoImprovementStopping(validation_fn, patience=10)
model_saver = nocd.train.ModelSaver(gnn)
for epoch, batch in enumerate(sampler):
if epoch > max_epochs:
break
if epoch % 25 == 0:
with torch.no_grad():
gnn.eval()
# Compute validation loss
Z = F.relu(gnn(x_norm, adj_norm))
val_loss = decoder.loss_full(Z, A)
print(f'Epoch {epoch:4d}, loss.full = {val_loss:.4f}, nmi = {get_nmi():.2f}')
# Check if it's time for early stopping / to save the model
early_stopping.next_step()
if early_stopping.should_save():
model_saver.save()
if early_stopping.should_stop():
print(f'Breaking due to early stopping at epoch {epoch}')
break
# Training step
gnn.train()
opt.zero_grad()
Z = F.relu(gnn(x_norm, adj_norm))
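    # `batch` holds indices of sampled edges (ones_idx) and sampled non-edges
    # (zeros_idx) used by the stochastic Bernoulli-Poisson loss.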
ones_idx, zeros_idx = batch
if stochastic_loss:
loss = decoder.loss_batch(Z, ones_idx, zeros_idx)
else:
loss = decoder.loss_full(Z, A)
loss += nocd.utils.l2_reg_loss(gnn, scale=weight_decay)
loss.backward()
opt.step()
###Output
Epoch 0, loss.full = 1.0301, nmi = 0.03
Epoch 25, loss.full = 0.2925, nmi = 0.34
Epoch 50, loss.full = 0.2478, nmi = 0.42
Epoch 75, loss.full = 0.2237, nmi = 0.44
Epoch 100, loss.full = 0.2137, nmi = 0.47
Epoch 125, loss.full = 0.1989, nmi = 0.46
Epoch 150, loss.full = 0.1946, nmi = 0.48
Epoch 175, loss.full = 0.1899, nmi = 0.49
Epoch 200, loss.full = 0.1988, nmi = 0.48
Epoch 225, loss.full = 0.1899, nmi = 0.50
Epoch 250, loss.full = 0.1944, nmi = 0.48
Epoch 275, loss.full = 0.1897, nmi = 0.49
Epoch 300, loss.full = 0.1852, nmi = 0.49
Epoch 325, loss.full = 0.1899, nmi = 0.52
Epoch 350, loss.full = 0.1885, nmi = 0.49
Epoch 375, loss.full = 0.1893, nmi = 0.48
Epoch 400, loss.full = 0.1842, nmi = 0.50
Epoch 425, loss.full = 0.1890, nmi = 0.49
Epoch 450, loss.full = 0.1873, nmi = 0.49
Epoch 475, loss.full = 0.1879, nmi = 0.49
Epoch 500, loss.full = 0.1828, nmi = 0.50
###Markdown
Depending on whether you use balanced loss or not, you should (probably) use different threshold values. From my experience, the following are reasonable defaults: - for `balance_loss = True`: `thresh = 0.5` - for `balance_loss = False`: `thresh = 0.01` You can look at the distribution of the non-zero entries of `Z` to decide on a good value for the threshold. It makes sense to select a value that lies in the leftmost "valley" of the histogram below. You can also look at the unsupervised metrics in the next section of this notebook to make an informed choice. Note that all of these are just speculations based on the behavior that I observed for a handful of datasets, YMMV.
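To make this choice a bit less ad hoc, here is a minimal sketch (reusing the `get_nmi` helper defined above; the candidate values below are arbitrary) that sweeps a few thresholds and reports the resulting NMI and the number of non-empty communities:
```python
gnn.eval()
with torch.no_grad():
    Z_np = F.relu(gnn(x_norm, adj_norm)).cpu().detach().numpy()
for t in [0.01, 0.1, 0.3, 0.5]:
    Z_t = Z_np > t
    n_nonempty = int((Z_t.sum(0) > 0).sum())  # communities with at least one member
    print(f'thresh = {t:<4}  nmi = {get_nmi(t):.3f}  non-empty communities = {n_nonempty}')
```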
###Code
plt.hist(Z[Z > 0].cpu().detach().numpy(), 100);
thresh = 0.5
Z = F.relu(gnn(x_norm, adj_norm))
Z_pred = Z.cpu().detach().numpy() > thresh
model_saver.restore()
print(f'Final nmi = {get_nmi(thresh):.3f}')
###Output
Final nmi = 0.498
###Markdown
Analyzing the results Visualize the adjacency matrix sorted by the communities
###Code
plt.figure(figsize=[10, 10])
z = np.argmax(Z_pred, 1)
o = np.argsort(z)
nocd.utils.plot_sparse_clustered_adjacency(A, K, z, o, markersize=0.05)
# Sizes of detected communities
print(Z_pred.sum(0))
###Output
[1047 2023 1618 520 1127 1703 2561 1884 2056 1959 883 1750 1546 0
970 1853 2071 1591]
###Markdown
Quantify quality of the communities based on unsupervised metrics. Metrics:* **Coverage**: what percentage of the edges is explained by at least one community? (i.e. if $(u, v)$ is an edge, both nodes share at least one community) Higher is better.$$\textrm{Coverage}(C_1, ..., C_K) = \frac{1}{|E|}\sum_{u, v \in E} \mathbb{1}[z_u^T z_v > 0]$$* **Density**: average density of the detected communities (weighted by community size). Higher is better.$$\rho(C) = \frac{\#\text{ existing edges in } C}{\#\text{ possible edges in } C}$$$$\textrm{AvgDensity}(C_1, ..., C_K) = \frac{1}{\sum_i |C_i|}\sum_i \rho(C_i) \cdot |C_i|$$* **Conductance**: average conductance of the detected communities (weighted by community size). Lower is better.$$\textrm{outside}(C) = \sum_{u \in C, v \notin C} A_{uv}$$$$\textrm{inside}(C) = \sum_{u \in C, v \in C, v \ne u} A_{uv}$$$$\textrm{Conductance}(C) = \frac{\textrm{outside}(C)}{\textrm{inside}(C) + \textrm{outside}(C)}$$$$\textrm{AvgConductance}(C_1, ..., C_K) = \frac{1}{\sum_i |C_i|}\sum_i \textrm{Conductance}(C_i) \cdot |C_i|$$* **Clustering coefficient**: average clustering coefficient of the detected communities (weighted by community size). Higher is better.$$\textrm{ClustCoef}(C) = \frac{\#\text{ existing triangles in } C}{\#\text{ possible triangles in } C}$$$$\textrm{AvgClustCoef}(C_1, ..., C_K) = \frac{1}{\sum_i |C_i|}\sum_i \textrm{ClustCoef}(C_i) \cdot |C_i|$$ Clustering coefficient & density of the entire graph
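As a rough illustration of how the first two metrics map to code, here is a minimal sketch that computes coverage and size-weighted conductance directly from a binary membership matrix `Z` and the symmetric sparse adjacency `A` (it mirrors the formulas above; it is not the `nocd.metrics` implementation used below):
```python
import numpy as np

def coverage(Z, A):
    # Fraction of edges whose endpoints share at least one community.
    u, v = A.nonzero()
    shared = (Z[u] * Z[v]).sum(axis=1)   # number of shared communities per edge
    return float(np.mean(shared > 0))

def avg_conductance(Z, A):
    # Size-weighted average conductance over the detected communities.
    degrees = np.asarray(A.sum(axis=1)).ravel()
    sizes, conds = [], []
    for k in range(Z.shape[1]):
        members = np.where(Z[:, k])[0]
        if len(members) == 0:
            continue
        inside = A[members][:, members].sum()      # internal adjacency entries
        outside = degrees[members].sum() - inside  # edges leaving the community
        if inside + outside > 0:
            conds.append(outside / (inside + outside))
            sizes.append(len(members))
    return float(np.average(conds, weights=sizes))

print(coverage(Z_pred, A), avg_conductance(Z_pred, A))
```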
###Code
density_baseline = A.nnz / (N**2 - N)
num_triangles = (A @ A @ A).diagonal().sum() / 6
num_possible_triangles = (N - 2) * (N - 1) * N / 6
clust_coef_baseline = num_triangles / num_possible_triangles
print(f'Background (over the entire graph):\n'
f' - density = {density_baseline:.3e}\n'
f' - clust_coef = {clust_coef_baseline:.3e}')
metrics = nocd.metrics.evaluate_unsupervised(Z_gt, A)
print(f"Ground truth communities:\n"
f" - coverage = {metrics['coverage']:.4f}\n"
f" - conductance = {metrics['conductance']:.4f}\n"
f" - density = {metrics['density']:.3e}\n"
f" - clust_coef = {metrics['clustering_coef']:.3e}")
metrics = nocd.metrics.evaluate_unsupervised(Z_pred, A)
print(f"Predicted communities:\n"
f" - coverage = {metrics['coverage']:.4f}\n"
f" - conductance = {metrics['conductance']:.4f}\n"
f" - density = {metrics['density']:.3e}\n"
f" - clust_coef = {metrics['clustering_coef']:.3e}")
###Output
Predicted communities:
- coverage = 0.9297
- conductance = 0.2347
- density = 4.861e-03
- clust_coef = 1.164e-05
|
assignments/0307--CUDA_Memory_pre-class-assignment.ipynb | ###Markdown
[Link to this document's Jupyter Notebook](./0307--CUDA_Memory_pre-class-assignment.ipynb) In order to successfully complete this assignment you must do the required reading, watch the provided videos and complete all instructions. The embedded survey form must be entirely filled out and submitted on or before **11:59pm on Sunday March 7**. Students must come to class the next day prepared to discuss the material covered in this assignment. --- Pre-Class Assignment: The CUDA Memory Model Goals for today's pre-class assignment 1. [Quick Overview of the CUDA Memory Model](Quick-Overview-of-the-CUDA-Memory-Model)2. [Coalescing global memory accesses](Coalescing-global-memory-accesses)3. [Using shared memory](Using-shared-memory)4. [CUDA Memory Example by doing Reduction](CUDA-Memory-Example-by-doing-Reduction)5. [Assignment wrap-up](Assignment_wrap-up) --- 1. Quick Overview of the CUDA Memory ModelThe following video shows you how to schedule basic CUDA jobs on the HPCC.
###Code
from IPython.display import YouTubeVideo
YouTubeVideo("HQejUtJtBlg",width=640,height=360)
###Output
_____no_output_____
###Markdown
--- 2. Coalescing global memory accesses
###Code
from IPython.display import YouTubeVideo
YouTubeVideo("mLxZyWOI340",width=640,height=360)
###Output
_____no_output_____
###Markdown
&#9989; **QUESTION:** Why does a large stride lower performance? Put your answer to the above question here. The following is a great reference on how to access global memory effectively: - https://devblogs.nvidia.com/how-access-global-memory-efficiently-cuda-c-kernels/ --- 3. Using shared memory The following is a great reference on using shared memory in CUDA:- https://devblogs.nvidia.com/using-shared-memory-cuda-cc/ The basic syntax can be found here: Static shared memory:
```c++
__global__ void staticReverse(int *d, int n)
{
  __shared__ int s[64];
  int t = threadIdx.x;
  int tr = n-t-1;
  s[t] = d[t];
  __syncthreads();
  d[t] = s[tr];
}
```
Dynamic shared memory (the shared-memory size in bytes is passed as the third kernel-launch parameter; the launch configuration shown here follows the NVIDIA blog post linked above):
```c++
dynamicReverse<<<1, n, n*sizeof(int)>>>(d_d, n);
```
 --- 4. CUDA Memory Example by doing Reduction
###Code
from IPython.display import YouTubeVideo
YouTubeVideo("RzPDlnZhxtQ",width=640,height=360)
###Output
_____no_output_____
###Markdown
&#9989; **DO THIS:** I found the following code on Github which does a reduction similar to the one shown in the above video. Download the code and get it working on the HPCC.- https://github.com/likefudan/cudaReduction/blob/master/cuda_prog_1.cuYou will need to do the following* Comment out the CPU print statements (lines 341 and 342)* Remove the `````` syntax error on line 217 &#9989; **DO THIS:** Change the ```kernel``` function pointer on line 233 to point to the five different kernel options. Try each one and record the timing differences as compared to the cpu. Fill out the following table.- [Google Timing Table](https://docs.google.com/spreadsheets/d/1WBf9TxULtwIv5D59tk5M-VRxBIHXCby79Q4cgEF7H7o/edit#gid=61281276) ---- 5. Assignment wrap-upPlease fill out the form that appears when you run the code below. **You must completely fill this out in order to receive credits for the assignment!**[Direct Link to Google Form](https://cmse.msu.edu/cmse401-pc-survey)If you have trouble with the embedded form, please make sure you log on with your MSU google account at [googleapps.msu.edu](https://googleapps.msu.edu) and then click on the direct link above. &#9989; **Assignment-Specific QUESTION:** Were you able to get the CUDA reduction example working on the HPCC? If not, where did you get stuck? Put your answer to the above question here &#9989; **QUESTION:** Summarize what you did in this assignment. Put your answer to the above question here &#9989; **QUESTION:** What questions do you have, if any, about any of the topics discussed in this assignment after working through the jupyter notebook? Put your answer to the above question here &#9989; **QUESTION:** How well do you feel this assignment helped you to achieve a better understanding of the above mentioned topic(s)? Put your answer to the above question here &#9989; **QUESTION:** What was the **most** challenging part of this assignment for you? Put your answer to the above question here &#9989; **QUESTION:** What was the **least** challenging part of this assignment for you? Put your answer to the above question here &#9989; **QUESTION:** What kind of additional questions or support, if any, do you feel you need to have a better understanding of the content in this assignment? Put your answer to the above question here &#9989; **QUESTION:** Do you have any further questions or comments about this material, or anything else that's going on in class? Put your answer to the above question here &#9989; **QUESTION:** Approximately how long did this pre-class assignment take? Put your answer to the above question here
###Code
from IPython.display import HTML
HTML(
"""
<iframe
src="https://cmse.msu.edu/cmse401-pc-survey"
width="100%"
height="500px"
frameborder="0"
marginheight="0"
marginwidth="0">
Loading...
</iframe>
"""
)
###Output
_____no_output_____ |
Handson_intro to python/PY0101EN-1-1-Types.ipynb | ###Markdown
Python - Writing Your First Python Code! Welcome! This notebook will teach you the basics of the Python programming language. Although the information presented here is quite basic, it is an important foundation that will help you read and write Python code. By the end of this notebook, you'll know the basics of Python, including how to write basic commands, understand some basic types, and how to perform simple operations on them. [Tip]: To execute the Python code in the code cell below, click on the cell to select it and press Shift + Enter.
###Code
# Try your first Python output
print('Hello, Python!')
###Output
Hello, Python!
###Markdown
After executing the cell above, you should see that Python prints Hello, Python!. Congratulations on running your first Python code! [Tip:] print() is a function. You passed the string 'Hello, Python!' as an argument to instruct Python on what to print.
###Code
# Check the Python Version
import sys
print(sys.version)
###Output
3.7.3 (default, Mar 27 2019, 22:11:17)
[GCC 7.3.0]
###Markdown
Writing comments in Python
###Code
# Practice on writing comments
print('Hello, Python!') # This line prints a string
# print('Hi')
###Output
_____no_output_____
###Markdown
Errors in Python
###Code
# Print string as error message
frint("Hello, Python!")
# Try to see build in error message
print("Hello, Python!)
###Output
_____no_output_____
###Markdown
Try to run the code in the cell below and see what happens:
###Code
# Print string and error to see the running order
print("This will be printed")
frint("This will cause an error")
print("This will NOT be printed")
###Output
_____no_output_____
###Markdown
Types of objects in Python Python is an object-oriented language. There are many different types of objects in Python. Let's start with the most common object types: strings, integers and floats. Anytime you write words (text) in Python, you're using character strings (strings for short). The most common numbers, on the other hand, are integers (e.g. -1, 0, 100) and floats, which represent real numbers (e.g. 3.14, -42.0). The following code cells contain some examples.
###Code
# Integer
11
# Float
2.14
# String
"Hello, Python 101!"
###Output
_____no_output_____
###Markdown
You can get Python to tell you the type of an expression by using the built-in type() function. You'll notice that Python refers to integers as int, floats as float, and character strings as str.
###Code
# Type of 12
type(12)
# Type of 2.14
type(2.14)
# Type of "Hello, Python 101!"
type("Hello, Python 101!")
###Output
_____no_output_____
###Markdown
Integers Here are some examples of integers. Integers can be negative or positive numbers: We can verify this is the case by using, you guessed it, the type() function:
###Code
# Print the type of -1
type(-1)
# Print the type of 4
type(4)
###Output
_____no_output_____
###Markdown
Floats
###Code
# Print the type of 1.0
type(1.0) # Notice that 1 is an int, and 1.0 is a float
# Print the type of 0.5
type(0.5)
# Print the type of 0.56
type(0.56)
###Output
_____no_output_____
###Markdown
Converting from one object type to a different object type You can change the type of the object in Python; this is called typecasting. For example, you can convert an integer into a float (e.g. 2 to 2.0).Let's try it:
###Code
# Verify that this is an integer
type(2)
###Output
_____no_output_____
###Markdown
Converting integers to floatsLet's cast integer 2 to float:
###Code
# Convert 2 to a float
float(2)
# Convert integer 2 to a float and check its type
type(float(2))
###Output
_____no_output_____
###Markdown
When we convert an integer into a float, we don't really change the value (i.e., the significand) of the number. However, if we cast a float into an integer, we could potentially lose some information. For example, if we cast the float 1.1 to integer we will get 1 and lose the decimal information (i.e., 0.1):
###Code
# Casting 1.1 to integer will result in loss of information
int(1.1)
###Output
_____no_output_____
###Markdown
Converting from strings to integers or floats Sometimes, we can have a string that contains a number within it. If this is the case, we can cast that string that represents a number into an integer using int():
###Code
# Convert a string into an integer
int('1')
###Output
_____no_output_____
###Markdown
But if you try to do so with a string that is not a perfect match for a number, you'll get an error. Try the following:
###Code
# Convert a string into an integer with error
int('1 or 2 people')
###Output
_____no_output_____
###Markdown
You can also convert strings containing floating point numbers into float objects:
###Code
# Convert the string "1.2" into a float
float('1.2')
###Output
_____no_output_____
###Markdown
[Tip:] Note that strings can be represented with single quotes ('1.2') or double quotes ("1.2"), but you can't mix both (e.g., "1.2'). Converting numbers to strings If we can convert strings to numbers, it is only natural to assume that we can convert numbers to strings, right?
###Code
# Convert an integer to a string
str(1)
###Output
_____no_output_____
###Markdown
And there is no reason why we shouldn't be able to make floats into strings as well:
###Code
# Convert a float to a string
str(1.2)
###Output
_____no_output_____
###Markdown
Boolean data type Boolean is another important type in Python. An object of type Boolean can take on one of two values: True or False:
###Code
# Value true
True
###Output
_____no_output_____
###Markdown
Notice that the value True has an uppercase "T". The same is true for False (i.e. you must use the uppercase "F").
###Code
# Value false
False
###Output
_____no_output_____
###Markdown
When you ask Python to display the type of a boolean object it will show bool which stands for boolean:
###Code
# Type of True
type(True)
# Type of False
type(False)
###Output
_____no_output_____
###Markdown
We can cast boolean objects to other data types. If we cast a boolean with a value of True to an integer or float we will get a one. If we cast a boolean with a value of False to an integer or float we will get a zero. Similarly, if we cast a 1 to a Boolean, you get a True. And if we cast a 0 to a Boolean we will get a False. Let's give it a try:
###Code
# Convert True to int
int(True)
# Convert 1 to boolean
bool(1)
# Convert 0 to boolean
bool(0)
# Convert True to float
float(True)
###Output
_____no_output_____
###Markdown
Exercise: Types What is the data type of the result of: 6 / 2?
###Code
# Write your code below. Don't forget to press Shift+Enter to execute the cell
###Output
_____no_output_____
###Markdown
What is the type of the result of: 6 // 2? (Note the double slash //.)
###Code
# Write your code below. Don't forget to press Shift+Enter to execute the cell
###Output
_____no_output_____
###Markdown
Expression and Variables Expressions Expressions in Python can include operations among compatible types (e.g., integers and floats). For example, basic arithmetic operations like adding multiple numbers:
###Code
# Addition operation expression
43 + 60 + 16 + 41
###Output
_____no_output_____
###Markdown
We can perform subtraction operations using the minus operator. In this case the result is a negative number:
###Code
# Subtraction operation expression
50 - 60
###Output
_____no_output_____
###Markdown
We can do multiplication using an asterisk:
###Code
# Multiplication operation expression
5 * 5
###Output
_____no_output_____
###Markdown
We can also perform division with the forward slash:
###Code
# Division operation expression
25 / 5
# Division operation expression
25 / 6
###Output
_____no_output_____
###Markdown
As seen in the exercise above, we can use the double slash for integer division, where the result is rounded down (floored) to an integer:
###Code
# Integer division operation expression
25 // 5
# Integer division operation expression
25 // 6
# Mathematical expression
30 + 2 * 60
# Mathematical expression
(30 + 2) * 60
###Output
_____no_output_____
###Markdown
Variables Just like with most programming languages, we can store values in variables, so we can use them later on. For example:
###Code
# Store value into variable
x = 43 + 60 + 16 + 41
###Output
_____no_output_____
###Markdown
To see the value of x in a Notebook, we can simply place it on the last line of a cell:
###Code
# Print out the value in variable
x
###Output
_____no_output_____
###Markdown
We can also perform operations on x and save the result to a new variable:
###Code
# Use another variable to store the result of the operation between variable and value
y = x / 60
y
###Output
_____no_output_____
###Markdown
If we save a value to an existing variable, the new value will overwrite the previous value:
###Code
# Overwrite variable with new value
x = x / 60
x
###Output
_____no_output_____
###Markdown
It's a good practice to use meaningful variable names, so you and others can read the code and understand it more easily:
###Code
# Name the variables meaningfully
total_min = 43 + 42 + 57 # Total length of albums in minutes
total_min
# Name the variables meaningfully
total_hours = total_min / 60 # Total length of albums in hours
total_hours
# Complicate expression
total_hours = (43 + 42 + 57) / 60 # Total hours in a single expression
total_hours
###Output
_____no_output_____ |
ipynb/Spindle_detection.ipynb | ###Markdown
**Spindle detection**
###Code
import sys
sys.path.insert(0, 'D:/Beths/')
import os
import re
import math
import pandas as pd
import numpy as np
import yasa
import mne
from mne.filter import filter_data, resample
import matplotlib.pyplot as plt
from scipy import signal
## Import from my files
from data_lfp import mne_lfp_Axona, load_lfp_Axona
from data_pos import RecPos
df = pd.read_csv('data_scheme.csv')
sleep_files = df.loc[df.sleep == 1, ['folder', 'filename']].agg('/'.join, axis=1).values
###Output
_____no_output_____
###Markdown
Auxiliary functions
###Code
def mark_moving(file, tresh=2.5): # 1 if speed > tresh
pos = RecPos(file)
speed = pos.get_speed()
moving = np.zeros(len(speed)*5)
for i in range(0, len(speed)):
if speed[i] > tresh:
moving[5*i:5*i+5] = 1
return moving
def create_events(record, events):
'''Create events on MNE object
Inputs:
record(mne_object): recording to add events
        events(2D np array): array of 0/1 with the same length as the recording, shape (1, len(record))
output:
record(mne_object): Record with events added
'''
try:
assert len(record.times) == events.shape[1]
stim_data = events
        info = mne.create_info(['STI'], record.info['sfreq'], ['stim'])
stim_raw = mne.io.RawArray(stim_data, info)
record.add_channels([stim_raw], force_update_info=True)
except AssertionError as error:
print(error)
        print('The length of events needs to be equal to the record length.')
return record
def plot_treshold(file):
pos = RecPos(file)
x, y = pos.get_position()
resting = 1-mark_moving(file, tresh=1.) # 1-moving
print(f'Proportion of resting time: {(100 * sum(resting))/len(resting):.1f}%')
fig, (ax, ax1) = plt.subplots(1, 2, figsize=(6,3))
ax.plot(x,y, c= 'black', alpha = .7) # plot background
mx = np.asarray([n*m for n, m in zip(x, resting)])
my = np.asarray([n*m for n, m in zip(y, resting)])
mx[mx==0] = np.nan
my[my==0] = np.nan
ax.scatter(mx,my, c='r', marker='.',alpha = .5)
speed = pos.get_speed()
ax1.hist(speed, bins=np.linspace(0,20))
fig.tight_layout()
return plt.show()
## Selecting recordings where the animal was resting (not moving) for > 25% of the time
true_sleep = []
for file in sleep_files:
pos = RecPos(file)
x, y = pos.get_position()
resting = 1-mark_moving(file, tresh=1.) # 1-moving
if (100 * (sum(resting))/len(resting)) > 25:
true_sleep.append(file.strip())
true_sleeps = [r.strip().split('/')[-1] for r in true_sleep]
sleep_files = df[df.filename.isin(true_sleeps)]
###Output
_____no_output_____
###Markdown
**Recordings which animals spent at least 25% of the time resting (speed < 1 cm/s)**
###Code
print(f'Number of animals with sleep recordings: {len(df[df.sleep == 1].rat.unique())}')
print(f'Number of animals with > 25% resting: {len(sleep_files.rat.unique())}')
ax = sleep_files['rat'].value_counts().plot(kind='bar',
figsize=(9,5),
title="Number of sleep recordings per animal.")
ax.set_xlabel("Rat ID")
ax.set_ylabel("Frequency of recordings")
plt.show()
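# Example MNE epoching step (illustrative only: it assumes `raw`, `events` and `picks`
# objects coming from an MNE preprocessing step that is not shown in this notebook)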
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.5,
proj=True, picks=picks, baseline=(None, 0),
preload=True, reject=reject)
###Output
_____no_output_____ |
discriminant_analysis_pw4.ipynb | ###Markdown
Discriminant Analysis PW4 Sci-Kit Learn Introduction In this practical work, we will compare several classification models to see the difference in performance between them. Table of Contents 1. Installing Dependencies 2. Data Loading, Preprocessing and Visualization 3. Logistic Regression, Prediction and Evaluation 4. Linear Discriminant Analysis 5. Quadratic Discriminant Analysis 1. Installing dependencies
###Code
import matplotlib.pyplot as plt
import pandas as pd
import pylab as pl
import numpy as np
%matplotlib inline
###Output
_____no_output_____
###Markdown
2. Data Loading, Preprocessing and Visualization
###Code
!git clone https://github.com/shivang98/Social-Network-ads-Boost
%%shell
cd Social-Network-ads-Boost
ls
###Output
random_forest_classification.py README.md Social_Network_Ads.csv
###Markdown
Dataset extraction and Preprocessing
###Code
dataset= pd.read_csv("Social-Network-ads-Boost/Social_Network_Ads.csv")
dataset.head()
X = np.asarray(dataset[[ 'Age', 'EstimatedSalary']])
y=np.asarray(dataset["Purchased"])
from sklearn import preprocessing
X = preprocessing.StandardScaler().fit(X).transform(X)
###Output
_____no_output_____
###Markdown
Dataset Visualization
###Code
from statsmodels.graphics.mosaicplot import mosaic
data = pd.DataFrame({'Purchased': np.asarray(dataset[ 'Purchased']), 'Gender':np.asarray(dataset[ 'Gender'])})
mosaic(data,["Gender","Purchased"])
plt.show()
data["Gender"]=pd.get_dummies(data["Gender"])
from scipy.stats import chisquare
chisquare(data[["Purchased","Gender"]])
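# Note: `chisquare` above runs a one-way goodness-of-fit test on each column.
# A test of association between gender and purchase would normally use the
# contingency table instead -- an illustrative (commented-out) sketch:
# from scipy.stats import chi2_contingency
# chi2_contingency(pd.crosstab(dataset["Gender"], dataset["Purchased"]))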
###Output
_____no_output_____
###Markdown
Data Split
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=4)
print ('Train set:', X_train.shape, y_train.shape)
print ('Test set:', X_test.shape, y_test.shape)
###Output
Train set: (300, 2) (300,)
Test set: (100, 2) (100,)
###Markdown
3. Logistic Regression, Prediction and Evaluation
###Code
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
LR = LogisticRegression(C=0.01, solver='liblinear').fit(X_train,y_train)
print(LR)
yhat = LR.predict(X_test)
print(yhat)
yhat_prob = LR.predict_proba(X_test)
print(yhat_prob)
from sklearn.metrics import classification_report, confusion_matrix
import itertools
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
print(confusion_matrix(y_test, yhat, labels=[1,0]))
cnf_matrix = confusion_matrix(y_test, yhat, labels=[1,0])
np.set_printoptions(precision=2)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['Purchased=1','Purchased=0'],normalize= False, title='Confusion matrix')
print (classification_report(y_test, yhat))
from sklearn.metrics import roc_curve, auc
roc_curve(y_test, yhat)
probs = yhat_prob
preds = probs[:,1]
fpr, tpr, threshold = roc_curve(y_test, preds)
roc_auc = auc(fpr, tpr)
import matplotlib.pyplot as plt
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
###Output
precision recall f1-score support
0 0.89 0.93 0.91 70
1 0.81 0.73 0.77 30
accuracy 0.87 100
macro avg 0.85 0.83 0.84 100
weighted avg 0.87 0.87 0.87 100
###Markdown
4. Linear Discriminant Analysis
###Code
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
LDA=LinearDiscriminantAnalysis(solver='svd').fit(X_train,y_train)
print(LDA)
print(LDA.coef_)
print(LDA.means_)
print(LDA.priors_)
yhat2=LDA.predict(X_test)
print(yhat2)
yhat_prob2 = LDA.predict_proba(X_test)
print(yhat_prob2)
print("Confusion Matrix LR\n",confusion_matrix(y_test, yhat, labels=[1,0]))
print("Confusion Matrix LDA\n",confusion_matrix(y_test, yhat2, labels=[1,0]))
print ("Performance LR\n",classification_report(y_test, yhat))
print ("Performance LDA\n",classification_report(y_test, yhat2))
###Output
Performance LR
precision recall f1-score support
0 0.89 0.93 0.91 70
1 0.81 0.73 0.77 30
accuracy 0.87 100
macro avg 0.85 0.83 0.84 100
weighted avg 0.87 0.87 0.87 100
Performance LDA
precision recall f1-score support
0 0.90 0.93 0.92 70
1 0.82 0.77 0.79 30
accuracy 0.88 100
macro avg 0.86 0.85 0.85 100
weighted avg 0.88 0.88 0.88 100
###Markdown
Logistic regression and linear discriminant analysis give almost identical results here; LDA is marginally better (test accuracy 0.88 vs 0.87).
###Code
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
aranged_pc1 = np.arange(start = X_set[:, 0].min(), stop = X_set[:, 0].max(), step = 0.01)
aranged_pc2 = np.arange(start = X_set[:, 1].min(), stop = X_set[:, 1].max(), step = 0.01)
X1, X2 = np.meshgrid(aranged_pc1, aranged_pc2)
plt.contourf(X1, X2, LDA.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.5, cmap = ListedColormap(('orange', 'blue', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green','blue'))(i), label = j)
plt.title('Decision Boundary LDA')
plt.xlabel('Age')
plt.ylabel('EstimatedSalary')
plt.legend()
plt.show()
###Output
'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.
'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.
###Markdown
5. Quadratic Discriminant Analysis
###Code
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
QDA=QuadraticDiscriminantAnalysis().fit(X_train,y_train)
print(QDA)
print("QDA means\n",QDA.means_)
print("QDA priors\n",QDA.priors_)
yhat3=QDA.predict(X_test)
print(yhat3)
yhat_prob3 = QDA.predict_proba(X_test)
print(yhat_prob3)
print("Confusion Matrix QDA\n",confusion_matrix(y_test, yhat3, labels=[1,0]))
print ("Performance QDA\n",classification_report(y_test, yhat3))
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
aranged_pc1 = np.arange(start = X_set[:, 0].min(), stop = X_set[:, 0].max(), step = 0.01)
aranged_pc2 = np.arange(start = X_set[:, 1].min(), stop = X_set[:, 1].max(), step = 0.01)
X1, X2 = np.meshgrid(aranged_pc1, aranged_pc2)
plt.contourf(X1, X2, QDA.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.5, cmap = ListedColormap(('orange', 'blue', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green','blue'))(i), label = j)
plt.title('Decision Boundary QDA')
plt.xlabel('Age')
plt.ylabel('EstimatedSalary')
plt.legend()
plt.show()
probs1 = yhat_prob
probs2= yhat_prob2
probs3= yhat_prob3
preds1 = probs1[:,1]
preds2 = probs2[:,1]
preds3 = probs3[:,1]
fpr1, tpr1, threshold = roc_curve(y_test, preds1)
roc_auc1 = auc(fpr1, tpr1)
fpr2, tpr2, threshold = roc_curve(y_test, preds2)
roc_auc2 = auc(fpr2, tpr2)
fpr3, tpr3, threshold = roc_curve(y_test, preds3)
roc_auc3 = auc(fpr3, tpr3)
# method I: plt
import matplotlib.pyplot as plt
plt.title('Receiver Operating Characteristic')
plt.plot(fpr1, tpr1, 'b', label = 'AUC LR = %0.2f' % roc_auc1)
plt.plot(fpr2, tpr2, 'r', label = 'AUC LDA = %0.2f' % roc_auc2)
plt.plot(fpr3, tpr3, 'g', label = 'AUC QDA = %0.2f' % roc_auc3)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
###Output
_____no_output_____
###Markdown
The logistic regression and linear discriminant analysis models have similar AUCs and nearly identical ROC curves. The quadratic model has a higher AUC, so its ROC curve sits above the other two and it achieves better accuracy. All three models are relevant because their AUC is above 0.8.
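To make this comparison a little more robust than a single train/test split, here is a minimal sketch (assuming the scaled `X` and `y` defined earlier in this notebook) that compares the three models with 10-fold cross-validated AUC:
```python
from sklearn.model_selection import cross_val_score

models = {
    'LR':  LogisticRegression(C=0.01, solver='liblinear'),
    'LDA': LinearDiscriminantAnalysis(solver='svd'),
    'QDA': QuadraticDiscriminantAnalysis(),
}
for name, clf in models.items():
    scores = cross_val_score(clf, X, y, cv=10, scoring='roc_auc')
    print(f'{name}: mean AUC = {scores.mean():.3f} (+/- {scores.std():.3f})')
```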
###Code
###Output
_____no_output_____ |
Hansard_to_CSV2.ipynb | ###Markdown
something will have to go here, to:1. itterate through all XMLs! but make sure is not re-writing each one2. elif *poem*(para) print: else=False
###Code
df.to_csv("output.csv")
###Output
_____no_output_____ |
Classification/SMT202_YouthSurvey_SentimentAnalysis.ipynb | ###Markdown
VADER - Valence Aware Dictionary and Sentiment Reasoner: a lexicon- and rule-based sentiment analysis tool that is specifically attuned to sentiments expressed in social media
###Code
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
analyser = SentimentIntensityAnalyzer()
###Output
_____no_output_____
###Markdown
SENTIMENT ANALYSIS
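Before running the full analysis, a quick sanity check of what `polarity_scores` returns for a single sentence (the example sentence is made up; it assumes the `vader_lexicon` resource has been downloaded, e.g. via `nltk.download('vader_lexicon')`):
```python
example = "The intergenerational hub sounds like a great idea!"
print(analyser.polarity_scores(example))
# returns a dict with 'neg', 'neu', 'pos' proportions and a 'compound' score in [-1, 1]
```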
###Code
#function for sentiment analysis
def intensity_sentiment(data):
intensity = {"sentence":[],"positive (%)":[],"neutral (%)":[],"negative (%)":[],"compound":[]}
clean_data=[]
x= train_data[str(data)]
for i in x:
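        # keep only answers longer than 4 characters (skips blank / trivial responses)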
if len(i)>4:
clean_data.append(i)
print(len(x))
print(len(clean_data))
for sentence in clean_data:
score = analyser.polarity_scores(sentence)
intensity['sentence'].append(sentence)
intensity['positive (%)'].append(round(score['pos']*100))
intensity['neutral (%)'].append(round(score['neu']*100))
intensity['negative (%)'].append(round(score['neg']*100))
intensity['compound'].append(score['compound'])
result = pd.DataFrame(intensity)
return result
###Output
_____no_output_____
###Markdown
sentiment analysis for intergenerational hub
###Code
x = intensity_sentiment('further_thoughts')
x.head()
mean_sentiment_1 = x.mean(axis=0)
mean_sentiment_1
###Output
_____no_output_____
###Markdown
sentiment analysis for branding opp for SMU
###Code
x =intensity_sentiment('branding_smu')
x.head()
mean_sentiment_2 = x.mean(axis=0)
mean_sentiment_2
###Output
_____no_output_____
###Markdown
sentiment analysis for disruptions to student life
###Code
x =intensity_sentiment('disrupt_reason')
x.head()
mean_sentiment_3= x.mean(axis=0)
mean_sentiment_3
compiled_sentiment ={'positive (%)':[],'neutral (%)':[],'negative (%)':[],'compound':[]}
x =['positive (%)','neutral (%)','negative (%)','compound']
for i in range(0,len(x)-1):
compiled_sentiment[x[i]].append(mean_sentiment_1[i])
compiled_sentiment[x[i]].append(mean_sentiment_2[i])
compiled_sentiment[x[i]].append(mean_sentiment_3[i])
compiled_sentiment['compound'].append(mean_sentiment_1['compound'])
compiled_sentiment['compound'].append(mean_sentiment_2['compound'])
compiled_sentiment['compound'].append(mean_sentiment_3['compound'])
compiled_sentiment_df = pd.DataFrame(compiled_sentiment)
compiled_sentiment_df.rename(index={0:'Thoughts on Intergenerational Hub',1:'Branding Opportunities for SMU',2:'Potential Disruptions to Student Life'}, inplace=True)
compiled_sentiment_df
compiled_sentiment_df.plot.bar(stacked=True,rot=25, title="Sentiment Analysis for Three Questions");
###Output
_____no_output_____
###Markdown
WORD CLOUD
###Code
from wordcloud import WordCloud, STOPWORDS
stopwords = set(STOPWORDS)
def wordcloud_sentiment(data):
clean_data=[]
clean_data_str=''
x= train_data[str(data)]
for i in x:
if len(i)>4:
clean_data.append(i.lower())
clean_data_str = ' '.join(sentence for sentence in clean_data)
#print(clean_data_str)
wordcloud = WordCloud(width = 800, height = 800,
background_color ='white',
stopwords = stopwords,
min_font_size = 15).generate(clean_data_str)
# plot the WordCloud image
plt.figure(figsize = (7, 7), facecolor = None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad = 0)
plt.show()
return
wordcloud_sentiment('further_thoughts')
wordcloud_sentiment('branding_smu')
wordcloud_sentiment('disrupt_reason')
###Output
_____no_output_____ |
PyTorch-Early-Access/NoteBooks/FL/Provisioning.ipynb | ###Markdown
Provisioning Federated learning (FL) Tool FL has been simplified in V3.1 to have a provisioning tool that allows admins to:- Configure an FL experiment- Send startup packages to FL clients (password-protected zip files) By the end of this notebook you will be able to provision an FL experiment and start the server. Prerequisites- Running this notebook from within the Clara docker following the setup in [readMe.md](../../readMe.md)- Provisioning doesn't require GPUs. Resources You could watch the free GTC 2020 talks covering Clara Train SDK - [Clara Developer Day: Federated Learning using Clara Train SDK](https://developer.nvidia.com/gtc/2020/video/S22564) DataSet No dataset is used in this notebook. Let's get started The cell below defines functions that we will use throughout the notebook
###Code
def listDirs(newMMARDir):
!ls $newMMARDir
!echo ----config
!ls $newMMARDir/config
!echo ----commands
!ls $newMMARDir/commands
def printFile(filePath,lnSt,lnOffset):
print ("showing ",str(lnOffset)," lines from file ",filePath, "starting at line",str(lnSt))
lnOffset=lnSt+lnOffset
!< $filePath head -n "$lnOffset" | tail -n +"$lnSt"
###Output
_____no_output_____
###Markdown
Provisioning Components The provisioning tool is the first step to configure an FL experiment. This consists of creating: 1. A project yaml file, which defines: project name, participants, server name and other settings 2. An authorization json file, which defines: groups, roles, rights 3. Running the provisioning tool 1. UI tool to generate project.yaml and authorization.json We have developed a simple html page that generates the project.yaml and authorization.json files for you. Simply open the html or run the cell below to see the page. You would need to:- Change the server name.- Add/remove groups.- Add/remove policies.- Add/remove users.- Click `Generate artifacts`.- Click download, or copy / paste the files as new yaml and json files.
###Code
import IPython
IPython.display.IFrame('./FLprovUI.html',width=850,height=700)
###Output
_____no_output_____
###Markdown
2. Run Provisioning tool For simplicity we have included project1.yml and project1auth.json files for you to use in this notebook. In order to see their content simply run the cell below
###Code
MMAR_ROOT="/claraDevDay/FL/"
PROV_DIR="provisioning"
PROJ_NAME="project1"
printFile(MMAR_ROOT+PROJ_NAME+".yml",0,50)
print("---------------------")
printFile(MMAR_ROOT+PROJ_NAME+"auth.json",0,200)
###Output
_____no_output_____
###Markdown
2.1 Run provisioning tool The cell below shows the help for the provisioning tool CLI
###Code
!provision -h
%cd $MMAR_ROOT
!rm -r $PROJ_NAME
%mkdir -p $PROJ_NAME/$PROV_DIR
# !ln -s /opt/nvidia/medical/tools/project.yml $MMAR_ROOT/project.yml
PROJ_PATH=MMAR_ROOT+PROJ_NAME+"/"
PROV_PATH=PROJ_PATH+PROV_DIR+"/"
%cd $PROJ_PATH
!provision -p $MMAR_ROOT/$PROJ_NAME'.yml' -o $PROV_DIR -t $PROV_PATH/audit.pkl -a $MMAR_ROOT/$PROJ_NAME'auth.json'
###Output
_____no_output_____
###Markdown
3. Send startup kits to participants In a real experiment, you would send each package to its site so it can be run on their own system. Here we extract and simulate the server, the clients and the admin users all within this tutorial. The cell above should have printed out the password for each package. Replace the passwords in the cell below with the corresponding ones printed above before running it.
###Code
%cd $PROV_PATH
server,client1,client2,client3,client4="server","client1","client2","client3","client4"
admin,leadIT,siteResearch,leadITSec="admin","leadIT","siteResearch","leadITSec"
!unzip -oP Gt70p3kYKoIVfM48 server.zip -d ../$server
!unzip -oP E9HCjgF6VBMoALrU client1.zip -d ../$client1
!unzip -oP mXoq4RdhItNuDvPe client2.zip -d ../$client2
!unzip -oP E9HCjgF6VBMoALrU client3.zip -d ../$client3
!unzip -oP E9HCjgF6VBMoALrU client4.zip -d ../$client4
!unzip -oP ecpUmT10J0WDhsKu [email protected] -d ../$admin
!unzip -oP ecpUmT10J0WDhsKu [email protected] -d ../$leadIT
!unzip -oP ecpUmT10J0WDhsKu [email protected] -d ../$siteResearch
!unzip -oP ecpUmT10J0WDhsKu [email protected] -d ../$leadITSec
###Output
_____no_output_____ |
regressao_svr_planodesaude.ipynb | ###Markdown
Montagem de dataframe
###Code
import pandas as pd
base = pd.read_csv('plano_saude2.csv')
x = base.iloc[:, 0:1].values
y = base.iloc[:, 1:2].values
###Output
_____no_output_____
###Markdown
Pré-processamento dos dados
###Code
from sklearn.preprocessing import StandardScaler
scaler_x = StandardScaler()
x = scaler_x.fit_transform(x)
scaler_y = StandardScaler()
y = scaler_y.fit_transform(y)
###Output
_____no_output_____
###Markdown
Regressão kernel linear
###Code
from sklearn.svm import SVR
regressor_linear = SVR(kernel='linear')
regressor_linear.fit(x, y)
regressor_linear.score(x, y)
import matplotlib.pyplot as plt
import numpy as np
plt.scatter(x, y)
plt.plot(x, regressor_linear.predict(x), color='red')
plt.title('Regressão SVR - kernel linear')
plt.xlabel('Idade')
plt.ylabel('Custo')
###Output
_____no_output_____
###Markdown
Regressão kernel polinomial
###Code
regressor_poly = SVR(kernel='poly', degree=3)
regressor_poly.fit(x, y)
regressor_poly.score(x, y)
plt.scatter(x, y)
plt.plot(x, regressor_poly.predict(x), color='red')
plt.title('Regressão SVR - kernel polinomial')
plt.xlabel('Idade')
plt.ylabel('Custo')
###Output
_____no_output_____
###Markdown
Regressão kernel RBF
###Code
regressor_rbf = SVR(kernel='rbf')
regressor_rbf.fit(x, y)
regressor_rbf.score(x, y)
plt.scatter(x, y)
plt.plot(x, regressor_rbf.predict(x), color='red')
plt.title('Regressão SVR - kernel RBF')
plt.xlabel('Idade')
plt.ylabel('Custo')
previsao_1 = scaler_y.inverse_transform(regressor_linear.predict(scaler_x.transform(np.array(40).reshape(1, -1))))
previsao_1
previsao_2 = scaler_y.inverse_transform(regressor_poly.predict(scaler_x.transform(np.array(40).reshape(1, -1))))
previsao_2
previsao_3 = scaler_y.inverse_transform(regressor_rbf.predict(scaler_x.transform(np.array(40).reshape(1, -1))))
previsao_3
###Output
_____no_output_____ |
site/tr/r1/tutorials/distribute/training_loops.ipynb | ###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Egitim donguleri ile tf.distribute.Strategy Run in Google Colab View source on GitHub Note: Bu dรถkรผmanlar TensorFlow gรถnรผllรผ kullanฤฑcฤฑlarฤฑ tarafฤฑndan รงevirilmiลtir.Topluluk tarafฤฑndan saฤlananan รงeviriler gรถnรผllรผlerin ellerinden geldiฤincegรผncellendiฤi iรงin [Resmi ฤฐngilizce dรถkรผmanlar](https://www.tensorflow.org/?hl=en)ile bire bir aynฤฑ olmasฤฑnฤฑ garantileyemeyiz. Eฤer bu tercรผmeleri iyileลtirmekiรงin รถnerileriniz var ise lรผtfen [tensorflow/docs](https://github.com/tensorflow/docs)havuzuna pull request gรถnderin. Gรถnรผllรผ olarak รงevirilere katkฤฑda bulunmak iรงin[[email protected]](https://groups.google.com/a/tensorflow.org/forum/!forum/docs-tr)listesi ile iletiลime geรงebilirsiniz. Bu rehber egitim donguleri ile [`tf.distribute.Strategy`](https://www.tensorflow.org/r1/guide/distribute_strategy)'nin nasil kullanildigini gosteriyor. Basit bir CNN modelini Fashion MNIST veri seti ile egitecegiz. Bu veri seti icinde 28X28 boyutunda 60000 egitim resmini ve 28X28 boyutunda 10000 test resmini barindirir.Burada bize esneklik ve daha cok kontrol kazandirmasi icin ozellestirilmis egitim donguleri kullanacagiz. Ustelik, bu ozel donguler modeli ve egitim dongulerindeki hatalari ayiklamamizi da kolaylastiracaktir.
###Code
# TensorFlow'u yukleyelim
import tensorflow.compat.v1 as tf
# Yardimci kutuphaneler
import numpy as np
import os
print(tf.__version__)
###Output
_____no_output_____
###Markdown
Fashion MNIST veri setini indirelim
###Code
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Diziye yeni bir boyut ekleyelim-> new shape == (28, 28, 1)
# Bunu yapmamizin sebebi ise modelimizin ilk katmaninin katlamali olmasi
# ve 4D bir girdiye ihtiyac duyar (batch_size, height, width, channels).
# batch_size boyutunu daha sonra ekleyecegiz.
train_images = train_images[..., None]
test_images = test_images[..., None]
# Resimleri [0, 1] araligina indirgeyelim.
train_images = train_images / np.float32(255)
test_images = test_images / np.float32(255)
train_labels = train_labels.astype('int64')
test_labels = test_labels.astype('int64')
###Output
_____no_output_____
###Markdown
Degiskenleri ve grafigi dagitmak icin bir taktik olusturalim `tf.distribute.MirroredStrategy` nasil calisir?* Butun degiskenler ve model grafigi birkac kere kopyalanir.* Girdi bu kopyalara esit olarak dagitilir.* Her kopya verilen girdiye gore bir kayip ve degisim tablosu hesaplar.* Butun degisim verileri toplanir ve kopyalardaki degerler bu toplama gore guncellenir.* Bu islemden sonra, ayni guncelleme degiskenlerin kopyalarina da uygulanir. Note: Butun kodu tek bir kapsam icine koyabilirsiniz, fakat biz burada daha aciklayici olmasi icin kodu boluyoruz.
###Code
# Eger kullanilacak cihazlar `tf.distribute.MirroredStrategy` yapicisinda belirtilmediyse
# otomatik olarak bulunacaktir.
strategy = tf.distribute.MirroredStrategy()
print ('Number of devices: {}'.format(strategy.num_replicas_in_sync))
###Output
_____no_output_____
###Markdown
Girdi hattinin kurulmasi Eger bir model birden fazla GPU'da egitiliyorsa, grup boyutu buna orantili olarak arttirilmalidir ki fazla bilgisayar gucunu verimli bir sekilde kullanabilelim. Ayrica, egitim hizi da orantili olarak ayarlanmaidir.
###Code
BUFFER_SIZE = len(train_images)
BATCH_SIZE_PER_REPLICA = 64
BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync
EPOCHS = 10
###Output
_____no_output_____
###Markdown
`strategy.make_dataset_iterator`, veriyi kopyalara esit olarak dagitan bir iterator olusturur. Note: Bu API yakin zamanda degisecektir.
###Code
with strategy.scope():
train_dataset = tf.data.Dataset.from_tensor_slices(
(train_images, train_labels)).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
train_iterator = strategy.make_dataset_iterator(train_dataset)
test_dataset = tf.data.Dataset.from_tensor_slices(
(test_images, test_labels)).batch(BATCH_SIZE)
test_iterator = strategy.make_dataset_iterator(test_dataset)
###Output
_____no_output_____
###Markdown
Modelin olusturulmasi`tf.keras.Sequential` ile modelimizi olusturalim. Model Subclassing API'yini da kullanarak bu islemi yapabiliriz.
###Code
with strategy.scope():
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu',
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Conv2D(64, 3, activation='relu'),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
optimizer = tf.train.GradientDescentOptimizer(0.001)
###Output
_____no_output_____
###Markdown
Kayip fonksiyonunu tanimlayalimNormalde, eger 1 GPU/CPU'lu bir makine kullaniyorsak, kayip girdi grubundaki ornek sayisina bolunur.*Peki `tf.distribute.Strategy` ile kayip nasil hesaplanir?*> Ornegin, 4 GPU'muz ve boyutu 64 olan girdimiz oldugunu varsayalim. Bu girdiler esit olarak 4 GPU (4 kopya) ustune bolunur, yani her kopyaya giden girdi grub boyutu 16 idir.> Her kopyadaki model icindeki girdinin ustunden gecerek kayip degerini hesaplar. Simdi, bu kayip degerini icindeki girdi sayisina (16) bolmek yerine, en bastaki evrensel girdi miktarina (64) boler. *Neden bu islem boyle yaplir?*> Cunku degisim degerleri her kopyada hesaplandiktan sonra, butun kopyalardaki degerler butun degisim degerlerinin toplamina esitlenir.*Bunu TensorFlow'da nasil yapabiliriz?*Eger ozellestirilmis bir egitim dongusu yaziyorsaniz, her ornekteki kayiplari toplayip butun orneklerin toplamina bolmelisiniz:```GLOBAL_BATCH_SIZE:`scale_loss = tf.reduce_sum(loss) * (1. / GLOBAL_BATCH_SIZE)````* `tf.reduce_mean` metodunu kullanmanizi tavsiye etmiyoruz. Bu metod kayip degerini kopyalardaki ornek sayisina boler ki bu her adimda degisebilir.* Bu indirgeme ve olcekleme keras'ta otomatok olarak yapilir: model.fit ve model.compile ile* Eger `tf.keras.losses` siniflarini kullaniyorsaniz, kayip indirgemesinin ozellikle `NONE` ya da `SUM` olarak belirtilmesi gerekmektedir. `AUTO` ve `SUM_OVER_BATCH_SIZE` ise `tf.distribute.Strategy` ile birlikte kullanilamaz. Cunku kullanicilarin `AUTO` kullanmadan once yaptiklari indirgemenin o anki dagitim ornegindeki dogrulugundan emin olmalari gerekir. `SUM_OVER_BATCH_SIZE` kullanilamaz cunku su anki haliyle sadece kopyadaki ornek sayisina bolum yapip asil toplam ornek sayisina bolme islemini kullaniciya birakir, ki bu cok kolay gozden kacabilecek bir noktadir. Onun yerine kullanicinin indirgemeyi kendilerinin yapmalarini istiyoruz. Egitim dongusu
###Code
with strategy.scope():
def train_step():
def step_fn(inputs):
images, labels = inputs
logits = model(images)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
loss = tf.reduce_sum(cross_entropy) * (1.0 / BATCH_SIZE)
train_op = optimizer.minimize(loss)
with tf.control_dependencies([train_op]):
return tf.identity(loss)
per_replica_losses = strategy.experimental_run(
step_fn, train_iterator)
mean_loss = strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
return mean_loss
with strategy.scope():
iterator_init = train_iterator.initialize()
var_init = tf.global_variables_initializer()
loss = train_step()
with tf.Session() as sess:
sess.run([var_init])
for epoch in range(EPOCHS):
sess.run([iterator_init])
for step in range(10000):
if step % 1000 == 0:
print('Epoch {} Step {} Loss {:.4f}'.format(epoch+1,
step,
sess.run(loss)))
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
tf.distribute.Strategy with training loops. Note: these documents were translated by volunteer TensorFlow users. Since community translations are maintained on a best-effort basis, there is no guarantee that they exactly match the [official English documentation](https://www.tensorflow.org/?hl=en). If you have suggestions to improve these translations, please send a pull request to the [tensorflow/docs](https://github.com/tensorflow/docs) repository. To volunteer to contribute translations, please contact the [[email protected]](https://groups.google.com/a/tensorflow.org/forum/!forum/docs-tr) list. This guide shows how to use [`tf.distribute.Strategy`](https://www.tensorflow.org/r1/guide/distribute_strategy) with custom training loops. We will train a simple CNN model on the Fashion MNIST dataset, which contains 60,000 training images and 10,000 test images, each of size 28x28. We use custom training loops here because they give us flexibility and more control; they also make it easier to debug the model and the training loop.
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
# Load TensorFlow
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow.compat.v1 as tf
# Helper libraries
import numpy as np
import os
print(tf.__version__)
###Output
_____no_output_____
###Markdown
Download the Fashion MNIST dataset
###Code
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Add a new axis to the array -> new shape == (28, 28, 1)
# We do this because the first layer of our model is convolutional
# and expects a 4D input (batch_size, height, width, channels).
# The batch_size dimension will be added later.
train_images = train_images[..., None]
test_images = test_images[..., None]
# Scale the images to the [0, 1] range.
train_images = train_images / np.float32(255)
test_images = test_images / np.float32(255)
train_labels = train_labels.astype('int64')
test_labels = test_labels.astype('int64')
###Output
_____no_output_____
###Markdown
Create a strategy to distribute the variables and the graph. How does `tf.distribute.MirroredStrategy` work? * All of the variables and the model graph are replicated on each replica. * The input is distributed evenly across the replicas. * Each replica computes the loss and the gradients for the input it received. * The gradients are summed across all of the replicas, and the values on each replica are updated with this sum. * After this, the same update is applied to the copies of the variables on each replica. Note: you can put all of the code below inside a single scope; we split it into several cells here only to make the explanation clearer.
###Code
# If the devices to use are not specified in the `tf.distribute.MirroredStrategy` constructor,
# they will be detected automatically.
strategy = tf.distribute.MirroredStrategy()
print ('Number of devices: {}'.format(strategy.num_replicas_in_sync))
###Output
_____no_output_____
###Markdown
Setting up the input pipeline. If a model is trained on multiple GPUs, the batch size should be increased proportionally so that the extra computing power is used efficiently, and the learning rate should be adjusted accordingly.
###Code
BUFFER_SIZE = len(train_images)
BATCH_SIZE_PER_REPLICA = 64
BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync
EPOCHS = 10
###Output
_____no_output_____
###Markdown
`strategy.make_dataset_iterator` creates an iterator that distributes the data evenly across the replicas. Note: this API will change in the near future.
###Code
with strategy.scope():
train_dataset = tf.data.Dataset.from_tensor_slices(
(train_images, train_labels)).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
train_iterator = strategy.make_dataset_iterator(train_dataset)
test_dataset = tf.data.Dataset.from_tensor_slices(
(test_images, test_labels)).batch(BATCH_SIZE)
test_iterator = strategy.make_dataset_iterator(test_dataset)
###Output
_____no_output_____
###Markdown
Building the model. Let's build the model with `tf.keras.Sequential`. We could also do this with the Model Subclassing API.
###Code
with strategy.scope():
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu',
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Conv2D(64, 3, activation='relu'),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
optimizer = tf.train.GradientDescentOptimizer(0.001)
###Output
_____no_output_____
###Markdown
Defining the loss function. Normally, on a machine with a single GPU/CPU, the loss is divided by the number of examples in the input batch. *So how is the loss computed with `tf.distribute.Strategy`?* > For example, suppose we have 4 GPUs and a global batch size of 64. The input is split evenly across the 4 GPUs (4 replicas), so each replica receives a batch of size 16. > The model on each replica runs a forward pass on its share of the input and computes the loss. Instead of dividing this loss by the number of examples on the replica (16), it is divided by the global batch size (64). *Why is it done this way?* > Because the gradients computed on each replica are summed across all replicas before the update. *How do we do this in TensorFlow?* If you are writing a custom training loop, sum the per-example losses and divide the sum by the global batch size: ```scale_loss = tf.reduce_sum(loss) * (1. / GLOBAL_BATCH_SIZE)``` * Using `tf.reduce_mean` is not recommended, because it divides the loss by the per-replica number of examples, which can vary from step to step. * This reduction and scaling is done automatically in Keras with `model.compile` and `model.fit`. * If you use the `tf.keras.losses` classes, the loss reduction must be explicitly set to `NONE` or `SUM`. `AUTO` and `SUM_OVER_BATCH_SIZE` are disallowed together with `tf.distribute.Strategy`: `AUTO` because the user should confirm explicitly which reduction is correct in the distributed case, and `SUM_OVER_BATCH_SIZE` because it would only divide by the per-replica batch size and silently leave the division by the number of replicas to the user, which is easy to miss. Instead, the user is asked to do the reduction explicitly. Training loop
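To make the reduction rules concrete, here is a minimal hedged sketch (not part of the original notebook) of how per-example losses could be scaled by the global batch size; it assumes a TF 2.x-style `tf.keras.losses.SparseCategoricalCrossentropy` with `Reduction.NONE` is available, whereas the cell below keeps the original `tf.nn` implementation.

```python
# Hedged sketch: per-example losses with reduction disabled, scaled by the *global* batch size.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
    reduction=tf.keras.losses.Reduction.NONE)

def compute_scaled_loss(labels, probabilities):
    per_example_loss = loss_object(labels, probabilities)        # shape: (per_replica_batch,)
    return tf.reduce_sum(per_example_loss) * (1.0 / BATCH_SIZE)  # BATCH_SIZE is the global batch size
```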
###Code
with strategy.scope():
def train_step():
def step_fn(inputs):
images, labels = inputs
logits = model(images)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
loss = tf.reduce_sum(cross_entropy) * (1.0 / BATCH_SIZE)
train_op = optimizer.minimize(loss)
with tf.control_dependencies([train_op]):
return tf.identity(loss)
per_replica_losses = strategy.experimental_run(
step_fn, train_iterator)
mean_loss = strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
return mean_loss
with strategy.scope():
iterator_init = train_iterator.initialize()
var_init = tf.global_variables_initializer()
loss = train_step()
with tf.Session() as sess:
sess.run([var_init])
for epoch in range(EPOCHS):
sess.run([iterator_init])
for step in range(10000):
if step % 1000 == 0:
print('Epoch {} Step {} Loss {:.4f}'.format(epoch+1,
step,
sess.run(loss)))
###Output
_____no_output_____ |
examples/example_sphere_models.ipynb | ###Markdown
Sphere Models. In this section, we describe restricted sphere models, which have been used to represent (tumor) cells. Spheres by their nature have no orientation; their only parameter is their diameter. Similarly to the cylinder models, spheres have different model approximations that make different assumptions about the acquisition scheme. We start by describing the simplest Dot model (S1), and move towards more general models (S4). Dot: S1. The Dot model represents a non-diffusing component, which could represent trapped water in glial cells *(Stanisz et al. 1997)*, or axons with a different orientation to the main bundle *(Panagiotaki et al. 2009)*. Notably, *(Alexander et al. 2010, Veraart et al. 2016)* note that the contribution of the Dot model is negligible in *in-vivo* acquisitions. The signal of a Dot is described by a sphere with a diameter of zero, or equivalently an isotropic Gaussian compartment with $\lambda_{iso}$ set to zero. In other words, it is just a function returning one no matter the input:\begin{equation} E_{\textrm{dot}}=1.\end{equation}
###Code
from dmipy.signal_models import sphere_models
dot = sphere_models.S1Dot()
###Output
_____no_output_____
###Markdown
Soderman Sphere: S2To keep naming consistency with the cylinder model with the same acquisition assumptions we call this the Soderman Sphere, but the equation is given by *(Balinov et al. 1993)*. The radius $R$ now corresponds to the radius of the sphere. \begin{equation}E(q,R|\delta\rightarrow0,\Delta\gg R^2/D)=\left(\frac{3}{(2\pi q R) ^ 2}\left(\frac{\sin(2\pi q R)}{(2\pi q R)} - \cos(2\pi q R)\right)\right) ^2\end{equation}
###Code
import numpy as np
from dmipy.core.acquisition_scheme import acquisition_scheme_from_qvalues
sphere_stejskal_tanner = sphere_models.S2SphereStejskalTannerApproximation()
Nsamples = 100
bvecs = np.tile(np.r_[0., 1., 0.], (Nsamples, 1)) # doesn't matter it has no orientation
qvals = np.linspace(0, 3e5, Nsamples)
delta = 0.01
Delta = 0.03
scheme = acquisition_scheme_from_qvalues(qvals, bvecs, delta, Delta)
import matplotlib.pyplot as plt
%matplotlib inline
for diameter in np.linspace(1e-6, 1e-5, 5):
plt.plot(qvals, sphere_stejskal_tanner(scheme, diameter=diameter),
label="Diameter="+str(1e6 * diameter)+"$\mu m$")
plt.legend(fontsize=12)
plt.title("Stejskal-Tanner attenuation over sphere diameter", fontsize=17)
plt.xlabel("q-value [1/m]", fontsize=15)
plt.ylabel("E(q)", fontsize=15);
###Output
_____no_output_____
###Markdown
Callaghan Sphere: S3*(Callaghan 1995)*. Coming Soon... Gaussian Phase Sphere: S4*(Balinov et al. 1993)* derived the formulation of the Gaussian-Phase approximation for spheres, which models the signal attenuation for finite pulse duration $\delta$ and pulse separation $\Delta$. This approximation has been used for the VERDICT model for tumor characterization *(Panagiotaki et al. 2014)*. \begin{equation}\ln\left[E_\perp(G,\delta,\Delta,R)\right]=\frac{2\gamma^2 G^2}{D}\sum_{m=1}^{\infty}\frac{a_m^{-4}}{a^2_mR^2-2}\times\left[2\delta-\dfrac{2 + e^{-a_m^2D(\Delta-\delta)} - 2e^{-a_m^2D\delta} - 2e^{-a_m^2D\Delta}+e^{-a_m^2D(\Delta+\delta)}}{a_m^2D}\right]\end{equation}
###Code
gaussian_phase = sphere_models.S4SphereGaussianPhaseApproximation()
###Output
_____no_output_____ |
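As a quick usage illustration (not part of the original notebook), the Gaussian phase model can be evaluated on the same acquisition scheme as in the S2 section above; this sketch assumes `S4SphereGaussianPhaseApproximation` exposes the same `diameter` parameter as the S2 model.

```python
# Hedged sketch: reuse `scheme`, `qvals`, `np` and `plt` from the S2 example above.
for diameter in np.linspace(1e-6, 1e-5, 5):
    plt.plot(qvals, gaussian_phase(scheme, diameter=diameter),
             label="Diameter=" + str(1e6 * diameter) + "$\mu m$")
plt.legend(fontsize=12)
plt.title("Gaussian phase attenuation over sphere diameter", fontsize=17)
plt.xlabel("q-value [1/m]", fontsize=15)
plt.ylabel("E(q)", fontsize=15);
```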
08-cython.ipynb | ###Markdown
Compiling to C from Python. Python is a fairly flexible interpreted language. You do not have to declare the types of the objects you manipulate: at run time, Python determines the type of each object and calls the corresponding machine code. Day to day, it is convenient to be able to handle integers and floats without having to worry about: types, integer overflow, out-of-bounds array accesses, division by zero, and so on.
###Code
print ("Des entiers: {}".format(12 * 3))
print ("Des flottants: {}".format(12 * 3.14))
print ("De trรจs grands entiers: {}".format(12 ** 345))
print ("Des listes trop courtes: {}".format([1, 2][2]))
###Output
_____no_output_____
###Markdown
This flexibility, much appreciated by engineers for the convenience it offers day to day, comes at a cost: performance. Going back to C is often necessary when computation time becomes a problem. The `numpy` library we have already used goes back to C for data arrays and calls the C functions that match the intuition behind Python operators; for example, `np.sin(x)` runs a loop that computes the sine of every element of an array. Introduction to Cython. Let's look at the code below, which integrates the function $f:x \mapsto x^2 - x$ with the rectangle rule.
###Code
def f(x):
return x**2-x
def integrate_f(a, b, N):
s = 0
dx = (b-a)/N
for i in range(N):
s += f(a+i*dx)
return s * dx
%timeit integrate_f(0, 5, 100000)
###Output
_____no_output_____
###Markdown
Cython (http://docs.cython.org/index.html) is a Python library that compiles native Python code to C. In practice, it is a new syntax that generates C code from annotated Python code. **The good news is that any non-annotated Python code is understood by Cython.** In the notebook, a `Cython` extension automates compiling and loading the compiled functions into Python. Start by loading this extension.
###Code
%load_ext Cython
###Output
_____no_output_____
###Markdown
Then prefix the code cells to compile with the `%%cython` line. This directive must be on the first line of the cell.
###Code
%%cython
def f(x):
return x**2-x
def integrate_f(a, b, N):
s = 0
dx = (b-a)/N
for i in range(N):
s += f(a+i*dx)
return s * dx
###Output
_____no_output_____
###Markdown
Now let's compare the performance.
###Code
%timeit integrate_f(0, 5, 100000)
###Output
_____no_output_____
###Markdown
In general, compiling plain, non-annotated Python code yields around a 30% improvement in computation time. But let's be bolder and annotate our variables with their types: the variables passed to the functions (adding `double`, `int`, etc.), and the variables local to our function (using **the `cdef` keyword**).
###Code
%%cython
def f(double x):
return x**2-x
def integrate_f(double a, double b, int N):
cdef int i
cdef double dx
s = 0
dx = (b-a)/N
for i in range(N):
s += f(a+i*dx)
return s * dx
%timeit integrate_f(0, 5, 100000)
###Output
_____no_output_____
###Markdown
The performance gap is starting to become interesting. The `%%cython` command has a `-a` option (for annotate) that helps guide our annotations. In the output displayed below the cell: the more yellow a line is, the closer it is to Python; the whiter a line is, the closer it is to C. You can click on the yellow (and white) lines to unfold the corresponding generated C code and understand which optimizations can still be made.
###Code
%%cython -a
def f(double x):
return x**2-x
def integrate_f(double a, double b, int N):
cdef int i
cdef double dx
s = 0
dx = (b-a)/N
for i in range(N):
s += f(a+i*dx)
return s * dx
###Output
_____no_output_____
###Markdown
With some practice, we can see here that the function `f` has no return type (see the annotation for line 3). Clicking on line 2 or line 11 also shows the extent of the damage (the volume of generated C code). We can therefore type the return value of `f`: **replace the `def` keyword with the `cdef` keyword** and then give the return type. The price of this optimization is that `f` is no longer callable from Python code; it can, however, still be called by the other functions in the Cython cell. This is why **it is not possible to type the return value of `integrate_f`**.
###Code
%%cython -a
cdef double f(double x):
return x**2-x
def integrate_f(double a, double b, int N):
cdef int i
cdef double dx
s = 0
dx = (b-a)/N
for i in range(N):
s += f(a+i*dx)
return s * dx
%timeit integrate_f(0, 5, 100000)
###Output
_____no_output_____
###Markdown
**Exercise:** One annotation is still missing from the previous code. Complete the optimization we started and compare the performance. (A possible solution sketch is shown after the timing cell below.)
###Code
# %load solutions/cython_integrate.py
%timeit integrate_f(0, 5, 100000)
###Output
_____no_output_____
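A possible solution, as a hedged sketch of what `solutions/cython_integrate.py` may contain - the forgotten annotation is the accumulator `s`:

```python
%%cython
cdef double f(double x):
    return x**2-x

def integrate_f(double a, double b, int N):
    cdef int i
    cdef double s, dx   # annotating s is the missing optimization
    s = 0
    dx = (b-a)/N
    for i in range(N):
        s += f(a+i*dx)
    return s * dx
```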
###Markdown
Calling C functions from Python (advanced). We want to call the `qsort` function from the C standard library to sort Python structures. Python is of course well equipped to sort lists, but the example is very instructive. The `qsort` function lives in the C standard library, in `stdlib.h`, where its signature is:
```c
void qsort(void *array, size_t count, size_t size, int (*compare)(const void*, const void*))
```
`qsort` works with opaque structures (`void*`): `array` is the array to sort, made of `count` elements of `size` bytes each. The `compare` function takes two references (pointers) into the array `array` and returns a negative integer if `a < b`, zero if `a == b`, and a positive integer if `a > b`. We are going to write a `py_qsort` function that sorts a list of integers with the C `qsort` function. We will need to: create a C array of integers of the right size; copy the Python integers into the C array; call `qsort` with the right comparison function; and read back the sorted array and convert it into a Python list. Cython can call functions declared in C *headers* thanks to the `cdef extern from` construct; we then copy, line by line, the declarations of the functions we want to use from Cython.
###Code
%%cython
cdef extern from "stdlib.h":
void qsort(void *array, size_t count, size_t size,
int (*compare)(const void*, const void*))
void *malloc(size_t size)
void free(void* ptr)
# Comparison function
cdef int int_compare(const void* a, const void* b):
cdef int ia, ib
ia = (<int*> a)[0] # cast to int -> dereference
ib = (<int*> b)[0]
return ia - ib
def py_qsort(list x):
cdef int *array
cdef int i, n
# Allocate the C array
n = len(x)
array = <int*> malloc(sizeof(int) * n)
if array == NULL:
raise MemoryError("Unable to allocate array")
# Fill the C array with the Python integers
for i in range(n):
array[i] = x[i]
# qsort the array
qsort(<void*> array, <size_t> n, sizeof(int), int_compare)
# Convert back to Python and free
for i in range(n):
x[i] = array[i]
free(array)
from random import shuffle
intlist = list(range(10))
shuffle(intlist)
print (intlist)
py_qsort(intlist)
print (intlist)
###Output
_____no_output_____
###Markdown
**Exercise:** Add a `reverse` keyword to the `py_qsort` function (defaulting to `False`) to sort in descending order. (A hedged solution sketch covering this and the next exercise appears after the test cells below.)
###Code
py_qsort(intlist)
print (intlist)
shuffle(intlist)
py_qsort(intlist, reverse = True)
print (intlist) # [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
###Output
_____no_output_____
###Markdown
**Exercise:** Add a `compare` keyword to the `py_qsort` function (defaulting to `None`) to pass a Python comparison function.
###Code
intlist = list(range(-10, 10))
shuffle(intlist)
def cmp(a, b):
return (abs(a) - abs(b))
py_qsort(intlist, compare = cmp)
print (intlist) # [0, -1, 1, -2, 2, 3, -3, -4, 4, -5, 5, -6, 6, 7, -7, 8, -8, 9, -9, -10]
shuffle(intlist)
py_qsort(intlist, compare = cmp, reverse = True)
print (intlist)
###Output
_____no_output_____ |
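Here is a hedged solution sketch covering both exercises (not the official solution): the optional Python `compare` callback is kept in a module-level variable, because `qsort` passes no user data to the C callback, and `reverse` simply flips the sign of the comparison. Note that exceptions raised inside the callback are not propagated by this sketch.

```python
%%cython
cdef extern from "stdlib.h":
    void qsort(void *array, size_t count, size_t size,
               int (*compare)(const void*, const void*))
    void *malloc(size_t size)
    void free(void* ptr)

# Module-level state reachable from the C callback (qsort has no user-data argument).
_py_compare = None
_sign = 1

cdef int int_compare(const void* a, const void* b):
    cdef int ia, ib
    ia = (<int*> a)[0]
    ib = (<int*> b)[0]
    if _py_compare is not None:
        return <int>(_sign * _py_compare(ia, ib))
    return <int>(_sign * (ia - ib))

def py_qsort(list x, compare=None, reverse=False):
    global _py_compare, _sign
    cdef int *array
    cdef int i, n
    _py_compare = compare
    _sign = -1 if reverse else 1
    n = len(x)
    array = <int*> malloc(sizeof(int) * n)
    if array == NULL:
        raise MemoryError("Unable to allocate array")
    for i in range(n):
        array[i] = x[i]
    qsort(<void*> array, <size_t> n, sizeof(int), int_compare)
    for i in range(n):
        x[i] = array[i]
    free(array)
    _py_compare = None
```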
content/06-good-code/documentation.ipynb | ###Markdown
Documentation. Documentation is the second piece on your path to writing strong code. Code style is the first, and code testing - to be discussed in the next chapter - is the third. Together, these make up the strong code trifecta. In this chapter we'll discuss how to move beyond having good code style (which is important!) to having well-documented and well-commented code. Code documentation is text that accompanies and/or is embedded within a software project that explains what the code is and how to use it. While there are many levels of documentation beyond what we'll discuss here, we'll focus on adding docstrings and code comments to your code. Documenting your code by including helpful comments and docstrings will take your code to the next level. As a rule, good code has good documentation - but code documentation should _not_ be used to try and fix unclear names or bad structure, so we can't forget about code style and everything discussed in the last chapter. Specifically, *comments* should add any additional context and information that helps explain to developers what the code is, how it works, and why it works that way, whereas *docstrings* should make how to use the code clear to users. In this chapter we'll discuss both docstrings and comments, which together lead to well-documented code. Most simply, documentation is incredibly important, undervalued, and underrated. Documentation is written for humans, in human language, to help the humans. It does not affect the functionality of the code, but it does help the humans reading and using the code. Comments. Comments are string literals written directly in the code, typically directed at developers - people reading and potentially writing or editing the code. As noted in the code style chapter, code comments should use `#`, followed by a space before the comment, and be written at the same indentation level as the code they describe. But when exactly should you include comments in your code? Generally, it's best for comments to focus on the *how* and *why*, rather than literally restating what the code does, because you can expect the people reading your code to understand the basic code constructs in Python. As such, a comment that reads "`# this is a for loop`" is distracting and uninformative. Instead, good code comments should explain any context needed to understand the task at hand, give a broad overview of the approach you are taking to perform the task, and, if you're using any unusual approaches, explain what they are and why you're using them. As noted previously, comments *must* be maintained: keep them up to date, meaning the comments included should apply to the code that is actually there; out-of-date comments are worse than no comments at all. People new to writing code comments and documentation sometimes get hung up on questions like "How many comments do I need?" and "Should I comment every line?" Rather than asking these questions, keep in mind that comments should add context without being distracting. Typically, commenting every line makes your code *harder* to read and understand, which is to be avoided.
Similarly, having *no comments* across all code in a project should also typically be avoided, as some explanation or context for those reading your code (including future you) would likely be beneficial. **What to avoid** Specifically, comments that literally explain what a code construct does should be avoided.
###Code
# This is a loop that iterates over elements in a list
for element in list_of_elements:
pass
###Output
_____no_output_____
###Markdown
**How to improve** Instead, you'd want to explain your thinking, along the general lines of what you see here (where X, Y, and Z would be replaced with specifics applicable to your project):
###Code
# Because of X, we will use approach Y to do Z
for element in list_of_elements:
pass
###Output
_____no_output_____
###Markdown
Docstrings. Unlike comments, which are intermingled among the code directly and included for developers (people reading the code), docstrings are descriptions and guides written alongside the code itself for code *users*. **Docstrings** describe how to use the code stored in modules, classes and/or functions; specifically, they describe the *operation* of the code. `numpy`-style docstrings. While there are a handful of common approaches to writing docstrings in Python, we'll focus on writing `numpy`-style docstrings. [Numpy style docs](https://numpydoc.readthedocs.io/en/latest/format.html) are a particular specification for docstrings. The documentation is extensive, so for more detail we encourage you to delve into their documentation...about docstrings. Here, we'll only cover the basics. Docstring: `add()`. To introduce the syntax and formatting of a `numpy`-style docstring, we'll document a *very* simple function `add`. You'll notice a few things in the docstring below: 1. Docstrings begin and end with triple quotes. 2. Docstrings are added right beneath the line of code defining the function or class they're describing. 3. Jupyter notebooks will format docstrings using a red color. 4. The docstring below includes three parts: 1) a string describing what the function accomplishes, 2) the inputs (Parameters) to the function, and 3) the outputs (Returns) from the function.
###Code
def add(num1, num2):
"""Add two numbers together.
Parameters
----------
num1 : int or float
The first number, to be added.
num2 : int or float
The second number, to be added.
Returns
-------
answer : int or float
The result of the addition.
"""
answer = num1 + num2
return answer
###Output
_____no_output_____
###Markdown
Using the example above, we'll describe each of these three components in a bit more detail. **General Description** `"""Add two numbers together."""` provides the user with a general description of what the function does. This can be a multi-line string and should describe, generally, the task accomplished by or goals of the function. **Parameters** The Parameters section opens with "Parameters", with a line of dashes on the following line. Within this section, each input/argument to the function, its corresponding type, and a description of the input is included. For example, `num1` is the parameter name. Separated by a colon (`:`), the type of that parameter is described (`int or float`). On the following line, indented, a short description of that parameter is included. The same pieces of information are included for the second parameter, `num2`. ```python """ Parameters ---------- num1 : int or float The first number, to be added. num2 : int or float The second number, to be added. """``` **Returns** The formatting of the Returns section mirrors that of the Parameters section; however, the information included reflects the variable or variables `return`ed from the function (or method). ```python """ Returns ------- answer : int or float The result of the addition. """``` In the `add()` function, there is a single output `answer`, which will be either an `int or float` and can be described as "The result of the addition." As a reminder, complete docstrings can and should include even more information than what is described here. However, to get in the practice of documenting your code, including these three aspects is a great place to start. But we encourage you to dig into the `numpy` docstring documentation to expand your documentation understanding. For one example of a complete docstring, you can check out the docstring for [`numpy.array`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.array.html). Accessing Docstrings. Importantly, **docstrings** are available to you *outside* of the source code in a number of ways. **Approach 1: `?`** In a Jupyter notebook, documentation for an object can be accessed by adding a question mark (`?`) after the object name: `add?`. This pulls the documentation up as a pop-up at the bottom of the Jupyter notebook. **Approach 2: `help()`** The `help()` function can be used to pull up the documentation in-line for any object:
###Code
help(add)
###Output
Help on function add in module __main__:
add(num1, num2)
Add two numbers together.
Parameters
----------
num1 : int or float
The first number, to be added.
num2 : int or float
The second number, to be added.
Returns
-------
answer : int or float
The result of the addition.
###Markdown
**Approach 3: `__doc__`** `__doc__` is also an attribute stored in an object. As with all attributes, the information stored can be accessed using `obj.__doc__`. Note that `__` is a dunder, or a double underscore: there are two leading and two trailing underscores around `doc`.
###Code
print(add.__doc__)
###Output
Add two numbers together.
Parameters
----------
num1 : int or float
The first number, to be added.
num2 : int or float
The second number, to be added.
Returns
-------
answer : int or float
The result of the addition.
|
notebooks/bahdanau_attention.ipynb | ###Markdown
Neural Machine Translation by Jointly Learning to Align and TranslateDzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio *ICLR 2015*https://arxiv.org/pdf/1409.0473.pdf
###Code
import os
import re
from typing import List
from IPython.display import Image
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
###Output
_____no_output_____
###Markdown
0 Setup Sample Examples. The following examples are English-to-French translation pairs sampled from [wmt14_translate](https://www.tensorflow.org/datasets/catalog/wmt14_translatewmt14_translatefr-en). Our task is to train a model to translate English sentences into French.
###Code
sample_en = [
"Crop insurance payments include only government crop insurance programs; private hail insurance payments are excluded.",
"Activities of the second type will be a major determinant of the successful implementation of a language policy.",
"There are no known natural sources of acrylonitrile.",
"James recounts how the community was developed."
]
sample_fr = [
"Les indemnitรฉs dโassurance-rรฉcolte comprennent uniquement celles des programmes publics; les indemnitรฉs de lโassurance-grรชle privรฉe sont exclues.",
"Pour la mise en oeuvre, la deuxiรจme catรฉgorie d'activitรฉs constitue un dรฉterminant essentiel du succรจs de la politique linguistique.",
"On ne connaรยฎt aucune source naturelle d'acrylonitrile.",
"Mme James raconte comment la collectivitรฉ a รฉtรฉ crรฉรฉe."
]
###Output
_____no_output_____
###Markdown
Preprocessing
###Code
def PreprocessSentence(sentence: str) -> str:
"""Preprocess an input sentence, make it cosumbale by the model."""
sentence = sentence.lower().strip()
# Add whitespace after certain special characters.
sentence = re.sub(r"([,.?!$%'])", r" \1 ", sentence)
# Add <start> and <end> token to sentence.
sentence = "<start> " + sentence + " <end>"
# Remove the redundant whitespaces.
sentence = re.sub(r"[' ']+", " ", sentence)
return sentence
sample_en = [PreprocessSentence(x) for x in sample_en]
sample_fr = [PreprocessSentence(x) for x in sample_fr]
print("Sample english sentences:")
for en in sample_en: print(en)
print("\nSample french sentences:")
for fr in sample_fr: print(fr)
###Output
Sample english sentences:
<start> crop insurance payments include only government crop insurance programs; private hail insurance payments are excluded . <end>
<start> activities of the second type will be a major determinant of the successful implementation of a language policy . <end>
<start> there are no known natural sources of acrylonitrile . <end>
<start> james recounts how the community was developed . <end>
Sample french sentences:
<start> les indemnitรฉs dโassurance-rรฉcolte comprennent uniquement celles des programmes publics; les indemnitรฉs de lโassurance-grรชle privรฉe sont exclues . <end>
<start> pour la mise en oeuvre , la deuxiรจme catรฉgorie d activitรฉs constitue un dรฉterminant essentiel du succรจs de la politique linguistique . <end>
<start> on ne connaรฃยฎt aucune source naturelle d acrylonitrile . <end>
<start> mme james raconte comment la collectivitรฉ a รฉtรฉ crรฉรฉe . <end>
###Markdown
Tokenization
###Code
def GetTokenizer(sentences: List[str]) -> Tokenizer:
"""Create tokenizer."""
tokenizer = Tokenizer(filters="", oov_token="<OOV>")
tokenizer.fit_on_texts(sentences)
return tokenizer
en_tokenizer = GetTokenizer(sample_en)
en_vocab_size = len(en_tokenizer.word_index)
en_sequences = en_tokenizer.texts_to_sequences(sample_en)
print(f"English vocab size: {en_vocab_size}\n")
print(en_tokenizer.word_index)
print(f"\nInput sequences:")
for seq in en_sequences: print(seq)
print("\n")
fr_tokenizer = GetTokenizer(sample_fr)
fr_vocab_size = len(fr_tokenizer.word_index)
fr_sequences = fr_tokenizer.texts_to_sequences(sample_fr)
print(f"French vocab size: {fr_vocab_size}\n")
print(fr_tokenizer.word_index)
print(f"\nTarget sequences:")
for seq in fr_sequences: print(seq)
###Output
English vocab size: 41
{'<OOV>': 1, '<start>': 2, '.': 3, '<end>': 4, 'of': 5, 'insurance': 6, 'the': 7, 'crop': 8, 'payments': 9, 'are': 10, 'a': 11, 'include': 12, 'only': 13, 'government': 14, 'programs;': 15, 'private': 16, 'hail': 17, 'excluded': 18, 'activities': 19, 'second': 20, 'type': 21, 'will': 22, 'be': 23, 'major': 24, 'determinant': 25, 'successful': 26, 'implementation': 27, 'language': 28, 'policy': 29, 'there': 30, 'no': 31, 'known': 32, 'natural': 33, 'sources': 34, 'acrylonitrile': 35, 'james': 36, 'recounts': 37, 'how': 38, 'community': 39, 'was': 40, 'developed': 41}
Input sequences:
[2, 8, 6, 9, 12, 13, 14, 8, 6, 15, 16, 17, 6, 9, 10, 18, 3, 4]
[2, 19, 5, 7, 20, 21, 22, 23, 11, 24, 25, 5, 7, 26, 27, 5, 11, 28, 29, 3, 4]
[2, 30, 10, 31, 32, 33, 34, 5, 35, 3, 4]
[2, 36, 37, 38, 7, 39, 40, 41, 3, 4]
French vocab size: 51
{'<OOV>': 1, '<start>': 2, '.': 3, '<end>': 4, 'la': 5, 'les': 6, 'indemnitรฉs': 7, 'de': 8, 'd': 9, 'dโassurance-rรฉcolte': 10, 'comprennent': 11, 'uniquement': 12, 'celles': 13, 'des': 14, 'programmes': 15, 'publics;': 16, 'lโassurance-grรชle': 17, 'privรฉe': 18, 'sont': 19, 'exclues': 20, 'pour': 21, 'mise': 22, 'en': 23, 'oeuvre': 24, ',': 25, 'deuxiรจme': 26, 'catรฉgorie': 27, 'activitรฉs': 28, 'constitue': 29, 'un': 30, 'dรฉterminant': 31, 'essentiel': 32, 'du': 33, 'succรจs': 34, 'politique': 35, 'linguistique': 36, 'on': 37, 'ne': 38, 'connaรฃยฎt': 39, 'aucune': 40, 'source': 41, 'naturelle': 42, 'acrylonitrile': 43, 'mme': 44, 'james': 45, 'raconte': 46, 'comment': 47, 'collectivitรฉ': 48, 'a': 49, 'รฉtรฉ': 50, 'crรฉรฉe': 51}
Target sequences:
[2, 6, 7, 10, 11, 12, 13, 14, 15, 16, 6, 7, 8, 17, 18, 19, 20, 3, 4]
[2, 21, 5, 22, 23, 24, 25, 5, 26, 27, 9, 28, 29, 30, 31, 32, 33, 34, 8, 5, 35, 36, 3, 4]
[2, 37, 38, 39, 40, 41, 42, 9, 43, 3, 4]
[2, 44, 45, 46, 47, 5, 48, 49, 50, 51, 3, 4]
###Markdown
Padding. Pad the sequences with 0 so that they all have the same sequence length.
###Code
def Padding(sequences: List[List[int]]) -> List[List[int]]:
"""Pad sequences."""
padded = tf.keras.preprocessing.sequence.pad_sequences(
sequences, padding="post")
return padded
en_sequences = Padding(en_sequences)
max_input_len = len(en_sequences[0])
print(f"Padded input sequences:")
for seq in en_sequences: print(seq)
print(f"max_input_len: {max_input_len}")
fr_sequences = Padding(fr_sequences)
max_target_len = len(fr_sequences[0])
print(f"\nPadded target sequences:")
for seq in fr_sequences: print(seq)
print(f"max_target_len: {max_target_len}")
###Output
Padded input sequences:
[ 2 8 6 9 12 13 14 8 6 15 16 17 6 9 10 18 3 4 0 0 0]
[ 2 19 5 7 20 21 22 23 11 24 25 5 7 26 27 5 11 28 29 3 4]
[ 2 30 10 31 32 33 34 5 35 3 4 0 0 0 0 0 0 0 0 0 0]
[ 2 36 37 38 7 39 40 41 3 4 0 0 0 0 0 0 0 0 0 0 0]
max_input_len: 21
Padded target sequences:
[ 2 6 7 10 11 12 13 14 15 16 6 7 8 17 18 19 20 3 4 0 0 0 0 0]
[ 2 21 5 22 23 24 25 5 26 27 9 28 29 30 31 32 33 34 8 5 35 36 3 4]
[ 2 37 38 39 40 41 42 9 43 3 4 0 0 0 0 0 0 0 0 0 0 0 0 0]
[ 2 44 45 46 47 5 48 49 50 51 3 4 0 0 0 0 0 0 0 0 0 0 0 0]
max_target_len: 24
###Markdown
Abstract. Current models usually choose an encoder-decoder architecture. The encoder encodes a source sequence into a fixed-length hidden state from which a decoder generates a target sequence. The paper argues that the use of a fixed-length hidden state is a bottleneck in improving the performance. The paper, instead, proposes a (soft-)search for parts of a source sequence that are relevant to predicting a target word (i.e. the target word should "pay attention" to certain input words). 1 Introduction & Background. ***Neural machine translation*** attempts to build and train **a single, large neural network** that reads a sentence and outputs a correct translation (while traditional phrase-based systems usually consist of many sub-components that are tuned separately). Most of the proposed models belong to a family of ***encoder-decoders*** - [Sequence to sequence learning with neural networks](https://arxiv.org/pdf/1409.3215.pdf) Ilya Sutskever, Oriol Vinyals, and Quoc V. Le *NIPS 2014* **(Seq2Seq)** - [Learning phrase representations using rnn encoder-decoder for statistical machine translation](https://arxiv.org/pdf/1406.1078.pdf) Kyunghyun Cho, Yoshua Bengio *CoRR 2014* **(Enc-Dec)** A potential issue with this approach lies in the fact that all necessary information of a source sequence needs to be compressed into a fixed-length vector (hidden state). This makes it hard for the model to cope with long sentences. The following paper showed that the performance of a basic encoder-decoder deteriorates rapidly as the length of an input sentence increases. - [On the Properties of Neural Machine Translation: Encoder-Decoder Approaches](https://arxiv.org/pdf/1409.1259.pdf) Kyunghyun Cho, Bart van Merrienboer, Dzmitry Bahdanau, Yoshua Bengio *SSST-8 2014* To address the issue, the paper introduces an attention mechanism. The model encodes the input sequence into context vectors. At each time step the decoder predicts based on the context vectors and the previous decoder state. 2 Model Architecture
###Code
Image(filename='../pics/bahdanau_attention.png')
###Output
_____no_output_____
###Markdown
2.1 Encoder. The paper chooses a bidirectional GRU so that both preceding and following words are taken into account. The forward GRU encodes the input sequence into *forward hidden states* $(\overrightarrow{h}_1, ..., \overrightarrow{h}_{T_x})$. The backward GRU encodes the input sequence into *backward hidden states* $(\overleftarrow{h}_1, ..., \overleftarrow{h}_{T_x})$. The hidden vector for each token $x_j$ is then the concatenation of the forward and backward states: $$h_j=[\overrightarrow{h}_j^T,\overleftarrow{h}_j^T]^T$$ In the paper, `emb_size=600` and `hidden_size=1000`.
###Code
def Encoder(input_vocab_size: int,
emb_size: int,
hidden_size: int,
name: str="encoder"):
"""Bi-directional GRU encoder.
Inputs:
sequences: Indices of input sequence tokens, of shape
(batch_size, input_seq_len)
Args:
input_vocab_size: Size of input vocab.
emb_size: Dimensionality of the embeddings.
hidden_size: Dimensionality of the layers.
name: Name of the Encoder.
Returns:
Encoder output, of shape (batch_size, input_seq_len, hidden_size)
Last encoder state, of shape (batch_size, hidden_size)
"""
sequences = tf.keras.Input(shape=(None, ), name="input_sequences")
embedding_layer = tf.keras.layers.Embedding(input_dim=input_vocab_size,
output_dim=emb_size)
bi_gru_layer = tf.keras.layers.Bidirectional(
tf.keras.layers.GRU(units=hidden_size,
return_sequences=True,
return_state=True))
reduce_states = tf.keras.layers.Dense(hidden_size)
# Embedding layer. (batch_size, input_seq_len, emb_size)
embeddings = embedding_layer(sequences)
# Bi-GRU layer.
# - encoder_outputs: (batch_size, input_seq_len, hidden_size*2)
# - forward_state: (batch_size, hidden_size)
# - backward_state: (batch_size, hidden_size)
encoder_output, forward_state, backward_state = bi_gru_layer(embeddings)
# Reduce the forward and backward state into a single initial state for the
# decoder since decoder is not Bi-directional. (batch_size, hidden_size)
state = reduce_states(tf.concat([forward_state, backward_state], axis=1))
return tf.keras.Model(
inputs=[sequences], outputs=[encoder_output, state], name=name)
# Example
EMB_SIZE = 600
HIDDEN_SIZE = 1000
encoder = Encoder(input_vocab_size=en_vocab_size+1, # +1 for padding
emb_size=EMB_SIZE,
hidden_size=HIDDEN_SIZE)
encoder_output, encoder_state = encoder(en_sequences)
tf.keras.utils.plot_model(encoder, show_shapes=True)
###Output
_____no_output_____
###Markdown
2.2 Masking Padding maskMask all the pad tokens in the batch of sequences, to make sure the model doesn't treat paddings as inputs.
###Code
def GetPaddingMask(sequences: tf.Tensor) -> tf.Tensor:
""" Create padding mask.
Args:
sequences: input sequences, of shape (batch_size, seq_len)
Returns:
mask: mask tensor of shape (batch_size, seq_len)
"""
mask = tf.cast(tf.not_equal(sequences, tf.constant(0)), tf.float32)
return mask
# Example
input_padding_mask = GetPaddingMask(en_sequences)
print("Input padding mask:")
print(input_padding_mask)
###Output
Input padding mask:
tf.Tensor(
[[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0. 0.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]], shape=(4, 21), dtype=float32)
###Markdown
2.3 Bahdanau attention. For timestep $i$, given that $s_{i-1}$ is the hidden state from the previous decoding step and $h=\{h_j\}$ are the encoder outputs, the attention is calculated in three steps:- **Score** Q (the previous decoder hidden state) against K (the encoder outputs)$$e_{ij}=W_a\tanh(W_s s_{i-1}+W_h h_j)$$- **Alignment** with softmax to get the attention weights$$\alpha_{ij}=\frac{exp(e_{ij})}{\sum_{k=1}^{T_X}exp(e_{ik})}$$- **Calculate attention** as the weighted sum of V (the encoder outputs)$$c_i=\sum_{j=1}^{T_x}\alpha_{ij}h_j$$
###Code
class BahdanauAttention(tf.keras.layers.Layer):
""" Bahdanau attention layer.
Args:
hidden_size: Dimensionality of the layers.
name: Name of the layer.
"""
def __init__(self, hidden_size: int, name: str="attention"):
super(BahdanauAttention, self).__init__(name=name)
self.query_linear = tf.keras.layers.Dense(units=hidden_size)
self.value_linear = tf.keras.layers.Dense(units=hidden_size)
self.score_linear = tf.keras.layers.Dense(units=1)
def call(self,
query: tf.Tensor,
values: tf.Tensor,
input_padding_mask: tf.Tensor):
"""
Args:
query: The query tensor of shape (batch_size, hidden_size)
values: The query tensor of shape (batch_size, input_seq_len, hidden_size*2)
input_padding_mask: The mask tensor of shape (batch_size, input_seq_len)
"""
q = self.query_linear(tf.expand_dims(query, 1))
v = self.value_linear(values)
# Attention - Score (Additive attention), of shape (batch_size, input_seq_len, 1)
score = self.score_linear(tf.nn.tanh(q+v))
# Attention - Alignment
# - Softmax on the second axis (input_seq_len) so that the scores add up to 1.
attention_weights = tf.nn.softmax(score, axis=1)
        # - Mask the paddings in the encoder sequence so they are not included in the attention weights.
input_padding_mask = tf.expand_dims(input_padding_mask, axis=-1)
attention_weights *= tf.cast(input_padding_mask, tf.float32)
# Attention - Calculate context vector, of shape (batch_size, 1, hidden_dim)
context_vector = attention_weights * v
context_vector = tf.reduce_sum(context_vector, axis=1)
return context_vector, tf.squeeze(attention_weights, axis=-1)
# Example
attention = BahdanauAttention(hidden_size=HIDDEN_SIZE)
context_vector, attention_weights = attention(
query=encoder_state, values=encoder_output, input_padding_mask=input_padding_mask)
print("Context vector:")
print(context_vector)
print("\nattention weights:")
print(attention_weights)
###Output
Context vector:
tf.Tensor(
[[ 0.00132546 -0.00354546 0.00955405 ... 0.00307471 -0.00306587
0.00538846]
[-0.00297993 -0.00270179 -0.00343082 ... -0.00329211 0.00572293
0.0045021 ]
[-0.00225976 -0.000354 0.00119226 ... 0.0037788 -0.00380389
0.00520081]
[ 0.00453089 0.00362038 0.00112795 ... 0.00231981 0.00416471
0.00056484]], shape=(4, 1000), dtype=float32)
attention weights:
tf.Tensor(
[[0.04922134 0.04799827 0.04823243 0.04718865 0.04742919 0.04774665
0.04849301 0.04749199 0.04790553 0.04853286 0.04846075 0.04758029
0.04810355 0.04644806 0.0469764 0.04682632 0.04730159 0.04709158
0. 0. 0. ]
[0.04926461 0.04859963 0.04755028 0.04779278 0.04890285 0.04811487
0.04632172 0.04605797 0.04722239 0.04735815 0.04695009 0.0469963
0.04723995 0.04751648 0.04756604 0.0467879 0.04765843 0.04819013
0.04799059 0.04801839 0.04790051]
[0.0492771 0.04750408 0.04858499 0.04822636 0.04836567 0.04749927
0.04673134 0.04683707 0.04763693 0.04787856 0.04758745 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. ]
[0.04980261 0.0495143 0.04973145 0.04840541 0.04763563 0.047553
0.04674438 0.04721204 0.04778758 0.04744418 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. ]], shape=(4, 21), dtype=float32)
###Markdown
2.4 Decoder. The decoder defines a probability over the translation $y$ by decomposing the joint probability$$p(y)=\prod_{i=1}^{T_y}p(y_i\mid\{y_1, ..., y_{i-1}\}, c)$$With an RNN, each conditional probability is modeled as$$p(y_i\mid\{y_1, ..., y_{i-1}\}, c)=RNN(y_{i-1}, s_{i-1}, c)$$The decoder first computes the context vector $c_i$ from $s_{i-1}$ and the encoder outputs, then concatenates it with the embedding of the previous target token and feeds the result into the RNN.
###Code
def Decoder(target_vocab_size: int,
emb_size: int,
hidden_size: int,
name: str="decoder"):
"""GRU decoder.
Inputs:
sequences: Indices of target sequence tokens, of shape
(batch_size, 1)
decoder_hidden: hidden state from previous decode step.
of shape (batch_size, hidden_size)
encoder_output: of shape (batch_size, input_seq_len, hidden_size)
input_padding_mask: The mask tensor of shape (batch_size, input_seq_len)
Args:
target_vocab_size: Size of target vocab.
emb_size: Dimensionality of the embeddings.
hidden_size: Dimensionality of the layers.
name: Name of the Decoder.
Returns:
Decoder output, of shape (batch_size, target_vocab_size)
Last decoder state, of shape (batch_size, hidden_size)
attention weights of shape (batch_size, input_seq_len)
"""
sequences = tf.keras.Input(shape=(1, ), name="target_sequences")
decoder_hidden = tf.keras.Input(shape=(hidden_size, ), name="decoder_hidden")
encoder_output = tf.keras.Input(shape=(None, hidden_size*2), name="encoder_output")
input_padding_mask = tf.keras.Input(shape=(None, ), name="mask")
embedding_layer = tf.keras.layers.Embedding(input_dim=target_vocab_size,
output_dim=emb_size)
attention_layer = BahdanauAttention(hidden_size=hidden_size)
gru_layer = tf.keras.layers.GRU(units=hidden_size,
return_sequences=True,
return_state=True)
output_linear = tf.keras.layers.Dense(target_vocab_size)
# Embedding layer. (batch_size, 1, emb_size)
embeddings = embedding_layer(sequences)
# Attention of shape (batch_size, hidden_dim)
context_vector, attention_weights = attention_layer(query=decoder_hidden,
values=encoder_output,
input_padding_mask=input_padding_mask)
# Concat embeddings and context vector, of shape (batch_size, 1, emb_size + hidden_size)
decoder_input = tf.concat([tf.expand_dims(context_vector, 1), embeddings], axis=-1)
# GRU layer.
# - gru_outputs: (batch_size, 1, hidden_size)
# - state: (batch_size, hidden_size)
gru_output, state = gru_layer(decoder_input)
# Get decoder output
gru_output = tf.reshape(gru_output, (-1, hidden_size))
decoder_output = output_linear(gru_output)
return tf.keras.Model(
inputs=[sequences, decoder_hidden, encoder_output, input_padding_mask],
outputs=[decoder_output, state, attention_weights], name=name)
# Example
decoder = Decoder(target_vocab_size=fr_vocab_size+1,  # +1 for padding, matching the encoder
emb_size=EMB_SIZE,
hidden_size=HIDDEN_SIZE)
decoder_output, state, attention_weights = decoder(
[fr_sequences[:, :1], encoder_state, encoder_output, input_padding_mask])
tf.keras.utils.plot_model(decoder, show_shapes=True)
###Output
_____no_output_____ |
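To tie the encoder, attention and decoder together, here is a hedged sketch (not from the paper or the notebook above) of a greedy decoding loop using the *untrained* models defined above; `GreedyDecode` and `max_len` are names introduced here for illustration, and in real use the model would be trained first, with teacher forcing feeding the ground-truth previous token instead of the argmax prediction.

```python
def GreedyDecode(input_sequences, max_len: int = 20) -> tf.Tensor:
    """Greedy decoding sketch: feed back the argmax prediction at every step."""
    batch_size = input_sequences.shape[0]
    mask = GetPaddingMask(input_sequences)
    encoder_output, decoder_hidden = encoder(input_sequences)
    # Every target sequence starts with the <start> token.
    decoder_input = tf.cast(
        tf.fill([batch_size, 1], fr_tokenizer.word_index["<start>"]), tf.float32)
    predicted_tokens = []
    for _ in range(max_len):
        logits, decoder_hidden, _ = decoder(
            [decoder_input, decoder_hidden, encoder_output, mask])
        predicted_ids = tf.argmax(logits, axis=-1)                 # (batch_size,)
        predicted_tokens.append(predicted_ids)
        decoder_input = tf.cast(tf.expand_dims(predicted_ids, 1), tf.float32)
    return tf.stack(predicted_tokens, axis=1)                      # (batch_size, max_len)

predictions = GreedyDecode(en_sequences)
print(predictions.shape)
# Map ids back to tokens (currently arbitrary, since the model is untrained).
print([fr_tokenizer.index_word.get(int(i), "?") for i in predictions[0].numpy()])
```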
tpCodageEntiers.ipynb | ###Markdown
Representing integers, a bit of Python... When designing a program, the real-world objects (abstract or concrete) have to be represented in a numerical form. This modelling ability is one of the main difficulties for the apprentice programmer, beginner or not. Worse, for beginners this aspect is often neglected in favour of algorithmics, and in the end some students cannot even devise the algorithms answering a problem stated in plain language, because they are unable to imagine which numerical objects could represent the problem to be solved. Analysis methods help address this difficulty but, in the end, the model has to fit into basic numerical representations such as character strings, integers and decimal numbers... This working sequence gives you material for manipulating Python concepts and illustrating them on the topic of integer representation. 1. Conversion from decimal to binary. Integers. Observe how the following functions behave.
```python
a=42
b=1200
print(a)
print("{}".format(a))
print("{}".format(b))
```
It is possible to change the display format. Explain the following outputs:
```python
print("{:b}".format(a))
print("{:b}".format(b))
```
While base two plays a central role in computing, the other base in frequent use is base 16, called hexadecimal. It makes it convenient to describe or manipulate a long sequence of bits: it would quickly become difficult to handle large bit sequences, for instance to explore a file or to type in a WIFI key, and doing so would multiply the risk of errors. The human brain is more comfortable with a small number of symbols drawn from a large alphabet than with an immense sequence of symbols drawn from a small one. Explain this:
```python
print("{:x}".format(a))
print("{:x}".format(b))
```
The conversion can also go the other way:
```python
a=int("101010",2)
print(a)
```
Test it with `b` and with other bases...
###Code
###Output
_____no_output_____
###Markdown
Il est mรชme possible de formater la sortie en imposant un format prรฉcis: ```Pythonprint("{0:{fill}8b}".format(a, fill='0'))``` Que se passe-t-il si on souhaite afficher un nรฉgatif? Il serait naturel de reprรฉsenter les entiers relatifs en utilisant un bit pour le signe et les autres pour la valeur absolue. Ainsi, avec des mots de 16 bits, on utiliserait 1 bit pour le signe et 15 bits pour la valeur absolue. Mais, outre l'existence de deux zรฉros, cela ne facilite pas du tout les traitements arithmรฉtiques. La plupart des langages appliquent une autre mรฉthode, qui consiste ร reprรฉsenter un entier relatif parun entier naturel. Si on utilise des mots de 16 bits, on peut reprรฉsenter les **entiers relatifs** compris entre -32 768 et 32767. Donc sur 16 bits, on reprรฉsente un entier relatif z **positif ou nul**comme l'entier naturel x et un entier relatif r **strictement nรฉgatif** comme l'entier naturel$x + 2^{16} = x + 65 536$, qui est compris entre 32 768 et 65 535. Ainsi, les entiers naturelsde 0 ร 32 767 correspondent aux entiers relatifs positifs ou nuls, ร droite sur la figure ci-dessous etles entiers naturels de 32 768 ร 65 535 reprรฉsentent les entiers relatifs strictement nรฉgatifsร gauche sur cette mรชme figure.<img src="fig/cerclemodulo.png" alt="cercleModulo.png" title="cercleModulo.png" width="200" />Finalement on peut rรฉsumer la situation ainsi: dans certains intervalles fixรฉs par les limites imposรฉes par le nombre de bits disponibles notรฉ $Nb$, les entiers relatifs sont reprรฉsentรฉs par leur valeur modulo $2^{Nb}$.Explorons ce mode de repรฉsentation dรฉnommรฉ **C2**.Lorsque l'on s'intรฉresse au C2, il faut d'abord s'entendre sur le nombre de bits sur lequel on travaille. Certains processeurs (plutรดt anciens) travaillent sur des reprรฉsentations sur 8 bits. Dans ce cas les entiers relatifs vont de $-2^{8}$ ร $2^{7}-1$. Utilisez les formats d'affichage en Python afin de reprรฉsenter a=42 et b=-42 sur 8 bits en **C2**.Exemples:3 en **C2** sur 8 bits:00000011-3 en **C2** sur 8 bits:11111101 Effectuez l'opรฉration $c=b+1$ et affichez en **C2**. Affichez l'opposรฉ de c: $d=-c $ et affichez en **C2**. Affichez l'addition de $e=d+b$ et affichez en **C2**. Comment interprรฉter ce rรฉsultat? Passage ร 32 bits. Refaites les mรชmes calculs et affichages mais cette fois-ci sur 32 bits.Affichez les limites de reprรฉsentations sur 32bits.Nota: en Python l'exposant est dรฉnotรฉ: `**` par exemple $x^2$ est codรฉ ainsi: `x**2` Passage ร 64 bits. Refaites la mรชme chose cette fois-ci en format double prรฉcision (64bits) Comment inverser la conversion? Vous devez partir d'une chaine repรฉsentant un nombre codรฉ en **C2** sur par exemple 8 bits: "11010111" ou encore "00101010" Comment savoir si c'est un positif ou un nรฉgatif? Complรฉtez ce code afin d'obtenir cet affichage ร partir des mรชmes chaรฎnes en entrรฉe.```Pythonprint(int("11010111",2))print(int("00101010",2))....```Affichage: ```21542-4142```
###Code
###Output
_____no_output_____
###Markdown
Passage ร 64 bits:Complรฉtez ce code afin d'obtenir cet affichage ร partir des mรชmes chaรฎnes en entrรฉe.```Pythonlargeur=64str1="0000000000000000000000000000000000000000000000000000000000101010"str2="1111111111111111111111111111111111111111111111111111111111010110"print(int(str1,2))print(int(str2,2))....```Affichage: ```21542-4142``` Il est possible d'utiliser les types du langage C grรขce ร la librarie `ctypes`.
###Code
import ctypes
x=ctypes.c_int(42)
print(x)
###Output
c_int(42)
###Markdown
We can look at the internal two's-complement (**C2**) structure of the integer. Explain the outputs below. Note: to access the in-memory representation of an integer, we turn it into a sequence of bytes. The "pack" function of the struct module (https://docs.python.org/3/library/struct.html) performs this transformation; note in particular the use of "!" to force a big-endian representation. The unpack function performs the inverse transformation. The string produced is prefixed with "b" and is the sequence of ASCII codes (or of their interpretation when printable) representing the hexadecimal codes of the bytes.<img src="fig/1280px-USASCII_code_chart.png" alt="1280px-USASCII_code_chart.png" title="1280px-USASCII_code_chart.png" width="400" />
###Code
import struct
import binascii
out = struct.pack('!i', 42)
print(out)
print(binascii.hexlify(out))
out = struct.pack('!i', -42)
print(out)
print(binascii.hexlify(out))
###Output
b'\x00\x00\x00*'
b'0000002a'
b'\xff\xff\xff\xd6'
b'ffffffd6'
###Markdown
But the same bit sequence can be interpreted in different ways, either under the assumption that it is an (unsigned) integer or under the assumption that it is a signed integer. For more information see: https://docs.python.org/2/library/struct.html and: https://bip.weizmann.ac.il/course/python/PyMOTW/PyMOTW/docs/struct/index.html
###Code
z=42
out = struct.pack('!I', z)
print(binascii.hexlify(out))
i=struct.unpack('!I',out)
print(z," donne: ",i[0])
z=-42
out = struct.pack('!i', z)
print(binascii.hexlify(out))
i=struct.unpack('!I',out)
print(z," donne: ",i[0])
###Output
b'0000002a'
42 donne: 42
b'ffffffd6'
-42 donne: 4294967254
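The same reinterpretation can be done without `struct`, by reducing modulo $2^{Nb}$ as described above; a small hedged sketch on 32 bits (not in the original notebook):

```python
largeur = 32
for z in (42, -42):
    n = z % 2**largeur                # two's-complement value; equivalently z & (2**largeur - 1)
    print(z, "->", n, format(n, '032b'), format(n, '08x'))
```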
###Markdown
Conversion algorithms.... Here you will find examples of exercises to give to students. They are basic algorithmics examples on the theme of encoding. Represent in base $k$ a natural number given in base $10$. To write natural numbers in base $k$, we need $k$ digits. When we have $r$ objects, we group them into packets of $k$, then group these packets into packets of packets, and so on. In other words, we perform a succession of divisions by $k$ until we obtain a quotient equal to 0. More formally, if $a_i,a_{i-1},...,a_1,a_0$ denotes the representation of $n$ in base $k$, it is obtained by the following algorithm: inputs: $n,k$; $i\leftarrow 0$; while $n \neq 0$ do: + $a_i \leftarrow$ remainder of the Euclidean division of $n$ by $k$ + $n \leftarrow$ quotient of the Euclidean division of $n$ by $k$ + $i\leftarrow i+1$; result: $a_i,a_{i-1},...,a_1,a_0$
###Code
def inttostring(n,b,digits="0123456789ABCDEF"):
"""Retourne la chaรฎne de caractรจres reprรฉsentant n en base b, en utilisant les caractรจres de digits."""
s=""
    pass  # to be replaced (exercise)
return s
print(inttostring(10,2))
###Output
###Markdown
To find the base-ten representation of a natural number given in base $k$, we use the fact that in base $k$ the rightmost digit represents the units, the previous one the packets of $k$, the one before the packets of $k \times k=k^2$, the one before the packets of $k \times k \times k = k^3$, and so on.These are the inverse operations of those performed in the base-$k$ conversion algorithm. If $a_i,a_{i-1},...,a_1,a_0$ is the writing of $n$ in base $k$, then:$n=a_0+a_{1}k+a_2k^2+...+a_ik^i$ Computed naively, this requires $(i+1) + i + \dots + 1 = (i+1)(i+2)/2$ products: $O(n^2)$.The built-in function "int" cannot go beyond base 36. Write a function taking as parameters a string s and a base k smaller than 64, and returning the integer whose base-k writing is s.Code this algorithm in two ways: in one case the string is traversed in its natural order, in the other it is traversed in reverse order. Hint: to get the rank of a character c in digits you can use:```Pythondigits.index(c)```
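For reference, a sketch of the two requested traversals (our own possible completions of the stubs below; the original stubs both reuse the name `stringtoint`, here the two variants get distinct names so they can be shown side by side):

```python
def stringtoint_forward(s, k, digits="0123456789ABCDEF"):
    """Left-to-right traversal: the leftmost digit has exponent len(s)-1."""
    n = 0
    exp = len(s) - 1
    for c in s:
        n = n + digits.index(c) * k**exp
        exp = exp - 1
    return n

def stringtoint_backward(s, k, digits="0123456789ABCDEF"):
    """Right-to-left traversal: multiply the weight by k at each step."""
    n = 0
    weight = 1
    for c in reversed(s):
        n = n + digits.index(c) * weight
        weight = weight * k
    return n

print(stringtoint_forward('4A', 16))    # 74
print(stringtoint_backward('4A', 16))   # 74
```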
###Code
def stringtoint(s,k,digits="0123456789ABCDEF"):
"""Retourne l'entier reprรฉsentรฉ par la chaรฎne de caractรจres s en base k."""
n=0
exp=0
pass
return n
print(stringtoint('4A', 16,digits="0123456789ABCDEF"))
def stringtoint(s,k,digits="0123456789ABCDEF"):
"""Retourne l'entier reprรฉsentรฉ par la chaรฎne de caractรจres s en base k."""
n=0
exp=1
taille=len(s)
pass
return n
print(stringtoint('4A', 16,digits="0123456789ABCDEF"))
###Output
0
###Markdown
If $a_i,a_{i-1},...,a_1,a_0$ is the writing of $n$ in base $k$, we can also follow Horner's method (which you may, for example, have had your students study in mathematics class) to perform this computation more efficiently. It requires only $i$ products: $O(n)$. It also makes it more natural to do the computation while reading the digits from left to right.$n=((( ... (a_ik+a_{i-1})k+...)k+a_2)k+a_{1})k+a_0$
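A possible Horner-style completion of the stub below (our own sketch):

```python
def stringtoint_horner(s, k, digits="0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    """Horner's method: read the digits from left to right, multiplying the accumulator by k."""
    n = 0
    for c in s:
        n = n * k + digits.index(c)
    return n

print(stringtoint_horner('AQ', 35))   # expected: 376
```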
###Code
def stringtoint(s,k,digits="0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
"""Retourne l'entier reprรฉsentรฉ par la chaรฎne de caractรจres s en base k."""
n=0
pass
return n
print(stringtoint('AQ', 35,digits="0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"))
###Output
376
###Markdown
But Python integers are more sophisticated...Are they encoded on 32 bits like signed integers in Java or C++? Are they encoded on 64 bits like `double` in Java or C++? To check, try to display the maximum value +1 on 32 bits, then on 64 bits.
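One possible way to fill in the cell below (a sketch; the 32-bit signed limits are $-2^{31}$ and $2^{31}-1$, and the same pattern works for 64 or 128 bits):

```python
largeur = 32                       # also try 64 and 128
min = -2**(largeur - 1)
max = 2**(largeur - 1) - 1
print("min: ", min, "max: ", max)
print("max + 1 =", max + 1)        # no overflow: Python simply prints a bigger integer
```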
###Code
min= ...
max=...
print("min: ",min,"max: ",max)
###Output
min: Ellipsis max: Ellipsis
###Markdown
Maybe they are encoded on 128 bits? Try it...
###Code
largeur=128
min=...
max=...
print("min: ",min,"max: ",max)
###Output
min: Ellipsis max: Ellipsis
###Markdown
Display the extreme values, negative as well as positive, varying the number of bits used from 2 to 4096. Where do you hit a limit?Example of display: ```Pythonprint("exposant: ",largeur ,"\nmin: ",min,"\nmax: ",max)``` The ```sys``` library gives the size in bytes of the representation of an integer```Pythonimport syslargeur=40a=2**(largeur-1)-1print(sys.getsizeof(a))``` What is the minimum size? Increase the value in a loop up to 2**2048. The size seems to grow as needed...To understand this better we can use the `PyLongObject` class from `ctypes`.The `ctypes` library gives access to the C data structures used by the Python interpreter, i.e. the program that executes (interprets) Python code. Thanks to this library it is therefore possible to explore how integers are represented in Python.For example, we can display part of the data contained in the `PyLongObject` structure, which is used to represent integers in Python. ```pythonimport ctypesclass PyLongObject(ctypes.Structure): _fields_ = [("ob_refcnt", ctypes.c_long), ("ob_type", ctypes.c_void_p), ("ob_size", ctypes.c_ulong), ("ob_digit", ctypes.c_int *1)]bignum = 2**40-1the_int=PyLongObject.from_address(id(bignum)) # retrieve the object via its identifiersize=the_int.ob_size # retrieve the ob_size field (an unsigned long, hence encoded on 64 bits)print( size) ```
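Before moving on to `PyLongObject`, here is a possible sketch for the two questions above (our own loop bounds; the byte counts reported by `sys.getsizeof` depend on the CPython build):

```python
import sys

# Extreme values for widths from 2 to 4096 bits: Python never overflows,
# the printed numbers simply get longer.
for largeur in (2, 8, 16, 32, 64, 128, 1024, 4096):
    print("exposant: ", largeur, "\nmin: ", -2**(largeur - 1), "\nmax: ", 2**(largeur - 1) - 1)

# Size in bytes of the internal representation: it grows with the value.
a = 1
while a <= 2**2048:
    print(a.bit_length(), "bits ->", sys.getsizeof(a), "bytes")
    a = a * 2**256                 # jump 256 bits at a time to keep the output short
```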
###Code
import ctypes
class PyLongObject(ctypes.Structure):
_fields_ = [("ob_refcnt", ctypes.c_long),
("ob_type", ctypes.c_void_p),
("ob_size", ctypes.c_ulong),
("ob_digit", ctypes.c_int *1)]
bignum = 2**40-1
the_int=PyLongObject.from_address(id(bignum)) # retrieve the object via its identifier
size=the_int.ob_size # retrieve the ob_size field (an unsigned long, hence encoded on 64 bits)
print( size)
###Output
2
###Markdown
So integers seem to store a size, but which one? What does it correspond to?To find out, let us explore the last part of the `PyLongObject` structure, namely `ob_digit````Pythonimport ctypesclass PyLongObject(ctypes.Structure): _fields_ = [("ob_refcnt", ctypes.c_long), ("ob_type", ctypes.c_void_p), ("ob_size", ctypes.c_ulong), ("ob_digit", ctypes.c_int *1)] bignum = 2**40-1the_int=PyLongObject.from_address(id(bignum))size=the_int.ob_sizeprint( size)x=the_int.ob_digit # retrieve ob_digit, the address of a C integer array (c_int *)px= ctypes.cast(ctypes.pointer(x),ctypes.POINTER(ctypes.c_int)) # turn the C integer array into a Python-indexable pointerprint(px[0]) # display the first cell of the array.``` Explore this array a little further... for example its first 2 cells. NB: it would be possible to explore much further, since the code as written here does not take the real size of the array into account; consequently the values beyond 2 cells are not easily interpretable. Display on 64 bits the binary C2 representation of `2**40-1`, then of `1073741823`, then of `1023`. What do you notice? What hypotheses can you make about the representation of integers in Python?
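A possible sketch for the empty cell below (it rebuilds the `px` pointer exactly as in the snippet above; the printed digit values assume a 64-bit CPython that stores integers as 30-bit digits, which is precisely the hypothesis this exercise is meant to suggest):

```python
import ctypes

class PyLongObject(ctypes.Structure):
    _fields_ = [("ob_refcnt", ctypes.c_long),
                ("ob_type", ctypes.c_void_p),
                ("ob_size", ctypes.c_ulong),
                ("ob_digit", ctypes.c_int * 1)]

bignum = 2**40 - 1
the_int = PyLongObject.from_address(id(bignum))
px = ctypes.cast(ctypes.pointer(the_int.ob_digit), ctypes.POINTER(ctypes.c_int))

# First two cells of ob_digit: typically 1073741823 and 1023 on a 64-bit CPython
print(px[0], px[1])

# 64-bit binary displays of the three values suggested in the exercise
for value in (2**40 - 1, 1073741823, 1023):
    print("{0:064b}".format(value))

# Observation (hypothesis): 1073741823 = 2**30 - 1 is the low 30 bits of 2**40 - 1 and
# 1023 = 2**10 - 1 is its high 10 bits, so CPython appears to store integers as an
# array of base-2**30 "digits" (ob_digit), with ob_size counting those digits (2 here).
```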
###Code
###Output
_____no_output_____ |
Project_3.ipynb | ###Markdown
Scaling data
###Code
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit
scaler = StandardScaler()
scaler.fit(X)
X_scaled = scaler.transform(X)
from sklearn.decomposition import PCA
pca = PCA()
pca.fit(X_scaled)
print('-'*20 + 'Explained variance ratio' + '-'*20)
print(pca.explained_variance_ratio_)
# keep enough principal components to explain 95% of the variance
pca2 = PCA(n_components=0.95)
# fit PCA model to data
pca2.fit(X_scaled)
print('-'*20 + 'Explained variance ratio' + '-'*20)
print(pca2.explained_variance_ratio_)
# We only need 27 features to capture 95% of the variance
###Output
_____no_output_____
###Markdown
Splitting scaled X values to prepare for logistic regression
###Code
split = StratifiedShuffleSplit(n_splits=10,test_size=0.20,
random_state=42)
for train_index, test_index in split.split(X_scaled, y):
X_train = X_scaled[train_index]
X_test = X_scaled[test_index]
y_train = y[train_index]
y_test = y[test_index]
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from sklearn.model_selection import cross_val_score
logreg = LogisticRegression(max_iter=2000).fit(X_train, y_train)
print("Training set score: {:.3f}".format(logreg.score(X_train, y_train)))
print("Test set score: {:.3f}".format(logreg.score(X_test, y_test)))
print(classification_report(y_test, logreg.predict(X_test), target_names=["0.0", "1.0"]))
scores = cross_val_score(logreg, X_test, y_test, cv=split)
print("Cross-validation scores:\n{}".format(scores))
# transform the data onto the retained principal components
X2_pca2 = pca2.transform(X_scaled)
for train_index, test_index in split.split(X_scaled, y):
X2_train = X2_pca2[train_index]
X2_test = X2_pca2[test_index]
y2_train = y[train_index]
y2_test = y[test_index]
logregpca = LogisticRegression(max_iter=2000).fit(X2_train, y2_train)
print("Accuracy on training set: {:.3f}".format(logregpca.score(X2_train, y2_train)))
print("Accuracy on test set: {:.3f}".format(logregpca.score(X2_test, y2_test)))
print(classification_report(y_test, logregpca.predict(X2_test), target_names=["0.0", "1.0"]))
scoreslogregpca = cross_val_score(logregpca, X2_test, y2_test, cv=split)
print("Cross-validation scores:\n{}".format(scoreslogregpca))
###Output
Accuracy on training set: 0.998
Accuracy on test set: 0.998
precision recall f1-score support
0.0 1.00 1.00 1.00 187307
1.0 0.33 0.06 0.10 413
accuracy 1.00 187720
macro avg 0.66 0.53 0.55 187720
weighted avg 1.00 1.00 1.00 187720
Cross-validation scores:
[0.99770935 0.99765608 0.99765608 0.99760281 0.99784253 0.99765608
0.99768272 0.99784253 0.99770935 0.99757618]
###Markdown
Since the best machine learning algorithm was decision trees, with a near-perfect score of 99.98% across the board, I chose to run PCA with logistic regression using scaled data. Performance was lowered, but not by much: logistic regression with the transformed data is still 99.8% accurate even with 10-fold stratified shuffle-split cross-validation. Performance was not improved by much; we can see in the classification report that precision, recall, and the F1 scores are better when we train using a scaled version of our meteor features. Applying K-Means clustering with and without PCA
###Code
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=2)
kmeans.fit(X_train)
assignments=kmeans.labels_
mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], assignments)
plt.legend(["Cluster 0", "Cluster 1"], loc="best")
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
inertiaVals = {}
for k in range(1, 11):
kmeansTmp = KMeans(n_clusters=k, max_iter=1000).fit(X_train)
inertiaVals[k] = kmeansTmp.inertia_
plt.figure()
plt.plot(list(inertiaVals.keys()), list(inertiaVals.values()))
plt.xlabel("Number of cluster")
plt.ylabel("Inertia")
plt.show()
# testing K-Means with scaled PCA data:
kmeans = KMeans(n_clusters=2)
kmeans.fit(X2_train)
assignments=kmeans.labels_
mglearn.discrete_scatter(X2_train[:, 0], X2_train[:, 1], assignments)
plt.legend(["Cluster 0", "Cluster 1"], loc="best")
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
inertiaVals = {}
for k in range(1, 11):
kmeansTmp = KMeans(n_clusters=k, max_iter=1000).fit(X2_train)
inertiaVals[k] = kmeansTmp.inertia_
plt.figure()
plt.plot(list(inertiaVals.keys()), list(inertiaVals.values()))
plt.xlabel("Number of cluster")
plt.ylabel("Inertia")
plt.show()
# Agglomerative Clustering:
#from sklearn.cluster import AgglomerativeClustering
#
#agg = AgglomerativeClustering(n_clusters=2)
#assignment = agg.fit_predict(X_train)
#
#mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], assignment)
#plt.legend(["Cluster 0", "Cluster 1"], loc="best")
#plt.xlabel("Feature 0")
#plt.ylabel("Feature 1")
#Now with scaled PCA dataset:
#aggpca = AgglomerativeClustering(n_clusters=2)
#assignmentpca = agg.fit_predict(X2_train)
#
#mglearn.discrete_scatter(X2_train[:, 4], X2_train[:, 5], assignmentpca)
#plt.legend(["Cluster 0", "Cluster 1"], loc="best")
#plt.xlabel("Feature 0")
#plt.ylabel("Feature 1")
# my data is too big
#from sklearn.cluster import DBSCAN
#
#dbscan = DBSCAN(min_samples=2,eps=0.3)
#clusters = dbscan.fit_predict(X_train)
#
#plt.scatter(X_train[:, 2], X_train[:, 3], c=clusters, cmap=mglearn.cm2, s=60)
#plt.xlabel("Feature 0")
#plt.ylabel("Feature 1")
#
#mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], clusters)
#plt.legend(["Cluster 0", "Cluster 1", "Cluster 2"], loc="best")
#plt.xlabel("Feature 0")#plt.ylabel("Feature 1")
#from sklearn.cluster import DBSCAN
#
#dbscanpca = DBSCAN(min_samples=2,eps=0.3)
#clusterspca = dbscanpca.fit_predict(X2_train)
#
#plt.scatter(X2_train[:, 0], X2_train[:, 1], c=clusters, cmap=mglearn.cm2, s=60)
#plt.xlabel("Feature 0")
#plt.ylabel("Feature 1")
#
#mglearn.discrete_scatter(X2_train[:, 0], X2_train[:, 1], clusterspca)
#plt.legend(["Cluster 0", "Cluster 1", "Cluster 2"], loc="best")
#plt.xlabel("Feature 0")
#plt.ylabel("Feature 1")
###Output
_____no_output_____
###Markdown
Since my data set is too large to run both Agglomerative clustering and DBSCAN on, we were tasked with running the unsupervised methods on the cancer data set instead
###Code
# Loading cancer data set and scaling to have PCA run on it:
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
print("cancer.keys():\n", cancer.keys())
###Output
cancer.keys():
dict_keys(['data', 'target', 'frame', 'target_names', 'DESCR', 'feature_names', 'filename'])
###Markdown
Normalizing cancer data set
###Code
X_cancer = cancer.data
y_cancer = cancer.target
scaler_cancer = StandardScaler()
scaler_cancer.fit(X_cancer)
X_scaledcancer = scaler_cancer.transform(X_cancer)
pcacancer = PCA()
pcacancer.fit(X_scaledcancer)
print('-'*20 + 'Explained variance ratio' + '-'*20)
print(pcacancer.explained_variance_ratio_)
###Output
--------------------Explained variance ratio--------------------
[4.42720256e-01 1.89711820e-01 9.39316326e-02 6.60213492e-02
5.49576849e-02 4.02452204e-02 2.25073371e-02 1.58872380e-02
1.38964937e-02 1.16897819e-02 9.79718988e-03 8.70537901e-03
8.04524987e-03 5.23365745e-03 3.13783217e-03 2.66209337e-03
1.97996793e-03 1.75395945e-03 1.64925306e-03 1.03864675e-03
9.99096464e-04 9.14646751e-04 8.11361259e-04 6.01833567e-04
5.16042379e-04 2.72587995e-04 2.30015463e-04 5.29779290e-05
2.49601032e-05 4.43482743e-06]
###Markdown
Transforming cancer data set through PCA
###Code
pcacancer2 = PCA(n_components=0.95)
# fit PCA model to data
pcacancer2.fit(X_scaledcancer)
print('-'*20 + 'Explained variance ratio' + '-'*20)
print(pcacancer2.explained_variance_ratio_)
splitcancer = StratifiedShuffleSplit(n_splits=5,test_size=0.20,
random_state=42)
for train_index, test_index in splitcancer.split(X_scaledcancer, y_cancer):
X3_traincancer = X_scaledcancer[train_index]
X3_testcancer = X_scaledcancer[test_index]
y_traincancer = y_cancer[train_index]
y_testcancer = y_cancer[test_index]
# pca transformation:
X2_pca3 = pcacancer2.transform(X_scaledcancer)
for train_index, test_index in splitcancer.split(X_scaledcancer, y_cancer):
X3_traincancerpca = X2_pca3[train_index]
X3_testcancerpca = X2_pca3[test_index]
y2_traincancerpca = y_cancer[train_index]
y2_testcancerpca = y_cancer[test_index]
###Output
_____no_output_____
###Markdown
K-Means without PCA transformation on the cancer data set
###Code
kmeanscancer = KMeans(n_clusters=2)
kmeanscancer.fit(X3_traincancer)
assignmentscancer=kmeanscancer.labels_
mglearn.discrete_scatter(X3_traincancer[:, 0], X3_traincancer[:, 1], assignmentscancer)
plt.legend(["Cluster 0", "Cluster 1"], loc="best")
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
###Output
_____no_output_____
###Markdown
K-Means with PCA transformation
###Code
kmeanscancerpca = KMeans(n_clusters=2)
kmeanscancerpca.fit(X3_traincancerpca)
assignmentscancerpca=kmeanscancerpca.labels_
mglearn.discrete_scatter(X3_traincancerpca[:, 0], X3_traincancerpca[:, 1], assignmentscancerpca)
plt.legend(["Cluster 0", "Cluster 1"], loc="best")
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
inertiaValscancer = {}
for k in range(1, 11):
kmeansTmp = KMeans(n_clusters=k, max_iter=1000).fit(X3_traincancerpca)
inertiaVals[k] = kmeansTmp.inertia_
plt.figure()
plt.plot(list(inertiaVals.keys()), list(inertiaVals.values()))
plt.xlabel("Number of cluster")
plt.ylabel("Inertia")
plt.show()
###Output
_____no_output_____
###Markdown
Agglomerative clustering on the scaled cancer data set
###Code
from sklearn.cluster import AgglomerativeClustering
aggcancer = AgglomerativeClustering(n_clusters=2)
assignmentcancer = aggcancer.fit_predict(X3_traincancer)
mglearn.discrete_scatter(X3_traincancer[:, 0], X3_traincancer[:, 1], assignmentcancer)
plt.legend(["Cluster 0", "Cluster 1"], loc="best")
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
###Output
_____no_output_____
###Markdown
Agglomerative clustering on the PCA-transformed cancer data set
###Code
aggcancerpca = AgglomerativeClustering(n_clusters=5)
assignmentcancerpca = aggcancerpca.fit_predict(X3_traincancerpca)
mglearn.discrete_scatter(X3_traincancerpca[:, 0], X3_traincancerpca[:, 1], assignmentcancerpca)
plt.legend(["Cluster 0", "Cluster 1"], loc="best")
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
###Output
_____no_output_____
###Markdown
DBSCAN without PCA transformation on cancer data set:
###Code
from sklearn.cluster import DBSCAN
dbscan = DBSCAN(min_samples=5,eps=0.8)
clusters = dbscan.fit_predict(X3_traincancer)
plt.scatter(X3_traincancer[:, 0], X3_traincancer[:, 1], c=clusters, cmap=mglearn.cm2, s=60)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
mglearn.discrete_scatter(X3_traincancer[:, 0], X3_traincancer[:, 1], clusters)
plt.legend(["Cluster 0", "Cluster 1", "Cluster 2"], loc="best")
plt.xlabel("Feature 0")#plt.ylabel("Feature 1")
###Output
_____no_output_____
###Markdown
DBSCAN on scaled PCA cancer data set
###Code
dbscanpca = DBSCAN(min_samples=5,eps=0.95)
clusterspca = dbscanpca.fit_predict(X3_traincancerpca)
plt.scatter(X3_traincancerpca[:, 0], X3_traincancerpca[:, 1], c=clusters, cmap=mglearn.cm2, s=60)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
mglearn.discrete_scatter(X3_traincancerpca[:, 0], X3_traincancerpca[:, 1], clusters)
plt.legend(["Cluster 0", "Cluster 1", "Cluster 2"], loc="best")
plt.xlabel("Feature 0")#plt.ylabel("Feature 1")
###Output
_____no_output_____
###Markdown
Calculating ARI without PCA
###Code
from sklearn.metrics.cluster import adjusted_rand_score
fig, axes = plt.subplots(1, 4, figsize=(15, 3),
subplot_kw={'xticks': (), 'yticks': ()})
algorithms = [KMeans(n_clusters=4), AgglomerativeClustering(n_clusters=4),
DBSCAN()]
random_state = np.random.RandomState(seed=0)
random_clusters = random_state.randint(low=0, high=2, size=len(X3_traincancer))
axes[0].scatter(X3_traincancer[:, 2], X3_traincancer[:, 3], c=random_clusters,
cmap=mglearn.cm3, s=60)
axes[0].set_title("Random assignment - ARI: {:.2f}".format(
adjusted_rand_score(y_traincancer, random_clusters)))
for ax, algorithm in zip(axes[1:], algorithms):
# plot the cluster assignments and cluster centers
clusters = algorithm.fit_predict(X3_traincancer)
ax.scatter(X3_traincancer[:, 0], X3_traincancer[:, 1], c=clusters,
cmap=mglearn.cm3, s=60)
ax.set_title("{} - ARI: {:.2f}".format(algorithm.__class__.__name__,
adjusted_rand_score(y_traincancer, clusters)))
###Output
_____no_output_____
###Markdown
Calculating ARI with PCA
###Code
from sklearn.metrics.cluster import adjusted_rand_score
fig, axes = plt.subplots(1, 4, figsize=(15, 3),
subplot_kw={'xticks': (), 'yticks': ()})
# make a list of algorithms to use
algorithms = [KMeans(n_clusters=4), AgglomerativeClustering(n_clusters=4),
DBSCAN()]
# create a random cluster assignment for reference
random_state = np.random.RandomState(seed=0)
random_clusters = random_state.randint(low=0, high=2, size=len(X3_traincancerpca))
# plot random assignment
axes[0].scatter(X3_traincancerpca[:, 2], X3_traincancerpca[:, 3], c=random_clusters,
cmap=mglearn.cm3, s=60)
axes[0].set_title("Random assignment - ARI: {:.2f}".format(
adjusted_rand_score(y2_traincancerpca, random_clusters)))
for ax, algorithm in zip(axes[1:], algorithms):
# plot the cluster assignments and cluster centers
clusters = algorithm.fit_predict(X3_traincancerpca)
ax.scatter(X3_traincancerpca[:, 0], X3_traincancerpca[:, 1], c=clusters,
cmap=mglearn.cm3, s=60)
ax.set_title("{} - ARI: {:.2f}".format(algorithm.__class__.__name__,
adjusted_rand_score(y2_traincancerpca, clusters)))
###Output
_____no_output_____
###Markdown
Calculating Silhouette Coefficient without PCA
###Code
from sklearn.metrics.cluster import silhouette_score
fig, axes = plt.subplots(1, 4, figsize=(15, 3),
subplot_kw={'xticks': (), 'yticks': ()})
# create a random cluster assignment for reference
random_state = np.random.RandomState(seed=0)
random_clusters = random_state.randint(low=0, high=2, size=len(X3_traincancer))
# plot random assignment
axes[0].scatter(X3_traincancer[:, 2], X3_traincancer[:, 3], c=random_clusters,
cmap=mglearn.cm3, s=60)
axes[0].set_title("Random assignment: {:.2f}".format(
silhouette_score(X3_traincancer, random_clusters)))
algorithms = [KMeans(n_clusters=3), AgglomerativeClustering(n_clusters=3),
DBSCAN()]
for ax, algorithm in zip(axes[1:], algorithms):
clusters = algorithm.fit_predict(X3_traincancer)
# plot the cluster assignments and cluster centers
ax.scatter(X3_traincancer[:, 2], X3_traincancer[:, 3], c=clusters, cmap=mglearn.cm3,
s=60)
ax.set_title("{} : {:.2f}".format(algorithm.__class__.__name__,
silhouette_score(X3_traincancer, clusters)))
###Output
_____no_output_____
###Markdown
Calculating Silhouette Coefficient with PCA
###Code
fig, axes = plt.subplots(1, 4, figsize=(15, 3),
subplot_kw={'xticks': (), 'yticks': ()})
# create a random cluster assignment for reference
random_state = np.random.RandomState(seed=0)
random_clusters = random_state.randint(low=0, high=2, size=len(X3_traincancerpca))
# plot random assignment
axes[0].scatter(X3_traincancerpca[:, 0], X3_traincancerpca[:, 1], c=random_clusters,
cmap=mglearn.cm3, s=60)
axes[0].set_title("Random assignment: {:.2f}".format(
silhouette_score(X3_traincancerpca, random_clusters)))
algorithms = [KMeans(n_clusters=4), AgglomerativeClustering(n_clusters=4),
DBSCAN()]
for ax, algorithm in zip(axes[1:], algorithms):
clusters = algorithm.fit_predict(X3_traincancerpca)
# plot the cluster assignments and cluster centers
ax.scatter(X3_traincancerpca[:, 0], X3_traincancerpca[:, 1], c=clusters, cmap=mglearn.cm3,
s=60)
ax.set_title("{} : {:.2f}".format(algorithm.__class__.__name__,
silhouette_score(X3_traincancerpca, clusters)))
###Output
_____no_output_____
###Markdown
Project 3: Two layer Neural Network
###Code
%load_ext tensorboard
%tensorboard --logdir logs/fit
###Output
_____no_output_____
###Markdown
Project 3: Oil Blending Problem
Group members: Andrew Tran, Ryan Bilas, Elizabeth Gurry, Joshua Allen, Matt Mester
###Code
#http://www.producao.ufrgs.br/arquivos/disciplinas/382_winston_cap3_introduction_to_linear_programming.pdf
# before you do anything...
# mount your drive!
# click folder on the left...
%matplotlib inline
from pylab import *
import shutil
import sys
import os.path
if not shutil.which("pyomo"):
!pip install -q pyomo
assert(shutil.which("pyomo"))
if not (shutil.which("glpsol") or os.path.isfile("glpsol")):
if "google.colab" in sys.modules:
!apt-get install -y -qq glpk-utils
else:
try:
!conda install -c conda-forge ipopt
except:
pass
assert(shutil.which("glpsol") or os.path.isfile("glpsol"))
from pyomo.environ import *
SOLVER = 'glpk'
EXECUTABLE = '/usr/bin/glpsol'
###Output
_____no_output_____
###Markdown
Model, Variable, Constraints
###Code
##declaring the model
model = ConcreteModel()
###variables
##variables are the crude stock and its final blend
##there are three sections of variables, with each section representing the final blend
##inside each section, there are 4 variables inside which represent the individual crude stocks
#regular blend
model.cs1_reg = Var(domain=NonNegativeReals)
model.cs2_reg = Var(domain=NonNegativeReals)
model.cs3_reg = Var(domain=NonNegativeReals)
model.cs4_reg = Var(domain=NonNegativeReals)
#multigrade blend
model.cs1_multi = Var(domain=NonNegativeReals)
model.cs2_multi = Var(domain=NonNegativeReals)
model.cs3_multi = Var(domain=NonNegativeReals)
model.cs4_multi = Var(domain=NonNegativeReals)
#supreme blend
model.cs1_sup = Var(domain=NonNegativeReals)
model.cs2_sup = Var(domain=NonNegativeReals)
model.cs3_sup = Var(domain=NonNegativeReals)
model.cs4_sup = Var(domain=NonNegativeReals)
###sales
##sales are defined as the sum of the barrels per blend type (aka variable section seen above) times the selling price of that blend
##we then sum up that workflow for all blends and we arrive at our answer for sales
sales = (model.cs1_reg + model.cs2_reg + model.cs3_reg + model.cs4_reg)*8.5 + (model.cs1_multi + model.cs2_multi + model.cs3_multi + model.cs4_multi)*9 + (model.cs1_sup + model.cs2_sup + model.cs3_sup + model.cs4_sup)*10
###production costs
##prod cost is found in a similar way to sales
##prod cost is defined as the sum of barrels per crude stock type times buying price of that blend
##we then sum up that workflow for all crude stocks and we arrive at our answer for production costs
prod_cost = (model.cs1_reg + model.cs1_multi + model.cs1_sup)*7.1 +(model.cs2_reg + model.cs2_multi + model.cs2_sup)*8.5 + (model.cs3_reg + model.cs3_multi + model.cs3_sup)*7.7 + (model.cs4_reg + model.cs4_multi + model.cs4_sup)*9
###objective to maximize profit per sales minus production costs
model.profit = Objective(expr = sales - prod_cost, sense = maximize)
###viscosity constraints
##found by crude-stock-X barrels * viscosity for crude stock X / total barrels, but re-expressed (multiplied through) in order to work with pyomo
model.reg_mix = Constraint(expr = model.cs1_reg*5 + model.cs2_reg*-15 + model.cs3_reg*-5 + model.cs4_reg*-30 <= 0)
model.multi_mix = Constraint(expr = model.cs1_multi*15 + model.cs2_multi*-5 + model.cs3_multi*5 + model.cs4_multi*-20 <= 0)
model.sup_mix = Constraint(expr = model.cs1_sup*30 + model.cs2_sup*10 + model.cs3_sup*20 + model.cs4_sup*-5 <= 0)
###demand constraint
##demand must be met exactly
##found by sum of barrels per blend
model.reg_dem = Constraint(expr = model.cs1_reg + model.cs2_reg + model.cs3_reg + model.cs4_reg == 2000)
model.multi_dem = Constraint(expr = model.cs1_multi + model.cs2_multi + model.cs3_multi + model.cs4_multi == 1500)
model.sup_dem = Constraint(expr = model.cs1_sup + model.cs2_sup + model.cs3_sup + model.cs4_sup == 750)
###supply constraint
##supply only has a ceiling
##found by summing all barrels per crude stock type
model.cs1_supply = Constraint(expr = model.cs1_reg + model.cs1_multi + model.cs1_sup <= 1000)
model.cs2_supply = Constraint(expr = model.cs2_reg + model.cs2_multi + model.cs2_sup <= 1100)
model.cs3_supply = Constraint(expr = model.cs3_reg + model.cs3_multi + model.cs3_sup <= 1200)
model.cs4_supply = Constraint(expr = model.cs4_reg + model.cs4_multi + model.cs4_sup <= 1100)
### solve!
SolverFactory(SOLVER, executable=EXECUTABLE).solve(model)
###Output
_____no_output_____
###Markdown
Solve!
###Code
## code to generate report
SolverFactory(SOLVER, executable=EXECUTABLE).solve(model)
# Save the model
model.write("/content/model.lp", io_options={'symbolic_solver_labels': True})
# Generate the file "sensit.sen", this contains the report we want to see
!/usr/bin/glpsol -m /content/model.lp --lp --ranges sensit.sen
# Display the contents of the report: (this shows the sensitivity analysis report)
!cat /content/sensit.sen
##extra prints for double check
print("CS1 used: " + str(model.cs1_reg() + model.cs1_multi() + model.cs1_sup()))
print("CS2 used: " + str(model.cs2_reg() + model.cs2_multi() + model.cs2_sup()))
print("CS3 used: " + str(model.cs3_reg() + model.cs3_multi() + model.cs3_sup()))
print("CS4 used: " + str(model.cs4_reg() + model.cs4_multi() + model.cs4_sup()))
print(" ")
print("Reg sold: " + str(model.cs1_reg() + model.cs2_reg() + model.cs3_reg() + model.cs4_reg()))
print("Multi sold: " + str(model.cs1_multi() + model.cs2_multi() + model.cs3_multi() + model.cs4_multi()))
print("Sup sold: " + str(model.cs1_sup() + model.cs2_sup() + model.cs3_sup() + model.cs4_sup()))
print(" ")
###Output
CS1 used: 1000.0
CS2 used: 1100.0
CS3 used: 1199.9999999999975
CS4 used: 950.0
Reg sold: 2000.0000000000007
Multi sold: 1499.9999999999968
Sup sold: 750.0
###Markdown
Task 3 ->
###Code
# Importing Required Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Read the data
df = pd.read_csv('Iris.csv')
df.head()
# seperating 'X' and 'y'...
from sklearn import preprocessing as pre
X = df.iloc[:,1:5]
y = df.iloc[:,5]
le = pre.LabelEncoder()
Y = le.fit_transform(y)
Y
type(X)
###Output
_____no_output_____
###Markdown
Creating Decision Tree Classifier
###Code
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier()
model = clf.fit(X,Y)
print('Decision Tree Classifier Created !')
###Output
Decision Tree Classifier Created !
###Markdown
Visualizing Decision Tree Classifier
###Code
from sklearn import datasets
iris = datasets.load_iris()
from sklearn import tree
fig = plt.figure(figsize = (25,30))
_ = tree.plot_tree(clf, feature_names = iris.feature_names,
class_names = iris.target_names,
filled = True)
###Output
_____no_output_____
###Markdown
Running SQL in Jupyter notebook Set-Up
###Code
# Dependencies
import os
import csv
import datetime
import pandas as pd
#from sqlalchemy import create_engine
###Output
_____no_output_____
###Markdown
Read 1st csv into DataFrame
###Code
# Test to make sure the files are reading in OK
# csv_file = "./lending_club_csvs/LoanStats2.csv"
# loan_df = pd.read_csv(csv_file)
# loan_df
# View all columns, sorted
sorted(list(loan_df.columns))
###Output
_____no_output_____
###Markdown
Combine all 12 csvs
###Code
# Create a place to store all the .csv files together. Ignore the "set low_memory=False" error message that pops up
mega_list = []
for i in range (12): #12
csv_file = f"./lending_club_csvs/LoanStats{i}.csv"
loan_df = pd.read_csv(csv_file)
mega_list.append(loan_df)
mega_df=pd.concat(mega_list)
# Print new, combined datafile
#list(mega_df.columns) #to view all the columns
mega_df
###Output
_____no_output_____
###Markdown
Start here with any updates Create new data with select columns
###Code
final_df=mega_df[[
"int_rate",
"annual_inc",
"dti",
"delinq_2yrs",
"fico_range_low",
"fico_range_high",
"inq_last_6mths",
"mths_since_last_delinq",
"mths_since_last_record",
"open_acc",
"pub_rec",
"revol_bal",
"revol_util",
"total_acc",
"application_type",
"earliest_cr_line",
"total_acc",
"tax_liens",
"pub_rec_bankruptcies",
"term",
"addr_state"
]]
final_df.head()
# Remove joint applications
final_df=final_df.loc[final_df["application_type"] == "Individual",:] #loc = location, only select the rows where the column application type is individual
final_df
#populate NAN with 0
final_df=final_df.fillna(0)
# check to make sure all values in application_type are individual
final_df.groupby("application_type").count()
# drop column application_type
# to_drop=["application_type"]
# final_df=final_df.drop(columns=to_drop, axis=1)
final_df
# #len(to_drop)
# final_mega_df = mega_data_mr_df.drop(to_drop, axis=1)
# final_mega_df
# drop % from int_rate
final_df['int_rate']=final_df['int_rate'].map(lambda x: float(x.rstrip('%')))
final_df
# drop % from revol_util
final_df['revol_util']=final_df['revol_util'].map(lambda x: float(str(x).rstrip('%')))
final_df
# convert term from a string to a numeric, drop months
final_df['term']=final_df['term'].map(lambda x: int(str(x).rstrip(' months')))
final_df
# Create bins in for the interest rate (takes the high range)
bins = [0, 7.99, 11.99, 15.99, 19.99, 27.99, 100]
# Create labels for these bins
group_labels = ["0% to 8%", "8% to 12%", "12% to 16%", "16% to 20%", "20% to 28%", "over 28%"]
# Change the string values to numeric values
group_labels2 = [1, 2, 3, 4, 5, 6]
# Slice the data and place it into numeric bins
final_df["int_rate"]=pd.cut(final_df["int_rate"], bins, labels=group_labels2)
final_df
# id'ing the bins - create the interest rate key
interest_rate_df=pd.DataFrame({
"binned_interest":group_labels,
"interest_ref_num":[1,2,3,4,5,6]
})
interest_rate_df
# create the interest rate key csv
interest_rate_df.to_csv("final_df_project_3_int_key.csv", index=False)
# MAYA
#issue_d is a string, want to change to datetime format
#first, a dictionary comparing the string of month names to their numerical value
months = {
'Jan' : 1,
'Feb' : 2,
'Mar' : 3,
'Apr' : 4,
'May' : 5,
'Jun' : 6,
'Jul' : 7,
'Aug' : 8,
'Sep' : 9,
'Oct' : 10,
'Nov' : 11,
'Dec' : 12
}
#save in separate field, and split to two columns
temp = final_df['earliest_cr_line']
temp.str.split('-')
new = final_df['earliest_cr_line'].str.split("-", n = 1, expand = True)
#change to numerical value and save as Integers
new[0]= new[0].map(months)
new[1]= new[1].astype(int)
#MAYA
#change Year value to full numerical year by adding 2000
new[1] = 2000 + new[1]
dates = new
dates
# MAYA
#for dates that show up as 2020 - 2099, run for loop to change back to 1920-1999
# for i, row in dates.iterrows():
# if dates[1][i] > 2019:
# dates[1][i] = dates[1][i] - 100
dates.loc[dates[1]>2019,1]-=100
dates
# MAYA
#rename columns
dates.rename(columns = {0: 'Month', 1: 'Year'}, inplace = True)
#convert to datetime format
dates['Full Date'] = pd.to_datetime(dates.assign(Day=1).loc[:, ['Year','Month','Day']])
#add column that just shows Year/Month values, as Day value doesn't matter
dates['earliest_cr_line']= pd.to_datetime(dates['Full Date']).dt.to_period('M')
dates
# MAYA
#merge the date formatted column back into into the main df
final_df['credit_start_date']= dates['Year']
final_df.head()
# Create bins in for the credit_start_date (takes the high range)
year_bins = [0, 2004.99, 2009.99, 2014.99, 2018.99, 2020.99]
# Create labels for these bins
year_labels = ["More than 15 years ago", "10 to 15 years ago", "5 to 10 years ago", "1 to 5 years ago", "Less than a year"]
# Change the string values to numeric values
year_labels2 = [1, 2, 3, 4, 5]
# Slice the data and place it into numeric bins
final_df["credit_start_date"]=pd.cut(final_df["credit_start_date"], year_bins, labels=year_labels2)
final_df
# id'ing the bins - create the credit_start_date key
credit_start_date_df=pd.DataFrame({
"binned_interest":year_labels,
"credit_start_date_ref_num":year_labels2
})
credit_start_date_df
# create the interest rate key csv
credit_start_date_df.to_csv("final_df_project_3_credit_date_key.csv", index=False)
# drop string columns
final_df = final_df.drop(['application_type', 'earliest_cr_line'], axis=1)
# Create new final column database
final_df.to_csv("final_df_project_3.csv", index=False)
final_df['term'].unique()
###Output
_____no_output_____
###Markdown
Exploring Ebay Car Sales Data--- 1. IntroductionThis project is created to demonstrate the basics of data cleaning and data exploration using *pandas*.The dataset used in this project contain records of used cars from eBay-Kleinanzeigen, a classifieds section of the German eBay website.The dataset was originally scraped and uploaded to [Kaggle](https://www.kaggle.com/orgesleka/used-cars-database/data), but a few modifications have been made by DataQuest to the original dataset. About 50,000 data points were sampled from the full dataset, and the data was dirtied to make it more closely resemble what would be expected from a scraped dataset (the version uploaded to Kaggle was cleaned to be easier to work with).The data dictionary provided with data is as follows:- **dateCrawled** - When this ad was first crawled. All field-values are taken from this date.- **name** - Name of the car.- **seller** - Whether the seller is private or a dealer.- **offerType** - The type of listing- **price** - The price on the ad to sell the car.- **abtest** - Whether the listing is included in an A/B test.- **vehicleType** - The vehicle Type.- **yearOfRegistration** - The year in which the car was first registered.- **gearbox** - The transmission type.- **powerPS** - The power of the car in PS.- **model** - The car model name.- **kilometer** - How many kilometers the car has driven.- **monthOfRegistration** - The month in which the car was first registered.- **fuelType** - What type of fuel the car uses.- **brand** - The brand of the car.- **notRepairedDamage** - If the car has a damage which is not yet repaired.- **dateCreated** - The date on which the eBay listing was created.- **nrOfPictures** - The number of pictures in the ad.- **postalCode** - The postal code for the location of the vehicle.- **lastSeenOnline** - When the crawler saw this ad last online. Note: The fields **lastSeenOnline** and **dateCreated** could be used to estimate how long a car will be at least online before it is sold. 2. Goal & Objective of This ProjectThis project aims to clean the data and analyze the included used car listings. 3. General Observation of the Dataset
###Code
### Import the pandas and NumPy libraries.
### Reading the dataset into pandas.
import numpy as np
import pandas as pd
autos = pd.read_csv("autos.csv", encoding = "Latin-1")
### Note: Common encoding is "UTF-8" / "Latin-1" / "Windows-1252".
### Showing a preview of the dataset.
autos
### Showing the first 5 rows of data.
autos.head()
### Getting the overview of all the dtypes used in the dataset, along with its shape, columns, and other information.
autos.info()
### Check columns that have null values.
check_null = autos.isnull().sum()
check_null
###Output
_____no_output_____
###Markdown
**Observations from the above:**- The dataset contains 20 columns, most of which are strings (objects).- Some columns have null values, but none have more than ~20% null values.- Most of the dates columns are stored as strings (objects).- The column names use [camelcase](https://en.wikipedia.org/wiki/Camel_case) instead of Python's preferred [snakecase](https://en.wikipedia.org/wiki/Snake_case), which means we can't just replace spaces with underscores.Let's start by cleaning the column names so that the dataset is easier to work with.We will use snakecase for the column names. 4. Cleaning Column Names
###Code
print("Original Column Names:")
print("----------------------")
print(autos.columns)
### Define a customized function to rename the column of this dataset.
### There are other more advanced technic to do this, but we will just use a simple basic function.
def rename_column(column_name):
if column_name == "yearOfRegistration":
new_name = "registration_year"
elif column_name == "monthOfRegistration":
new_name = "registration_month"
elif column_name == "notRepairedDamage":
new_name = "unrepaired_damage"
elif column_name == "dateCreated":
new_name = "ad_created"
elif column_name == "dateCrawled":
new_name = "date_crawled"
elif column_name == "offerType":
new_name = "offer_type"
elif column_name == "abtest":
new_name = "ab_test"
elif column_name == "vehicleType":
new_name = "vehicle_type"
elif column_name == "gearbox":
new_name = "gear_box"
elif column_name == "powerPS":
new_name = "power_PS"
elif column_name == "fuelType":
new_name = "fuel_type"
elif column_name == "nrOfPictures":
new_name = "no_of_pictures"
elif column_name == "postalCode":
new_name = "postal_code"
elif column_name == "lastSeen":
new_name = "last_seen"
else:
new_name = column_name
return new_name
### Looping through each column in the dataset and call the function to rename the column.
new_columns = []
for c in autos.columns:
new_name = rename_column(c)
new_columns.append(new_name)
autos.columns = new_columns
### Alternatively, can also directly assign the column names as shown below.
autos.columns = ['date_crawled', 'name', 'seller', 'offer_type', 'price', 'ab_test',
'vehicle_type', 'registration_year', 'gear_box', 'power_PS', 'model',
'odometer', 'registration_month', 'fuel_type', 'brand',
'unrepaired_damage', 'ad_created', 'no_of_pictures', 'postal_code',
'last_seen']
print("Renamed Column Names:")
print("---------------------")
print(autos.columns)
### Showing the first 5 rows of data with new column names.
autos.head()
###Output
_____no_output_____
###Markdown
5. Initial Exploration - Checking General Statistics of the Dataset
###Code
### Check statistics for only numeric columns.
autos.describe()
### Check statistics for object columns (non-numeric columns).
autos.describe(include=['O'])
### Check statistics for all columns.
autos.describe(include='all')
###Output
_____no_output_____
###Markdown
**Observation:**Columns that have mostly one value (or where almost all of the values are the same) are candidates to be dropped:- `seller` (almost all records are "privat")- `offer_type` (almost all records are "Angebot")The `no_of_pictures` column looks odd, we'll need to investigate this further.
###Code
### Further investigation on the 'no_of_pictures' column.
### All the values in this column is 0.
autos["no_of_pictures"].value_counts()
###Output
_____no_output_____
###Markdown
6. Dropping ColumnsDrop the following columns that have mostly one value (or where almost all of the values are the same):- `seller` (almost all records are "privat")- `offer_type` (almost all records are "Angebot")- `no_of_pictures` (all records are "0")
###Code
autos = autos.drop(["seller", "offer_type", "no_of_pictures"], axis=1)
###Output
_____no_output_____
###Markdown
7. Cleaning Numeric Data Stored As Text 7.1 Converting the 'price' and 'odometer' column to numeric type
###Code
### Replace dollar sign and comma with empty string before converting the 'price' column to int type.
autos["price"] = autos["price"].str.replace("$","").str.replace(",","").astype(int)
### Replace 'km' with empty string before converting the 'odometer' column to int type.
autos["odometer"] = autos["odometer"].str.replace("km","").str.replace(",","").astype(int)
###Output
_____no_output_____
###Markdown
7.2 Renaming the 'odometer' column to 'odometer_km'
###Code
### Renaming the 'odometer' column to 'odometer_km' so that we know the numeric values are in km.
autos.rename({"odometer":"odometer_km"}, axis=1, inplace=True)
### Note: Either use inplace=True or assign the result back to the dataframe; otherwise, the modifications will be lost.
###Output
_____no_output_____
###Markdown
8. Identify Outliers (values that look unrealistically high or low) 8.1 Inspecting the 'price' Column
###Code
### Checking the number of unique price values.
autos["price"].unique().shape
### Checking statistics of the price column.
autos["price"].describe()
### Checking the count for each unique price value.
autos["price"].value_counts().sort_index(ascending=True)
autos["price"].value_counts(normalize=True).sort_index().head(10)
### Take a closer look on prices that are on the high end (sort by descending order).
autos["price"].value_counts().sort_index(ascending=False).head(20)
###Output
_____no_output_____
###Markdown
**Observation:**- There are 2357 unique price values.- 1421 records (about 2.8% of the listings) have a price of 0, and the maximum price is one hundred million (99,999,999).- For prices on the high end, prices increase steadily up to 350,000 and from there jump to unrealistically high values. There are 14 records with prices greater than 350,000.- These unrealistic records, which make up less than 3% of the listings, will be removed later. 8.2 Inspecting the 'odometer_km' Column
###Code
### Checking the number of unique mileage values.
autos["odometer_km"].unique().shape
### Checking statistics of the mileage.
autos["odometer_km"].describe()
### Checking the count for each unique mileage value.
autos["odometer_km"].value_counts().sort_index(ascending=True)
###Output
_____no_output_____
###Markdown
**Observation:**There seems to be more records with high mileage than low mileage in the listing. 8.3 Inspecting DatesAs seen below, dates for `date_crawled`, `ad_created`, and `last_seen` are all identified as string values by pandas.We need to convert the data into a numerical representation so we can understand it quantitatively.
###Code
### Showing the first 5 records of 'date_crawled', 'ad_created', and 'last_seen'.
autos[['date_crawled','ad_created','last_seen']][0:5]
###Output
_____no_output_____
###Markdown
8.3.1 Inspecting 'date_crawled'
###Code
### Select only the first 10 characters of the date to generate a distribution, and then sort by the index.
### To include missing values in the distribution with 'dropna=False'
### Use sort_index() to rank by date in ascending order.
autos["date_crawled"].str[:10].value_counts(dropna=False).sort_index()
# Following code is to use percentages instead of counts with 'normalize=True'.
# autos["date_crawled"].str[:10].value_counts(normalize=True, dropna=False).sort_index()
autos["date_crawled"].str[:10].describe()
###Output
_____no_output_____
###Markdown
**Observation:**- There are 34 unique date value for `date_crawled`.- Looks like the website is crawled daily for about a month in March and April 2016 (from 2016-03-05 to 2016-04-07).- The distribution of listings crawled on each day is roughly uniform.- The date with the highest number of ads crawled, 1934, is on 2016-04-03. 8.3.2 Inspecting 'ad_created' Date
###Code
autos["ad_created"].str[:10].describe()
### Very few ads are created on 2015.
### Let's check further below.
autos["ad_created"].str[:10].value_counts(dropna=False).sort_index()
### Noticed that there are significant number of ads created on and after March 2016.
### Very few ads are created before March 2016.
autos["ad_created"].str[:10].value_counts(dropna=False).sort_index().head(50)
autos["ad_created"].str[:10].value_counts(dropna=False).sort_index().tail(50)
###Output
_____no_output_____
###Markdown
**Observation:**- There are 76 unique date value for `ad_created`.- Noticed that there are significant number of ads created on and after March 2016, particularly during the crawling period which is from 2016-03-05 to 2016-04-07.- Very few ads are created before March 2016.- The date with the highest number of ads created, 1946, is on 2016-04-03.- This is also the same date where the highest number of ads are crawled. 8.3.3 Inspecting 'last_seen' Date
###Code
autos["last_seen"].str[:10].describe()
### There is significant number of ads that are last seen on 2016-04-06.
autos["last_seen"].str[:10].value_counts(dropna=False).sort_index()
###Output
_____no_output_____
###Markdown
**Observation:**- There are 34 unique date value for `last_seen`.- There is a spike of records (6214, 11050, 6546) with `last seen` date on the last 3 days of the crawling period.- The highest number of records (11050) are last seen on 2016-04-06, a day before the crawling period ends.- This is unlikely due to a spike in sales. 8.4 Inspecting 'registration_year'
###Code
autos["registration_year"].describe()
autos["registration_year"].value_counts()
autos["registration_year"].value_counts().sort_index()
###Output
_____no_output_____
###Markdown
**Observation:**The year that the car was first registered will likely indicate the age of the car. Looking at this column, we note some odd values. The minimum value is 1000, long before cars were invented and the maximum is 9999, many years into the future.Because a car can't be first registered after the listing was seen, any vehicle with a registration year above 2016 is definitely inaccurate. Determining the earliest valid year is more difficult. Realistically, it could be somewhere in the first few decades of the 1900s.Let's count the number of listings with cars that fall outside the 1900 - 2016 interval and see if it's safe to remove those rows entirely, or if we need more custom logic.
###Code
### Checking the number of records where 'registration_year' is before 1900 OR after 2016.
autos[(autos["registration_year"] < 1900) | (autos["registration_year"] > 2016)]
###Output
_____no_output_____
###Markdown
There are 1972 records where 'registration_year' is before 1900 OR after 2016, which is about 4%.We will remove these outliers in the following section. 9. Remove Outliers 9.1 Remove records with unrealistic 'registration_year'
###Code
### Filter records where 'registration_year' is between 1900 and 2016.
boo = autos["registration_year"].between(1900, 2016)
autos2 = autos.loc[boo]
autos2.describe()
autos2["registration_year"].value_counts(normalize=True)
###Output
_____no_output_____
###Markdown
9.2 Remove records with unrealistic 'price'
###Code
### Filter records where 'price' is not 0, and not greater than 350,000.
boo = autos2["price"].between(1, 350000)
autos2 = autos2.loc[boo]
autos2.describe()
###Output
_____no_output_____
###Markdown
10. Find Popular Brands
###Code
### Create an array of unique brands
brands = autos2["brand"].unique()
print(brands)
### Getting the count for each unique brand.
autos2["brand"].value_counts()
### Selecting the top 10 brands to be used for further analysis.
top10_brands = autos2["brand"].value_counts().head(10)
print(top10_brands)
### Getting the index label of the top 10 brands.
top10_brands = top10_brands.index
print(top10_brands)
###Output
Index(['volkswagen', 'bmw', 'opel', 'mercedes_benz', 'audi', 'ford', 'renault',
'peugeot', 'fiat', 'seat'],
dtype='object')
###Markdown
**Observation:**Volkswagen is the most popular brand, with 9862 records, almost double that of its closest rival, BMW (5137 records). 11. Analyze the Top 10 Brands
###Code
### Create a general function to sort a given dictionary in descending order.
### This function will return a sorted list of tuple.
### Note that sorted() built-in function doesn't work too well with dictionaries because
### it only considers and returns the dictionary keys (instead of the key+value pair).
### We need to transform the dictionary into a list of tuples in order to do sorting.
def sort_dict(dictionary):
### Initialise a blank list to store tuples.
list_of_tuple = []
for key in dictionary:
val_key_tuple = (dictionary[key], key)
list_of_tuple.append(val_key_tuple)
list_of_tuple_sorted = sorted(list_of_tuple, reverse = True)
# Either print the sorted records, or return the sorted list of tuple.
#for item in list_of_tuple_sorted:
# print(item[1], ':', item[0])
return list_of_tuple_sorted
###Output
_____no_output_____
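As a side note, the same descending sort could be written more compactly with `sorted()` and a generator of (value, key) tuples; this sketch is equivalent to `sort_dict` above and is only shown as an alternative design:

```python
def sort_dict_compact(dictionary):
    """Return (value, key) tuples sorted by value in descending order."""
    return sorted(((v, k) for k, v in dictionary.items()), reverse=True)

print(sort_dict_compact({"a": 3, "b": 10, "c": 7}))   # [(10, 'b'), (7, 'c'), (3, 'a')]
```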
###Markdown
11.1 Exploring Price by Brand
###Code
### Getting the average price for each of the top 10 brands.
avg_price_by_brand = {}
for b in top10_brands:
selected_rows = autos2[autos2["brand"] == b]
mean = selected_rows["price"].mean()
avg_price_by_brand[b] = int(mean)
print("### Average Price by Brand ###")
avg_price_by_brand
### Call the sort_dict() function to sort the records.
print("### Average Price by Brand (Sorted) ###")
sort_dict(avg_price_by_brand)
###Output
### Average Price by Brand (Sorted) ###
###Markdown
**Observation:**In the top 10 brands, there's a distinct price gap.- Audi, Mercedes Benz, and BMW are more expensive.- Opel, Fiat, and Renault are less expensive.- Volkswagen, Seat, Ford and Peugeot are in between.This may explain why Volkswagen is the most popular brand, as its price sits in the middle. 11.2 Exploring Mileage by Brand
###Code
### Getting the average mileage for each of the top 10 brands.
avg_mileage_by_brand = {}
for b in top10_brands:
selected_rows = autos2[autos2["brand"] == b]
mean = selected_rows["odometer_km"].mean()
avg_mileage_by_brand[b] = int(mean)
#print("### Average Mileage by Brand ###")
#avg_mileage_by_brand
### Call the sort_dict() function to sort the records.
print("### Average Mileage by Brand (Sorted) ###")
sort_dict(avg_mileage_by_brand)
###Output
### Average Mileage by Brand (Sorted) ###
###Markdown
**Observation:**All the top 10 brands have high mileage. 11.3 Analyzing Price & Mileage by Brand (Part 1)Let's use aggregation to understand the average mileage for those cars and if there's any visible link with mean price. The following will combine the data from both series objects into a single dataframe (with a shared index) and display the dataframe directly.
###Code
### Convert the 'avg_price_by_brand' dictionary to series objects, using the series constructor.
### The keys in the dictionary became the index in the series object.
bmp_series = pd.Series(avg_price_by_brand)
print(bmp_series)
### We can then create a single-column dataframe from this series object.
### We need to use the columns parameter when calling the dataframe constructor (which accepts a array-like object)
### to specify the column name (or the column name will be set to 0 by default).
df = pd.DataFrame(bmp_series, columns=['mean_price'])
df
### Adding 'mean_mileage' new column to the dataframe.
df["mean_mileage"] = pd.Series(avg_mileage_by_brand)
df
###Output
_____no_output_____
###Markdown
**Observation:**The average mileage seems to have no visible link with the mean price.There is no significant difference in average mileage between the more expensive and the cheaper brands. 11.4 Analyzing Price & Mileage by Brand (Part 2)Split the mileage (odometer_km) into groups, and use aggregation to see if average prices follow any pattern based on the mileage.
###Code
### Checking the count and unique values of 'odometer_km'.
autos2["odometer_km"].value_counts().sort_index()
### Split the odometer_km into 3 groups.
### Getting the average price for each of the top 10 brands, for each of the 'odometer_km' groups.
avg_price_group1 = {}
avg_price_group2 = {}
avg_price_group3 = {}
for b in top10_brands:
selected_rows_group1 = autos2[(autos2["brand"] == b) & (autos2["odometer_km"] <= 50000)]
selected_rows_group2 = autos2[(autos2["brand"] == b) & (autos2["odometer_km"].between(50001, 100000))]
selected_rows_group3 = autos2[(autos2["brand"] == b) & (autos2["odometer_km"] > 100000)]
mean_group1 = selected_rows_group1["price"].mean()
mean_group2 = selected_rows_group2["price"].mean()
mean_group3 = selected_rows_group3["price"].mean()
avg_price_group1[b] = int(mean_group1)
avg_price_group2[b] = int(mean_group2)
avg_price_group3[b] = int(mean_group3)
### Convert the 'avg_price_with_damage' dictionary to series objects, using the series constructor.
### The keys in the dictionary became the index in the series object.
mp_series = pd.Series(avg_price_group1)
### We can then create a single-column dataframe from this series object.
### We need to use the columns parameter when calling the dataframe constructor (which accepts a array-like object)
### to specify the column name (or the column name will be set to 0 by default).
df = pd.DataFrame(mp_series, columns=['mean_price_odo_up_to_50km'])
### Adding 'mean_price_non_damage' new column to the dataframe.
df["mean_price_odo_50_to_100km"] = pd.Series(avg_price_group2)
df["mean_price_odo_above_100km"] = pd.Series(avg_price_group3)
df
###Output
_____no_output_____
###Markdown
**Observation:**For the same brand, the average price decreases as the mileage increases. 11.5 Analyzing Price For Cars With and Without DamageHere we want to find out how much cheaper cars with damage are than their non-damaged counterparts.
###Code
### Checking the unique values for the 'unrepaired_damage' column.
autos2["unrepaired_damage"].value_counts()
### Getting the average price for each of the top 10 brands, for cars with damage and without damage.
avg_price_with_damage = {}
avg_price_non_damaged = {}
for b in top10_brands:
selected_rows_with_damage = autos2[(autos2["brand"] == b) & (autos2["unrepaired_damage"] == "ja")]
selected_rows_non_damage = autos2[(autos2["brand"] == b) & (autos2["unrepaired_damage"] == "nein")]
mean_with_damage = selected_rows_with_damage["price"].mean()
mean_non_damage = selected_rows_non_damage["price"].mean()
avg_price_with_damage[b] = int(mean_with_damage)
avg_price_non_damaged[b] = int(mean_non_damage)
### Convert the 'avg_price_with_damage' dictionary to series objects, using the series constructor.
### The keys in the dictionary became the index in the series object.
mp_series = pd.Series(avg_price_with_damage)
### We can then create a single-column dataframe from this series object.
### We need to use the columns parameter when calling the dataframe constructor (which accepts a array-like object)
### to specify the column name (or the column name will be set to 0 by default).
df = pd.DataFrame(mp_series, columns=['mean_price_with_damage'])
### Adding 'mean_price_non_damage' new column to the dataframe.
df["mean_price_non_damage"] = pd.Series(avg_price_non_damaged)
df
### Calculate how much cheaper (in percent) damaged cars are, and add it as a column to the dataframe.
df["percent_cheaper"] = (1 - df["mean_price_with_damage"] / df["mean_price_non_damage"]) * 100
df
###Output
_____no_output_____
###Markdown
**Observation:**For the top 10 brands, cars with damage are around 30-40% cheaper than their non-damaged counterparts. 12. Find the most common brand/model combinations
###Code
### Create a new column 'brand_and_model', which combines the brand and the model columns.
autos2["brand_and_model"] = autos2["brand"] + "/" + autos2["model"]
autos2
### Getting the count for each unique brand/model combination.
autos2["brand_and_model"].value_counts()
###Output
_____no_output_____
###Markdown
Why Walk When You Can Zipline? Sam Daitzman and Jocelyn Jimenez. December 2018
###Code
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last'
# import functions from the modsim.py module
from modsim import *
###Output
_____no_output_____
###Markdown
IntroductionAlthough Olin is a very small campus, it is sometimes tedious to walk through its various curved paths and flights of stairs. We noticed great potential for a zip line from Olin's Academic Center to West Hall. Several issues arose immediately. For instance, where should the landing spot be? In our model, we decided the zip line should end at the corner room of West Hall on the second floor, because that gives the fastest access to most dorms in West Hall. If we only asked "Which floor in the AC gets us to our destination fastest?" the answer would be obvious: the 4th floor; but if you zipline at too steep an angle, you are likely to get injured. As a result, we take the safety of the individual into consideration in order to make the model more useful. Our model asks what the fastest safe mount point in the AC would be for getting from the AC to West Hall as fast as (safely) possible. Stick around to find out where to anchor a zip-line in the Academic Center. Code Setup
###Code
# units!
year = UNITS.year
s = UNITS.second
N = UNITS.newton
kg = UNITS.kilogram
m = UNITS.meter
###Output
_____no_output_____
###Markdown
To prove the point that it is a good idea for Olin to have a zip line, we modeled how the change in position will affect the velocity of the individual. We considered important initial/constant variables like the starting position, where (0,0) is at the tallest part of the Academic Center. For the following model, the starting velocity of the person in the x and y dimensions is 0 m/s, as can be seen below.
###Code
#Starting position on the x and y axis
x_0 = (0 * m)
y_0 = (0 * m)
#Starting velocity in the x and y dimension
vx_0 = 0 * m / s
vy_0 = 0 * m / s
init = State(x=x_0,
y=y_0,
vx=vx_0,
vy=vy_0)
###Output
_____no_output_____
###Markdown
Other constants include the mass and reference area of the individual, gravity, the air density and drag coefficient, the height of the West Hall window, the maximum landing speed, and the end time. These constants are defined as the parameters of the model.
###Code
params = Params(init=init,
# Mass of the individual ziplining
m_human=70 * kg,
#Gravity
grav = Vector(0 * m / s ** 2, -9.81),
#Mass Density of the Fluid
rho = 1.275 * kg / m ** 3,
#Reference Area of individual
area = 0.7 * m ** 2,
#Drag Coefficient
cd = 1.2,
#End Time
t_end=100e6 * s,
#Height of West Hall Window in meters
WH_window_abs_height = 59.3,
#Maximum Landing Speed
max_landing_speed = 12 * m / s
)
#Creates a system containing constants
def make_system(params, zipline):
unpack(params)
system = System(params, z=zipline)
return system
make_system(params, Vector(100 * m, -25.1))
###Output
_____no_output_____
###Markdown
Force Functions We calculate the different forces acting on the zip-liner: gravity, drag, the net force, and the effective force. The cells below define the functions and equations that determine the final velocity of the individual. We calculate the force of gravity pulling the zip-liner downward and the drag force resisting their motion; their sum is the net force. Then we calculate the effective force, which is the component of the net force in line with the zip-line.
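In symbols, with $\hat{z}$ the unit vector pointing along the zip-line, the net and effective forces used below are $$\vec{F}_{\mathrm{net}} = \vec{F}_{\mathrm{grav}} + \vec{F}_{\mathrm{drag}}, \qquad \vec{F}_{\mathrm{eff}} = (\vec{F}_{\mathrm{net}} \cdot \hat{z})\,\hat{z},$$ which is the vector projection that `net_force.proj(z.hat())` computes in the cell below.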
###Code
def earth_grav(m, g):
return g * m
earth_grav(100*kg, make_system(params, Vector(100 * m, -25.1)).grav)
def drag_force(rho, v, area, cd):
#Direction
direction= -v.hat()
#Drag Equation
drag = (1/2) * rho * v.mag**2 *area * cd
#Drag as a Vector
return direction * drag
drag_force(20 * kg / m ** 3, Vector(3 * m / s, 4), 10 * m ** 2, 0.8)
def effective_force(state, system):
"""Calculates gravitational force for arbitrary objects"""
x, y, vx, vy = state
unpack(system)
#Force of Gravity
grav_f = earth_grav(m_human, grav)
#Force of Drag
drag_f = drag_force(rho, Vector(vx, vy), area, cd)
#Net Force
net_force = grav_f + drag_f
#Effective Force
effective_force = net_force.proj(z.hat())
return effective_force
###Output
_____no_output_____
###Markdown
Simulation Setup
###Code
def slope_func(state, t, system):
x, y, vx, vy = state
unpack(system)
# make velocity
v = Vector(vx, vy)
# calculate force
force = effective_force(state, system)
# calculate acceleration
a = force * (1/m_human)
# cast acceleration to modsimvector
a = Vector(a[0], a[1])
# return velocity and acceleration
return vx, vy, a.x, a.y
vx, vy, ax, ay = slope_func(init, 0, make_system(params, Vector(100 * m, -25.1)))
print(vx, vy, ax, ay)
def event_func(state, t, system):
x, y, vx, vy = state
position = Vector(x, y)
return position.mag - z.mag
event_func(init, 0, make_system(params, Vector(100 * m, -25.1)))
###Output
_____no_output_____
###Markdown
Simulation This simulation shows the zip-liner descending from the AC to West Hall from a particular starting height.
###Code
results, details = run_ode_solver(make_system(params, Vector(100 * m, -25.1)), slope_func, events=event_func, method='LSODA')
plot(results.x, results.y - z.y.magnitude, 'go-')
decorate(title='Zip-lining from AC to West Hall',
xlabel='X Position (m)',
ylabel='Y Position (m)')
###Output
_____no_output_____
###Markdown
This plot shows the position over time. Each point along the line is a different moment in time.
###Code
plot(results.y, label='Y position (m)')
plot(results.x, label='X position (m)')
decorate(title='Position over Time',
xlabel='Time (s)',
ylabel='Position (m)',
legend=True)
###Output
_____no_output_____
###Markdown
These lines represent the change in X and Y position over time.
###Code
v_final = Vector(get_last_value(results.vx) * m / s, get_last_value(results.vy))
print(v_final.mag)
###Output
15.73897673293782 meter / second
###Markdown
The cell above shows how we obtain the arrival velocity. Sweeping Start Height
###Code
thresholds = linspace(60, 90, 10)
for i,e in enumerate(thresholds):
thresholds[i] -= params.WH_window_abs_height
print(thresholds)
for y in thresholds:
system = make_system(params, Vector(100 * m, -y))
results, details = run_ode_solver(system, slope_func, events=event_func, method='LSODA')
plot(results.x, results.y - z.y.magnitude + params.WH_window_abs_height, 'ro-')
decorate(title='Zip-lining from AC to West Hall (Different Starting Points)',
xlabel='X Position (m)',
ylabel='Y Position (m)')
###Output
_____no_output_____
###Markdown
The plot above shows the process of zip-lining from various starting heights. We obtained the range of height differences using Olin College's blueprints, by comparing the absolute heights of the top of the AC, bottom of the AC, and our destination room in West Hall. Finding End Velocities
###Code
thresholds = linspace(60, 90, 15)
print(thresholds)
def landing_speed(startHeight, params):
height_y = startHeight - params.WH_window_abs_height
system = make_system(params, Vector(100 * m, -height_y))
results, details = run_ode_solver(system, slope_func, events=event_func, method='LSODA')
v_final = Vector(get_last_value(results.vx) * m / s, get_last_value(results.vy))
return v_final.mag
landing_speed(60, params)
landing = SweepSeries()
for y in thresholds:
landing[y] = landing_speed(y, params)
plot(landing, 'ro-')
decorate(title='Zip-lining from AC to West Hall',
xlabel='Starting Height (m)',
ylabel='Landing speed(m/s)')
###Output
_____no_output_____
###Markdown
This plot shows the landing speed depending on starting height. By looking at this plot, we can choose a particular start height depending on our desired landing speed.
###Code
landing
###Output
_____no_output_____
###Markdown
Ideal Starting Height We wanted to calculate our ideal starting height. We wrote an error function that approaches zero as the ideal starting height is approached. The maximum landing speed is based on the maximum safe landing speed of a hang-glider, which exerts analogous forces on a human.
###Code
def error_func(startHeight, params):
return params.max_landing_speed - landing_speed(startHeight, params)
error_func(60, params)
res = fsolve(error_func, 75, params)
print(res)
error_func(fsolve(error_func, 75, params)[0], params)
system = make_system(params, Vector(100 * m, -(fsolve(error_func, 75, params)[0] - params.WH_window_abs_height)))
results, details = run_ode_solver(system, slope_func, events=event_func, method='LSODA')
plot(results.x, results.y - z.y.magnitude + params.WH_window_abs_height, 'ro-')
decorate(title='Zip-lining from AC to West Hall',
xlabel='X Position (m)',
ylabel='Y Position (m)')
plot(results.y - z.y.magnitude + params.WH_window_abs_height, 'ro-')
decorate(title='Zip-lining from AC to West Hall',
xlabel='Time (s)',
ylabel='Y Position (m)')
###Output
_____no_output_____
###Markdown
This plot represents the ideal descent of a zip-liner from the AC to West Hall.
###Code
Vector(get_last_value(results.vx), get_last_value(results.vy)).mag
###Output
_____no_output_____
###Markdown
The zip-liner arrives incredibly close to, but slightly below, the maximum safe velocity. ConclusionsIn this model, we find that the ideal starting height to arrive as quickly as possible (but at a safe speed) when zip-lining to West Hall would be around the third floor (about 15m above the first floor of the AC). The zip-liner would arrive safely at a speed slightly below 12 m/s. Before attempting this, we would want to conduct more precise modeling and account for the forces we've abstracted out of our model, like the tension in the rope and the changing normal force of the rope against the zip-liner through the handle. Future Steps: Questioning Assumptions. Straight Zipline? We didn't have time to finish this modeling work, but with more work we might be able to simulate a more accurate zip-line curvature. To simplify our modeling, we assumed the line would be under infinite tension (in other words, perfectly straight) and the wheel would roll perfectly. For the wheel to behave efficiently, it's more likely that the line would have to maintain some slack. To get a more accurate estimate, we would continue this modeling work.
###Code
def calc_parabola_vertex(x1, y1, x2, y2, x3, y3):
'''
Adapted and modifed to get the unknowns for defining a parabola:
http://stackoverflow.com/questions/717762/how-to-calculate-the-vertex-of-a-parabola-given-three-points
With thanks to http://chris35wills.github.io/parabola_python/
'''
d = (x1-x2) * (x1-x3) * (x2-x3);
A = (x3 * (y2-y1) + x2 * (y1-y3) + x1 * (y3-y2)) / d;
B = (x3*x3 * (y1-y2) + x2*x2 * (y3-y1) + x1*x1 * (y2-y3)) / d;
C = (x2 * x3 * (x2-x3) * y1+x3 * x1 * (x3-x1) * y2+x1 * x2 * (x1-x2) * y3) / d;
return A,B,C
heights = linrange(60, 90, 5)  # candidate starting heights (m); renamed to avoid shadowing the built-in range
for i in heights:
xNum = linrange(0, 100)
x1,y1=[0,i]
x3,y3=[100,59.3]
x2,y2=[50,y3-(y1-y3)/(2*pi)]
#Calculate the unknowns of the equation y=ax^2+bx+c
a,b,c=calc_parabola_vertex(x1, y1, x2, y2, x3, y3)
zline = SweepSeries()
dz = SweepSeries()
for x in xNum:
zline[x] = a * x ** 2 + b * x + c
dz[x] = 2 * a * x + b
plot(xNum, zline)
decorate(title='Parabolic Zip-line Curves',
xlabel='X (m)',
ylabel='Y (m)')
for i in heights:
xNum = linrange(0, 100)
x1,y1=[0,i]
x3,y3=[100,59.3]
x2,y2=[50,y3-(y1-y3)/(2*pi)]
#Calculate the unknowns of the equation y=ax^2+bx+c
a,b,c=calc_parabola_vertex(x1, y1, x2, y2, x3, y3)
zline = SweepSeries()
dz = SweepSeries()
for x in xNum:
zline[x] = a * x ** 2 + b * x + c
dz[x] = 2 * a * x + b
plot(xNum, dz)
decorate(title='Parabolic Zip-line Derivatives',
xlabel='X (m)',
ylabel='Y (m)')
def effective_parabolic_force(state, system):
"""Calculates gravitational force for arbitrary objects"""
x, y, vx, vy = state
unpack(system)
#Force of Gravity
grav_f = earth_grav(m_human, grav)
#Force of Drag
drag_f = drag_force(rho, Vector(vx, vy), area, cd)
#Net Force
net_force = grav_f + drag_f
# Find parabola
x1,y1=[0,z[0].magnitude] # AC startpoint
x3,y3=[100,WH_window_abs_height] # WH endpoint
x2,y2=[50,y3-(y1-y3)/(2*pi)] # midpoint
#Calculate the unknowns
a,b,c=calc_parabola_vertex(x1, y1, x2, y2, x3, y3)
# Find the tangent vector
tangent_vector = Vector(1, 2 * a * x + b)
    # print(tangent_vector)  # debug output; uncomment to inspect the local tangent direction
#Effective Force
effective_force = net_force.proj(tangent_vector.hat())
return effective_force
def slope_func_parabolic(state, t, system):
x, y, vx, vy = state
unpack(system)
# make velocity
v = Vector(vx, vy)
# calculate force
force = effective_parabolic_force(state, system)
# calculate acceleration
a = force * (1/m_human)
# cast acceleration to modsimvector
a = Vector(a[0], a[1])
# return velocity and acceleration
return vx, vy, a.x, a.y
def event_func_parabolic(state, t, system):
x, y, vx, vy = state
position = Vector(x, y)
return position.mag - z.mag
results, details = run_ode_solver(make_system(params, Vector(100 * m, -25.1)), slope_func_parabolic, events=event_func_parabolic, method='LSODA')
plot(results.x, results.y - z.y.magnitude, 'go-')
decorate(title='Zip-lining from AC to West Hall',
xlabel='X Position (m)',
ylabel='Y Position (m)')
plot(results.y, label='Y position (m)')
plot(results.x, label='X position (m)')
decorate(title='Position over Time',
xlabel='Time (s)',
ylabel='Position (m)',
legend=True)
###Output
_____no_output_____
###Markdown
Third floor near the ceiling
###Code
# Imports
from IPython.display import clear_output
import os
import time
# Install Python libraries
!pip3 install pandas seaborn sympy beautifulsoup4 lxml pint scipy==1.1.0 numpy
# Removes code before re-downloading it
!rm -rf ./ModSimPy
!rm -rf ./modsim.py
# Grabs ModSimPy
!git clone https://github.com/AllenDowney/ModSimPy.git
!cp "ModSimPy/code/modsim.py" .
# Resets kernel to flush outdated libraries (especially SciPy)
clear_output()
print("Configured for ModSimPy. Restarting kernel.")
time.sleep(1)
os._exit(0)
###Output
_____no_output_____
###Markdown
Dropping columns with more than 20% missing values (keeping only columns that are at least 80% non-null)
###Code
# thresh = 80% of rows: keep only columns with at least 80% non-null values; axis=1 targets columns, inplace=True overwrites df
df.dropna(thresh=(df.shape[0] * 0.8), axis=1, inplace=True)
# we can now see how many columns have been dropped
df.shape
# visualizing the missingness pattern
msno.matrix(df[0:100000])
df.dtypes.unique()
int_df = df.select_dtypes(include=['float64','int64'])
int_df.head()
np.absolute(int_df.corr()[['loan_status']]).sort_values(by='loan_status')[35:]
np.absolute(int_df.corr()[['loan_status']]).sort_values(by='loan_status')[35:].index
cat_df = df.select_dtypes(include=['O'])
cat_df.head()
cat_df.shape
cat_df.grade.unique()
cat_df.columns
# Encoding grade as a predictor variable
encode = {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6,'G': 7}
cat_df['grade'].replace(encode, inplace=True)
# adding encoded grade feature to int_df dataframe for modeling
int_df['grade'] = cat_df['grade'].copy()
###Output
_____no_output_____
###Markdown
Modeling with continuous predictor variables
###Code
# plotting the distribution of the target variable
sns.countplot(df['loan_status'])
# filling missing values with IterativeImputer
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
imp = IterativeImputer(max_iter=1, verbose=0)
imputed_df = imp.fit_transform(int_df)
int_df = pd.DataFrame(imputed_df, columns=int_df.columns)
int_df['loan_status'] = df['loan_status'].copy()
# finding the sum of null values in the df after Iterative imputation
int_df.isna().sum()
from google.colab import files
int_df.to_csv('int_df.csv')
files.download('int_df.csv')
###Output
_____no_output_____
###Markdown
Model Feature Selection
###Code
X_test_df = int_df[['total_rec_late_fee', 'int_rate', 'total_pymnt_inv', 'total_pymnt', 'grade', 'last_pymnt_amnt', 'total_rec_prncp',
'collection_recovery_fee', 'recoveries', 'loan_status']].copy()
# checking for little or no multicollinearity between good predictor variables
sns.heatmap(X_test_df.corr(), annot=True)
# feature selection while dealing with multicollinearity
X = int_df[['total_rec_int', 'mths_since_recent_inq', 'total_rev_hi_lim','revol_util', 'inq_last_6mths','funded_amnt_inv','loan_amnt','funded_amnt',
'bc_util', 'percent_bc_gt_75', 'num_rev_tl_bal_gt_0','num_actv_rev_tl', 'tot_cur_bal', 'total_bc_limit', 'mort_acc',
'tot_hi_cred_lim', 'avg_cur_bal', 'bc_open_to_buy','num_tl_op_past_12m', 'dti', 'acc_open_past_24mths', 'total_rec_late_fee',
'int_rate','total_rec_prncp','recoveries']]
# Dropped columns as a result of multicollinearity: 'collection_recovery_fee','total_pymnt_inv','total_pymnt','grade','last_pymnt_amnt'
y = int_df['loan_status']
# values to be scaled later
X.describe()
# splitting data into train and tests sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=123, stratify=y)
###Output
_____no_output_____
###Markdown
Logistic Regression
###Code
logreg_model_1 = LogisticRegression()
logreg_model_1.fit(X_train,y_train)
#y_pred_train = logreg_model_1.predict(X_train)
y_pred_test = logreg_model_1.predict(X_test)
metrics.accuracy_score(y_test, y_pred_test)
metrics.confusion_matrix(y_test, y_pred_test)
print(metrics.classification_report(y_test, y_pred_test))
y_pred_prob = logreg_model_1.predict_proba(X_test)[:,1]
metrics.roc_auc_score(y_test, y_pred_prob)
###Output
_____no_output_____
###Markdown
Support Vector Machine Classifier
###Code
#svm_model_1 = SVC(kernel='linear')
#svm_model_1.fit(X_train,y_train)
#y_pred_train = svm_model_1.predict(X_train)
#y_pred_test = svm_model_1.predict(X_test)
#metrics.accuracy_score(y_test, y_pred_test)
#metrics.confusion_matrix(y_test, y_pred_test)
#print(metrics.classification_report(y_test, y_pred_test))
###Output
_____no_output_____
###Markdown
RandomForestClassifier
###Code
rf_model_1 = RandomForestClassifier()
rf_model_1.fit(X_train,y_train)
#y_pred_train = rf_model_1.predict(X_train)
y_pred_test = rf_model_1.predict(X_test)
metrics.accuracy_score(y_test, y_pred_test)
metrics.confusion_matrix(y_test, y_pred_test)
print(metrics.classification_report(y_test, y_pred_test))
y_pred_prob = rf_model_1.predict_proba(X_test)[:,1]
metrics.roc_auc_score(y_test, y_pred_prob)
###Output
_____no_output_____
###Markdown
MinMaxScaler* Scaling features and remodeling
###Code
from sklearn.preprocessing import MinMaxScaler, StandardScaler
mm = MinMaxScaler()
feature_names = X_train.columns
X_train_mm = mm.fit_transform(X_train)
X_train_mm = pd.DataFrame(X_train_mm, columns=feature_names)
X_test_mm = mm.transform(X_test)
X_test_mm = pd.DataFrame(X_test_mm, columns=feature_names)
# showing a DESC of the normalized X_train features
X_train_mm.describe()
###Output
_____no_output_____
###Markdown
Logistic Regression
###Code
logreg_model_2 = LogisticRegression()
logreg_model_2.fit(X_train_mm,y_train)
#y_pred_train = logreg_model_2.predict(X_train_mm)
y_pred_test = logreg_model_2.predict(X_test_mm)
metrics.accuracy_score(y_test, y_pred_test)
metrics.confusion_matrix(y_test, y_pred_test)
print(metrics.classification_report(y_test, y_pred_test))
y_pred_prob = logreg_model_2.predict_proba(X_test_mm)[:,1]
metrics.roc_auc_score(y_test, y_pred_prob)
###Output
_____no_output_____
###Markdown
Support Vector Machine Classifier
###Code
#svm_model_2 = SVC(kernel='linear')
#svm_model_2.fit(X_train_mm,y_train)
#y_pred_train = svm_model_2.predict(X_train_mm)
#y_pred_test = svm_model_2.predict(X_test_mm)
#metrics.accuracy_score(y_test, y_pred_test)
#metrics.confusion_matrix(y_test, y_pred_test)
#print(metrics.classification_report(y_test, y_pred_test))
###Output
_____no_output_____
###Markdown
RandomForestClassifier
###Code
rf_model_2 = RandomForestClassifier()
rf_model_2.fit(X_train_mm,y_train)
#y_pred_train = rf_model_2.predict(X_train_mm)
y_pred_test = rf_model_2.predict(X_test_mm)
metrics.accuracy_score(y_test, y_pred_test)
metrics.confusion_matrix(y_test, y_pred_test)
print(metrics.classification_report(y_test, y_pred_test))
y_pred_prob = rf_model_2.predict_proba(X_test_mm)[:,1]
metrics.roc_auc_score(y_test, y_pred_prob)
###Output
_____no_output_____
###Markdown
StandardScaler
###Code
sc = StandardScaler()
X_train_sc = sc.fit_transform(X_train)
X_train_sc = pd.DataFrame(X_train_sc, columns=feature_names)
X_test_sc = sc.transform(X_test)
X_test_sc = pd.DataFrame(X_test_sc, columns=feature_names)
X_train_sc.describe()
###Output
_____no_output_____
###Markdown
Logistic Regression
###Code
logreg_model_3 = LogisticRegression()
logreg_model_3.fit(X_train_sc,y_train)
#y_pred_train = logreg_model_3.predict(X_train_sc)
y_pred_test = logreg_model_3.predict(X_test_sc)
metrics.accuracy_score(y_test, y_pred_test)
metrics.confusion_matrix(y_test, y_pred_test)
print(metrics.classification_report(y_test, y_pred_test))
y_pred_prob = logreg_model_3.predict_proba(X_test_sc)[:,1]
metrics.roc_auc_score(y_test, y_pred_prob)
###Output
_____no_output_____
###Markdown
Support Vector Machine Classifier
###Code
#svm_model_3 = SVC(kernel='linear')
#svm_model_3.fit(X_train_mm,y_train)
#y_pred_train = svm_model_3.predict(X_train_sc)
#y_pred_test = svm_model_3.predict(X_test_sc)
#metrics.accuracy_score(y_test, y_pred_test)
#metrics.confusion_matrix(y_test, y_pred_test)
#print(metrics.classification_report(y_test, y_pred_test))
###Output
_____no_output_____
###Markdown
RandomForestClassifier
###Code
rf_model_3 = RandomForestClassifier()
rf_model_3.fit(X_train_sc,y_train)
#y_pred_train = rf_model_3.predict(X_train_sc)
y_pred_test = rf_model_3.predict(X_test_sc)
metrics.accuracy_score(y_test, y_pred_test)
metrics.confusion_matrix(y_test, y_pred_test)
print(metrics.classification_report(y_test, y_pred_test))
y_pred_prob = rf_model_3.predict_proba(X_test_sc)[:,1]
metrics.roc_auc_score(y_test, y_pred_prob)
###Output
_____no_output_____
###Markdown
Data Cleaning
###Code
movies_clean = movies.copy()
# Drop columns that don't provide useful information: tagline, title, movie_id, title.1, overview, original_title, id, homepage
# ?Drop 'status' != Released before column drop?
movies_clean.drop(columns=['tagline', 'title', 'movie_id', 'title.1', 'overview', 'original_title', 'id', 'homepage'], inplace=True)
movies_clean.head(1)
# Look at Zero-budget/Zero-revenue movies
sum((movies_clean['budget']==0).apply(int) & (movies_clean['revenue']==0).apply(int))
# Drop observations with zero budget, less than $1000 revenue
movies_clean = movies_clean[movies_clean['budget']>0]
movies_clean = movies_clean[movies_clean['revenue']>1000]
movies_clean = movies_clean.reset_index(drop=True)
# Clean up genres column/remove empty's
print(movies_clean.shape)
print(movies_clean['genres'][movies_clean['genres'].apply(len)<3].count())
movies_clean = movies_clean[movies_clean['genres'].apply(len)>2]
print(movies_clean.shape)
# Clean up 'production_companies' column. 35 empty crew observation
print(movies_clean.shape)
print(movies_clean['production_companies'][movies_clean['production_companies'].apply(len)<3].count())
movies_clean = movies_clean[movies_clean['production_companies'].apply(len)>2]
print(movies_clean.shape)
# Clean up 'cast' column. 1 empty cast observations
print(movies_clean.shape)
print(movies_clean['cast'][movies_clean['cast'].apply(len)<3].count())
movies_clean = movies_clean[movies_clean['cast'].apply(len)>2]
print(movies_clean.shape)
# Clean up 'crew' column. 1 empty crew observation
print(movies_clean.shape)
print(movies_clean['crew'][movies_clean['crew'].apply(len)<3].count())
movies_clean = movies_clean[movies_clean['crew'].apply(len)>2]
print(movies_clean.shape)
# Clean up 'release_date' column.
print(movies_clean.shape)
print(movies_clean['release_date'][movies_clean['release_date'].apply(len)<3].count())
movies_clean = movies_clean[movies_clean['release_date'].apply(len)>1]
print(movies_clean.shape)
#movies_clean['release_date'] = movies_clean['release_date'].astype(int)
movies_clean = movies_clean.reset_index(drop=True)
# One-hot encode genres
#movies_clean['genre_names'] = movies_clean['genres'].apply(lambda s: '|'.join([e['name'] for e in json.loads(s)]))
#genre_one_hot = movies_clean['genre_names'].str.get_dummies('|')
#movies_clean_one_hot = pd.concat([movies_clean, genre_one_hot], axis=1)
movies_clean_one_hot = movies_clean.copy()
movies_clean_one_hot.head(2)
# Pull out all production_companies
production_companies_df = pd.DataFrame(columns=['production_companies'])
for row in movies_clean_one_hot.iterrows():
for i in range(len(json.loads(row[1][5]))): # row[1][5] is production_countries string
production_companies_df = pd.concat([production_companies_df, pd.DataFrame([{'production_companies':json.loads(row[1][5])[i]['name']}])], ignore_index=True)
print(production_companies_df.shape)
production_companies_df.value_counts()
# Create dataframe from production_companies_df.value_counts()
production_companies_counts_df = pd.concat([production_companies_df.value_counts().index.get_level_values(0).to_frame(index=False), pd.DataFrame(production_companies_df.value_counts().values, columns=['count'])], axis=1)
# Create production_companies rank summation
comp_counts = movies_clean_one_hot['production_companies'].apply(lambda s: sum([int(production_companies_counts_df[production_companies_counts_df['production_companies']==comp['name']]['count']) for comp in json.loads(s)]))
comp_counts.rename('production_rank', inplace=True)
movies_clean_one_hot = pd.concat([movies_clean_one_hot, comp_counts], axis=1)
#movies_clean_one_hot.drop(['production_companies', 'production_companies_names'], axis=1, inplace=True)
movies_clean_one_hot.head(2)
movies_clean_one_hot.shape
# Create dataframe from .value_counts()
#cast_counts_df = pd.concat([cast_df.value_counts().index.get_level_values(0).to_frame(index=False), pd.DataFrame(cast_df.value_counts().values, columns=['count'])], axis=1)
#cast_counts_df
"""
cast_revenue_df = pd.DataFrame(columns=['name', 'revenue'])
for row in movies_clean_one_hot.iterrows():
for i in range(len(json.loads(row[1][13]))): # row[1][21] is cast string in ORIGINAL dataset
cast_revenue_df = pd.concat([cast_revenue_df, pd.DataFrame([{'name':json.loads(row[1][13])[i]['name'], 'revenue':row[1][15]*math.exp(-0.4*i)}])], ignore_index=True)
cast_revenue_df.to_csv('/content/drive/MyDrive/Colab Notebooks/cast_revenue_budget.csv', index=False)
"""
cast_revenue_df = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/cast_revenue_budget.csv')
cast_revenue_df_unique = pd.unique(cast_revenue_df['name'])
cast_revenue_df_unique
"""
cast_cumm_revenue = pd.DataFrame(columns=['name', 'cumm_revenue'])
for name in cast_revenue_df_unique:
cast_cumm_revenue = cast_cumm_revenue.append({'name': name, 'cumm_revenue': sum([row[1][1] for row in cast_revenue_df[cast_revenue_df['name'] == name].iterrows()])}, ignore_index=True)
#cast_cumm_revenue = cast_cumm_revenue.sort_values(by='cumm_revenue', ascending=False)
"""
#cast_cumm_revenue.to_csv('/content/drive/MyDrive/Colab Notebooks/cast_cumm_revenue_budget.csv', index=False)
cast_cumm_revenue = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/cast_cumm_revenue_budget.csv')
# Concat a 'cast_worth' column to movies, which comprises a weighted sum of all cast members' cumulative worth
"""
cast_worth = pd.DataFrame(columns=['cast_worth'])
for row in movies_clean_one_hot.iterrows():
cast_worth = pd.concat([cast_worth, pd.DataFrame({'cast_worth': sum([int(cast_cumm_revenue[cast_cumm_revenue['name']==e['name']]['cumm_revenue'])*math.exp(-0.4*i) for i,e in enumerate(json.loads(row[1][13]))])}, index=[0])], ignore_index=True)
"""
#cast_worth.to_csv('/content/drive/MyDrive/Colab Notebooks/cast_worth.csv', index=False)
cast_worth = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/cast_worth.csv')
movies_clean_one_hot['cast_worth'] = cast_worth
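# Based on the quoted computation above, 'cast_worth' is effectively a weighted sum of each
# cast member's cumulative revenue:
#     cast_worth = sum_i cumm_revenue_i * exp(-0.4 * i)
# where i is the billing position, so top-billed actors contribute the most.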
# Add director column
"""
director = []
for row in movies_clean.iterrows():
for e in json.loads(row[1][14]):
if e['job']=='Director':
director.append(e['name'])
break
"""
#movies_clean_one_hot = pd.concat([movies_clean_one_hot, pd.DataFrame(director, columns=['director'])], axis=1)
movies_clean_one_hot['release_date'] = movies_clean_one_hot['release_date'].str.slice(start=5, stop=7).apply(int)
#movies_clean['release_date'] = movies_clean['release_date'].astype(int)
movies_clean_one_hot.head()
###Output
_____no_output_____
###Markdown
Exploratory Data Analysis
###Code
# Popularity histogram
plt.hist(movies_clean_one_hot['popularity'], bins=50);
movies_clean_one_hot['vote_average'].hist();
movies_clean_one_hot.plot(kind='scatter', x='budget', y='revenue', title='Budget vs. Revenue');
# vote_count
movies_clean_one_hot['vote_count'].hist();
movies_clean_one_hot['cast_worth'].hist();
movies_clean_one_hot.plot(kind='scatter', loglog=True, x='cast_worth', y='revenue', title='Cast Worth vs. Revenue');
movies_clean_one_hot.corr()['revenue']
###Output
_____no_output_____
###Markdown
Data Processing
###Code
y = movies_clean_one_hot['revenue']
X = movies_clean_one_hot.drop('revenue', axis=1).copy()
#X.drop(['genres','keywords','status','original_language','production_companies','production_countries','spoken_languages','cast','crew','genre_names'], axis=1, inplace=True)
X.drop(['genres','keywords','status','original_language','production_companies','production_countries','spoken_languages','cast','crew'], axis=1, inplace=True)
X.head()
###Output
_____no_output_____
###Markdown
Decision Trees
###Code
numLoops = 100
depth_vec = np.arange(1,11)
depth_error = np.zeros(len(depth_vec))
for outIdx in range(0,len(depth_error)):
mean_error = np.zeros(numLoops)
depth_val = outIdx + 1
model = DecisionTreeRegressor(max_depth=depth_val)
for idx in range(0,numLoops):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
mean_error[idx] = rmspe(y_test, y_pred)
depth_error[outIdx] = np.mean(mean_error)
depth_error
plt.plot(depth_error);
###Output
_____no_output_____
###Markdown
Decision Tree Graph
###Code
model = DecisionTreeRegressor(max_depth=3, criterion='mae')
model.fit(X,y)
import graphviz
import pydotplus
from IPython.display import display
from sklearn import tree
display(graphviz.Source(tree.export_graphviz(model, feature_names=X.columns)));
###Output
_____no_output_____
###Markdown
Random Forest
###Code
numTrees = 20
rmse_all = np.zeros(numTrees)
for outIdx in range(0,numTrees):
numLoops = 20
rmse_inside = np.zeros(numLoops)
for inIdx in range(0,numLoops):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
rfModel = RandomForestRegressor(n_estimators = (outIdx+1), max_depth=4)
rfModel.fit(X_train, y_train)
y_pred_rf = rfModel.predict(X_test)
rmse_inside[inIdx] = root_mean_squared_percentage_error(y_test, y_pred_rf, 1e6)
#print(outIdx,' trees finished.')
rmse_all[outIdx] = np.mean(rmse_inside)
print(rmse_all)
plt.plot(rmse_all);
###Output
[620.58320017 635.17325615 561.55904365 560.66516669 599.25409158
549.82133987 620.98341717 553.41249321 576.21220601 607.00702501
569.58732482 591.78915279 580.93628101 589.78547018 571.59883378
586.24518045 578.6423132 556.44891043 567.65653475 551.95085959]
###Markdown
XGBoost
###Code
numLoops = 100
mse_xgb = np.zeros(numLoops)
for idx in range(0,numLoops):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
xgbr = xgb.XGBRegressor(objective = 'reg:squarederror')
xgbr.fit(X_train, np.log10(y_train))
y_pred_xgb = xgbr.predict(X_test)
mse_xgb[idx] = root_mean_squared_percentage_error(y_test, 10**y_pred_xgb, 1e6)
mse_xgb.mean()
np.sqrt(mse_xgb.mean())
plt.scatter(np.log10(y_test),((y_test - 10**y_pred_xgb)/(y_test))*100)
plt.ylim([-1000, 1000])
#plt.xlim([0,1e9])
feat_imp = pd.Series(xgbr.feature_importances_, index=X.columns)
feat_imp.plot(kind='bar', title='Feature Importance')
plt.ylabel('Feature Importance Score');
print('Done')
###Output
Done
|
analytics/nba_player_stats.ipynb | ###Markdown
Take Home Challenge: Data Analysis and Visualization*Note: this notebook is included just as an example; feel free to use the tools with which you are most comfortable!*Analytics is about more than just numbers! At ICX, we take a holistic view of analysis, which includes gathering and interacting with data, performing statistical analyses, creating visualizations, and telling stories. For this challenge, you'll get to show off your end-to-end analytics chops!Given a dataset of NBA players' performance and salary in 2014, use your preferred analytical tool to load the dataset and compute summary statistics, then determine the relationship between player efficiency and salary and create a custom visualization to illustrate that story.The data can be found at [http://bit.ly/2n9twqX](http://bit.ly/2n9twqX) Imports
###Code
# Imports
###Output
_____no_output_____
###Markdown
Data Loading
###Code
# Load the data
###Output
_____no_output_____
###Markdown
Statistical Analysis
###Code
# Compute summary statistics
###Output
_____no_output_____
###Markdown
Visualization
###Code
# Create a visualization to show PER vs. Salary
###Output
_____no_output_____
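A minimal sketch of one possible end-to-end pass, assuming the CSV from the link above has been saved locally as `nba_players.csv` and contains columns named `PER` and `SALARY_MILLIONS`; the file name and both column names are assumptions and may need to be adjusted to match the actual file:

```python
# Hedged sketch, not an official solution; file and column names are assumptions.
import pandas as pd
import matplotlib.pyplot as plt

nba = pd.read_csv('nba_players.csv')   # assumed local copy of the linked dataset

# Summary statistics for the numeric columns
print(nba.describe())

# Relationship between player efficiency (PER) and salary
nba.plot(kind='scatter', x='PER', y='SALARY_MILLIONS', alpha=0.6)
plt.title('Player Efficiency Rating vs. Salary')
plt.xlabel('PER')
plt.ylabel('Salary (millions USD)')
plt.show()

# Strength of the linear relationship
print(nba[['PER', 'SALARY_MILLIONS']].corr())
```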
###Markdown
NBA Player Statistics ChallengeAnalytics is about more than just numbers! At ICX, we take a holistic view of analysis, which includes gathering and interacting with data, performing statistical analyses, creating visualizations, and telling stories. For this challenge, you'll get to show off your end-to-end analytics chops!Given a dataset of NBA players' performance and salary in 2014, use Python to load the dataset and compute summary statistics, then determine the relationship between player efficiency and salary, and create a custom visualization using Python to illustrate that story.The data can be found at [http://bit.ly/2n9twqX](http://bit.ly/2n9twqX) Imports
###Code
# Imports
###Output
_____no_output_____
###Markdown
Data Loading
###Code
# Load the data
# Print the first few rows
###Output
_____no_output_____
###Markdown
Statistical Analysis
###Code
# Compute summary statistics
###Output
_____no_output_____
###Markdown
Visualization
###Code
# Create a visualization to show PER vs. Salary
###Output
_____no_output_____ |
04-Pandas-Exercises/03-Ecommerce Purchases Exercise .ipynb | ###Markdown
Ecommerce Purchases ExerciseIn this Exercise you will be given some Fake Data about some purchases done through Amazon! Just go ahead and follow the directions and try your best to answer the questions and complete the tasks. Feel free to reference the solutions. Most of the tasks can be solved in different ways. For the most part, the questions get progressively harder.Please excuse anything that doesn't make "Real-World" sense in the dataframe, all the data is fake and made-up.Also note that all of these questions can be answered with one line of code.____**Import pandas and read in the Ecommerce Purchases csv file and set it to a DataFrame called ecom.**
###Code
import pandas as pd
ecom = pd.read_csv('Ecommerce Purchases')
###Output
_____no_output_____
###Markdown
**Check the head of the DataFrame.**
###Code
ecom.head()
###Output
_____no_output_____
###Markdown
**How many rows and columns are there?**
###Code
ecom.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 10000 entries, 0 to 9999
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Address 10000 non-null object
1 Lot 10000 non-null object
2 AM or PM 10000 non-null object
3 Browser Info 10000 non-null object
4 Company 10000 non-null object
5 Credit Card 10000 non-null int64
6 CC Exp Date 10000 non-null object
7 CC Security Code 10000 non-null int64
8 CC Provider 10000 non-null object
9 Email 10000 non-null object
10 Job 10000 non-null object
11 IP Address 10000 non-null object
12 Language 10000 non-null object
13 Purchase Price 10000 non-null float64
dtypes: float64(1), int64(2), object(11)
memory usage: 1.1+ MB
###Markdown
**What is the average Purchase Price?**
###Code
ecom['Purchase Price'].mean()
###Output
_____no_output_____
###Markdown
**What were the highest and lowest purchase prices?**
###Code
ecom['Purchase Price'].max()
ecom['Purchase Price'].min()
###Output
_____no_output_____
###Markdown
**How many people have English 'en' as their Language of choice on the website?**
###Code
def en_string(title):
if 'en' in title.lower():
return True
else:
return False
ecom['Language'].apply(lambda x: en_string(x)).sum()
ecom[ecom['Language']=='en'].count()
###Output
_____no_output_____
###Markdown
**How many people have the job title of "Lawyer" ?**
###Code
ecom[ecom['Job']=='Lawyer'].count()
ecom[ecom['Job']=='Lawyer'].info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 30 entries, 470 to 9979
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Address 30 non-null object
1 Lot 30 non-null object
2 AM or PM 30 non-null object
3 Browser Info 30 non-null object
4 Company 30 non-null object
5 Credit Card 30 non-null int64
6 CC Exp Date 30 non-null object
7 CC Security Code 30 non-null int64
8 CC Provider 30 non-null object
9 Email 30 non-null object
10 Job 30 non-null object
11 IP Address 30 non-null object
12 Language 30 non-null object
13 Purchase Price 30 non-null float64
dtypes: float64(1), int64(2), object(11)
memory usage: 3.5+ KB
###Markdown
**How many people made the purchase during the AM and how many people made the purchase during PM ?****(Hint: Check out [value_counts()](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.value_counts.html) )**
###Code
ecom['AM or PM'].value_counts()
###Output
_____no_output_____
###Markdown
**What are the 5 most common Job Titles?**
###Code
ecom['Job'].value_counts().head(5)
###Output
_____no_output_____
###Markdown
**Someone made a purchase that came from Lot: "90 WT" , what was the Purchase Price for this transaction?**
###Code
ecom[ecom['Lot']=='90 WT']['Purchase Price']
###Output
_____no_output_____
###Markdown
**What is the email of the person with the following Credit Card Number: 4926535242672853**
###Code
ecom[ecom['Credit Card']== 4926535242672853]['Email']
###Output
_____no_output_____
###Markdown
**How many people have American Express as their Credit Card Provider *and* made a purchase above $95 ?**
###Code
ecom[(ecom['CC Provider']=='American Express') & (ecom['Purchase Price']>95)].count()
###Output
_____no_output_____
###Markdown
**``Hard: How many people have a credit card that expires in 2025?``**
###Code
sum(ecom['CC Exp Date'].apply(lambda x: x[3:])=='25')
###Output
_____no_output_____
###Markdown
**Hard: What are the top 5 most popular email providers/hosts (e.g. gmail.com, yahoo.com, etc...)**
###Code
ecom['Email'].apply(lambda x: x.split('@')[1]).value_counts().head(5)
import re
def findall_yes(x):
    n = re.findall(r'\w+@(\w+\.com)', x)  # raw string; dot escaped so '.com' is matched literally
if n:
return n[0]
ecom['Email'].apply(lambda x: findall_yes(x)).value_counts().head(5)
import re
def find(x):
    n = re.search(r'\w+\.com', x)  # returns a match object (raw string; dot escaped)
if n:
return n.group(0)
ecom['Email'].apply(lambda x: find(x)).value_counts().head(5)
###Output
_____no_output_____
###Markdown
___ ___ Ecommerce Purchases ExerciseIn this Exercise you will be given some Fake Data about some purchases done through Amazon! Just go ahead and follow the directions and try your best to answer the questions and complete the tasks. Feel free to reference the solutions. Most of the tasks can be solved in different ways. For the most part, the questions get progressively harder.Please excuse anything that doesn't make "Real-World" sense in the dataframe, all the data is fake and made-up.Also note that all of these questions can be answered with one line of code.____** Import pandas and read in the Ecommerce Purchases csv file and set it to a DataFrame called ecom. **
###Code
import pandas as pd
ecom = pd.read_csv('Ecommerce Purchases')
ecom.head()
###Output
_____no_output_____
###Markdown
**Check the head of the DataFrame.**
###Code
len(ecom.columns)
len(ecom.index)
ecom.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 10000 entries, 0 to 9999
Data columns (total 14 columns):
Address 10000 non-null object
Lot 10000 non-null object
AM or PM 10000 non-null object
Browser Info 10000 non-null object
Company 10000 non-null object
Credit Card 10000 non-null int64
CC Exp Date 10000 non-null object
CC Security Code 10000 non-null int64
CC Provider 10000 non-null object
Email 10000 non-null object
Job 10000 non-null object
IP Address 10000 non-null object
Language 10000 non-null object
Purchase Price 10000 non-null float64
dtypes: float64(1), int64(2), object(11)
memory usage: 1.1+ MB
###Markdown
** How many rows and columns are there? ** ** What is the average Purchase Price? **
###Code
ecom['Purchase Price'].mean()
###Output
_____no_output_____
###Markdown
** What were the highest and lowest purchase prices? **
###Code
ecom['Purchase Price'].max()
ecom['Purchase Price'].min()
###Output
_____no_output_____
###Markdown
** How many people have English 'en' as their Language of choice on the website? **
###Code
sum(ecom['Language'] == 'en')
ecom[ecom['Language'] == 'en'].count()
###Output
_____no_output_____
###Markdown
** How many people have the job title of "Lawyer" ? **
###Code
# ecom[ecom['Job'] == 'Lawyer'].info()
# ecom[ecom['Job'] == 'Lawyer'].count()
len(ecom[ecom['Job'] == 'Lawyer'].index)
###Output
_____no_output_____
###Markdown
** How many people made the purchase during the AM and how many people made the purchase during PM ? ****(Hint: Check out [value_counts()](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.value_counts.html) ) **
###Code
ecom['AM or PM'].unique()
ecom['AM or PM'].value_counts()
###Output
_____no_output_____
###Markdown
** What are the 5 most common Job Titles? **
###Code
ecom['Job'].value_counts().head(5)
###Output
_____no_output_____
###Markdown
** Someone made a purchase that came from Lot: "90 WT" , what was the Purchase Price for this transaction? **
###Code
ecom[ecom['Lot']=='90 WT']['Purchase Price']
###Output
_____no_output_____
###Markdown
** What is the email of the person with the following Credit Card Number: 4926535242672853 **
###Code
ecom[ecom['Credit Card']== 4926535242672853]['Email']
###Output
_____no_output_____
###Markdown
** How many people have American Express as their Credit Card Provider *and* made a purchase above $95 ?**
###Code
# ecom[(ecom['CC Provider']=='American Express')& (ecom['Purchase Price']>95)].count()
# ecom[(ecom['CC Provider']=='American Express')& (ecom['Purchase Price']>95)].info()
len(ecom[(ecom['CC Provider']=='American Express')& (ecom['Purchase Price']>95)].index)
###Output
_____no_output_____
###Markdown
** Hard: How many people have a credit card that expires in 2025? **
###Code
# ecom['CC Exp Date'].iloc[0][3:]
# sum(ecom['CC Exp Date'].apply(lambda exp: exp[3:]== '25'
ecom[ecom['CC Exp Date'].apply(lambda exp: exp[3:]== '25')].count()
###Output
_____no_output_____
###Markdown
** Hard: What are the top 5 most popular email providers/hosts (e.g. gmail.com, yahoo.com, etc...) **
###Code
example_email = ecom['Email'].iloc[0]
example_email.split('@')[1]
ecom['Email'].apply(lambda email: email.split('@')[1]).value_counts().head(5)
###Output
_____no_output_____
###Markdown
___ ___ Ecommerce Purchases ExerciseIn this Exercise you will be given some Fake Data about some purchases done through Amazon! Just go ahead and follow the directions and try your best to answer the questions and complete the tasks. Feel free to reference the solutions. Most of the tasks can be solved in different ways. For the most part, the questions get progressively harder.Please excuse anything that doesn't make "Real-World" sense in the dataframe, all the data is fake and made-up.Also note that all of these questions can be answered with one line of code.____** Import pandas and read in the Ecommerce Purchases csv file and set it to a DataFrame called ecom. **
###Code
import pandas as pd
ecom = pd.read_csv('Ecommerce Purchases')
###Output
_____no_output_____
###Markdown
**Check the head of the DataFrame.**
###Code
ecom.head(2)
ecom.index
len(ecom.columns)
###Output
_____no_output_____
###Markdown
** How many rows and columns are there? **
###Code
ecom.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 10000 entries, 0 to 9999
Data columns (total 15 columns):
Address 10000 non-null object
Lot 10000 non-null object
AM or PM 10000 non-null object
Browser Info 10000 non-null object
Company 10000 non-null object
Credit Card 10000 non-null int64
CC Exp Date 10000 non-null object
CC Security Code 10000 non-null int64
CC Provider 10000 non-null object
Email 10000 non-null object
Job 10000 non-null object
IP Address 10000 non-null object
Language 10000 non-null object
Purchase Price 10000 non-null float64
Email Providers 10000 non-null object
dtypes: float64(1), int64(2), object(12)
memory usage: 1.1+ MB
###Markdown
** What is the average Purchase Price? **
###Code
ecom['Purchase Price'].mean()
###Output
_____no_output_____
###Markdown
** What were the highest and lowest purchase prices? **
###Code
ecom['Purchase Price'].max()
ecom['Purchase Price'].min()
###Output
_____no_output_____
###Markdown
** How many people have English 'en' as their Language of choice on the website? **
###Code
ecom['Language'].value_counts()['en']
ecom[ecom['Language'] == 'en']['Language'].count()
ecom.sort_values(by='Language')['Language'].value_counts()['en']
ecom[ecom['Language'] == 'en'].count()
###Output
_____no_output_____
###Markdown
** How many people have the job title of "Lawyer" ? **
###Code
ecom['Job'].value_counts()['Lawyer']
len(ecom[ecom['Job'] == 'Lawyer'].index)
ecom[ecom['Job'] == 'Lawyer']['Job'].count()
ecom[ecom['Job'] == 'Lawyer'].info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 30 entries, 470 to 9979
Data columns (total 15 columns):
Address 30 non-null object
Lot 30 non-null object
AM or PM 30 non-null object
Browser Info 30 non-null object
Company 30 non-null object
Credit Card 30 non-null int64
CC Exp Date 30 non-null object
CC Security Code 30 non-null int64
CC Provider 30 non-null object
Email 30 non-null object
Job 30 non-null object
IP Address 30 non-null object
Language 30 non-null object
Purchase Price 30 non-null float64
Email Providers 30 non-null object
dtypes: float64(1), int64(2), object(12)
memory usage: 3.8+ KB
###Markdown
** How many people made the purchase during the AM and how many people made the purchase during PM ? ****(Hint: Check out [value_counts()](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.value_counts.html) ) **
###Code
ecom['AM or PM'].value_counts()
###Output
_____no_output_____
###Markdown
** What are the 5 most common Job Titles? **
###Code
ecom['Job'].value_counts().head()
###Output
_____no_output_____
###Markdown
** Someone made a purchase that came from Lot: "90 WT" , what was the Purchase Price for this transaction? **
###Code
ecom[ecom['Lot'] == '90 WT']['Purchase Price']
###Output
_____no_output_____
###Markdown
** What is the email of the person with the following Credit Card Number: 4926535242672853 **
###Code
ecom[ecom['Credit Card'] == 4926535242672853]['Email']
###Output
_____no_output_____
###Markdown
** How many people have American Express as their Credit Card Provider *and* made a purchase above $95 ?**
###Code
ecom[(ecom['CC Provider'] == "American Express") & (ecom['Purchase Price'] > 95)].count()
###Output
_____no_output_____
###Markdown
** Hard: How many people have a credit card that expires in 2025? **
###Code
def CC_Exp_Check(x):
return 1 if '/25' in x else 0
sum(ecom['CC Exp Date'].apply(lambda x: CC_Exp_Check(x)))
sum(ecom['CC Exp Date'].apply(lambda exp: '/25' in exp))
sum(ecom['CC Exp Date'].apply(lambda exp: exp[3:]=='25'))
ecom[ecom['CC Exp Date'].apply(lambda exp: exp[3:]=='25')].count()['CC Exp Date']
###Output
_____no_output_____
###Markdown
** Hard: What are the top 5 most popular email providers/hosts (e.g. gmail.com, yahoo.com, etc...) **
###Code
ecom['Email'].value_counts().head()
def EmailProvider(x):
return x.split('@')[-1]
EmailProvider('[email protected]')
ecom['Email Providers'] = ecom['Email'].apply(lambda x: EmailProvider(x))
ecom['Email Providers'].value_counts().head()
ecom['Email'].apply(lambda email: email.split('@')[-1]).value_counts().head()
###Output
_____no_output_____ |
Code/Day 19 GMM Examples.ipynb | ###Markdown
Day 19 GMM Examples
###Code
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
###Output
_____no_output_____
###Markdown
1. GMM as density estimation Although GMM is often categorized as a clustering algorithm, it is fundamentally a density estimation algorithm; that is, technically speaking, the result of fitting a GMM to some data is not a clustering model but a generative probabilistic model describing the distribution of the data.
###Code
from sklearn.datasets import make_moons
Xmoon, ymoon = make_moons(200, noise=.05, random_state=0)
plt.scatter(Xmoon[:, 0], Xmoon[:, 1]);
###Output
_____no_output_____
###Markdown
If we fit a two-component GMM to this data, then, viewed as a clustering model, the result is not particularly useful:
###Code
from sklearn.mixture import GaussianMixture
from matplotlib.patches import Ellipse
import warnings
warnings.filterwarnings(action='ignore')
def draw_ellipse(position, covariance, ax=None, **kwargs):
"""็จ็ปๅฎ็ไฝ็ฝฎๅๅๆนๅทฎ็ปไธไธชๆคญๅ"""
ax = ax or plt.gca()
    # Convert the covariance matrix into principal axes
if covariance.shape == (2, 2):
U, s, Vt = np.linalg.svd(covariance)
angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))
width, height = 2 * np.sqrt(s)
else:
angle = 0
width, height = 2 * np.sqrt(covariance)
    # Draw the ellipse (1-, 2-, and 3-sigma contours)
for nsig in range(1, 4):
ax.add_patch(Ellipse(position, nsig * width, nsig * height,
angle, **kwargs))
def plot_gmm(gmm, X, label=True, ax=None):
ax = ax or plt.gca()
labels = gmm.fit(X).predict(X)
if label:
ax.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis', zorder=2)
else:
ax.scatter(X[:, 0], X[:, 1], s=40, zorder=2)
ax.axis('equal')
w_factor = 0.2 / gmm.weights_.max()
for pos, covar, w in zip(gmm.means_, gmm.covariances_, gmm.weights_): #covariances_ --> covars_
draw_ellipse(pos, covar, alpha=w * w_factor)
gmm2 = GaussianMixture(n_components=2, covariance_type='full', random_state=0)
plot_gmm(gmm2, Xmoon)
###Output
_____no_output_____
###Markdown
But if we instead use many more components and ignore the cluster labels, we find a fit that is much closer to the input data:
###Code
gmm16 = GaussianMixture(n_components=16, covariance_type='full', random_state=0)
plot_gmm(gmm16, Xmoon, label=False)
###Output
_____no_output_____
###Markdown
Here the mixture of 16 Gaussians serves **not to find separated clusters of data, but rather to model the overall distribution of the input data.** This is a **generative model of the distribution**: the GMM can generate new random data distributed similarly to the input. GMM is a very convenient modeling approach that can estimate the underlying distribution of data in any number of dimensions.
###Code
Xnew = gmm16.sample(400)[0]
plt.scatter(Xnew[:, 0], Xnew[:, 1]);
###Output
_____no_output_____
###Markdown
How many components? As a generative model, the GMM gives us a way to determine the optimal number of components for a dataset. Because the generative model is itself a probability distribution over the dataset, we can evaluate the likelihood of the data under the model, using cross-validation to avoid overfitting. There are also standard analytic criteria for correcting for overfitting, such as the Akaike information criterion (AIC) and the Bayesian information criterion (BIC). Below we plot the AIC and BIC as functions of the number of GMM components for this dataset:
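For reference, with $k$ the number of free parameters in the mixture, $n$ the number of samples, and $\hat{L}$ the maximized likelihood of the data under the model, the two criteria are $$\mathrm{AIC} = 2k - 2\ln\hat{L}, \qquad \mathrm{BIC} = k\ln n - 2\ln\hat{L}.$$ Lower values are better, and the $\ln n$ factor in the BIC penalizes extra components more heavily than the AIC does.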
###Code
n_components = np.arange(1, 21)
models = [GaussianMixture(n, covariance_type='full', random_state=0).fit(Xmoon)
for n in n_components]
plt.plot(n_components, [m.bic(Xmoon) for m in models], label='BIC')
plt.plot(n_components, [m.aic(Xmoon) for m in models], label='AIC')
plt.legend(loc='best')
plt.xlabel('n_components');
###Output
_____no_output_____
###Markdown
AICๅ่ฏๆไปฌ๏ผ้ๆฉ 16 ไธชๆๅๅฏ่ฝๅคชๅค๏ผ8ไธช ~12 ไธชๆๅๅฏ่ฝๆฏๆดๅฅฝ็้ๆฉ, ่BICๅๆจ่ไบไธไธชๆด็ฎๅ็ๆจกๅใ่ฟ้้่ฆๆณจๆ็ๆฏ:ๆๅๆฐ้็้ๆฉๅบฆ้็ๆฏ GMM ไฝไธบไธไธชๅฏๅบฆ่ฏไผฐๅจ็ๆง่ฝ๏ผ่ไธๆฏไฝไธบไธไธช่็ฑป็ฎๆณ็ๆง่ฝใๅปบ่ฎฎ่ฟๆฏๆ GMM ๅฝๆไธไธชๅฏๅบฆ่ฏไผฐๅจ๏ผไป
ๅจ็ฎๅๆฐๆฎ้ไธญๆๅฐๅฎไฝไธบ่็ฑป็ฎๆณไฝฟ็จใ 2.็คบไพ:็จGMM็ๆๆฐ็ๆฐๆฎ
###Code
from sklearn.datasets import load_digits
digits = load_digits()
digits.data.shape
def plot_100_digits(data):
fig, ax = plt.subplots(10, 10, figsize=(4, 4),
subplot_kw=dict(xticks=[], yticks=[]))
fig.subplots_adjust(hspace=0.05, wspace=0.05)
for i, axi in enumerate(ax.flat):
im = axi.imshow(data[i].reshape(8, 8), cmap='binary')
im.set_clim(0, 16)
plot_100_digits(digits.data)
###Output
_____no_output_____
###Markdown
We now have about 1800 digits in 64 dimensions, and we can build a GMM on top of them to generate more. GMMs can have difficulty converging in such a high-dimensional space, so we first apply a dimensionality reduction algorithm. Here we simply use PCA, asking it to preserve 99% of the variance in the projected data:
###Code
from sklearn.decomposition import PCA
pca = PCA(0.99, whiten=True) # ่ฎฉ PCA ้็ปดไฟ็ๆๅฝฑๅๆ ทๆฌๆฐๆฎ 99% ็ๆนๅทฎ
data = pca.fit_transform(digits.data)
data.shape
###Output
_____no_output_____
###Markdown
The result is 41 dimensions: a reduction of nearly one third of the dimensionality with almost no information loss. Given this projected data, we use the AIC to get a rough estimate of the number of GMM components to use:
###Code
n_components = np.arange(50, 310, 10)
models = [GaussianMixture(n, covariance_type='full', random_state=0)
for n in n_components]
aics = [model.fit(data).aic(data) for model in models]
plt.plot(n_components, aics);
###Output
_____no_output_____
###Markdown
The AIC is minimized at around 120 components, so that is the model we will use: let's fit it to the data and confirm that it has converged:
###Code
gmm = GaussianMixture(120, covariance_type='full', random_state=0)
gmm.fit(data)
print(gmm.converged_)
###Output
True
###Markdown
Now we can use the GMM as a generative model to draw a sample of 100 new points within this 41-dimensional projected space:
###Code
data_new = gmm.sample(100)[0]
data_new.shape
###Output
_____no_output_____
###Markdown
Finally, we can use the inverse transform of the PCA object to construct the new digits:
###Code
digits_new = pca.inverse_transform(data_new)
plot_100_digits(digits_new)
###Output
_____no_output_____ |
30_entornos_virtuales.ipynb | ###Markdown
[](https://www.pythonista.io) Virtual environments. When you have several Python development projects, it is very convenient to separate the libraries that a particular project uses from the system's main libraries, especially when users do not have administrator permissions. **NOTE:** If you are using the Pythonista® virtual machine, the system is already using a virtual environment located at ```/home/oi/pythonista/```. If you are using Anaconda, it creates its own virtual environment through ```conda```. Module library search directories. As discussed previously, Python defines the paths in which it can access the various module libraries, which can be queried and/or modified through ```sys.path```.
###Code
from sys import path
path
###Output
_____no_output_____
###Markdown
Virtual environments let you tell Python to use another default path from the command line or command prompt. The ```virtualenv``` package. This package copies the minimal elements needed to make up a Python library into the specified directory, including its own copies of ```pip``` and ```setuptools```.
###Code
!pip install virtualenv
!mkdir prueba
!virtualenv prueba
###Output
_____no_output_____
###Markdown
Enabling the virtual environment. To enable the new virtual environment, you need to modify the environment variables from a terminal. These changes will only take effect in that terminal. For Linux and MacOS X: ```source <path-to-environment>/bin/activate``` For Windows: ```<path-to-environment>\bin\activate``` Once the environment is enabled, all packages installed with *pip* will be installed in the virtual environment's directory, leaving the system's main library intact. **Example:** To enable the virtual environment located in [```prueba```](prueba), run the following from a Linux terminal in the directory that contains this notebook. To find the directory where the notebook is located, use the following Jupyter magic command:
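A quick way to confirm from Python which environment the interpreter is actually running in is to compare `sys.prefix` with `sys.base_prefix` (a minimal sketch using only the standard library; very old releases of `virtualenv` expose `sys.real_prefix` instead):

```python
import sys

print(sys.prefix)       # points inside the virtual environment when one is active
print(sys.base_prefix)  # points to the base installation

# If the two paths differ, a virtual environment is active.
```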
###Code
%pwd
###Output
_____no_output_____
###Markdown
[](https://pythonista.mx) Virtual environments. When you have several Python development projects, it is very convenient to separate the libraries that a particular project uses from the system's main libraries, especially when users do not have administrator permissions. **NOTE:** If you are using the Pythonista® virtual machine, the system is already using a virtual environment located at ```/home/oi/pythonista/```. If you are using Anaconda, it creates its own virtual environment through ```conda```. Module library search directories. As discussed previously, Python defines the paths in which it can access the various module libraries, which can be queried and/or modified through ```sys.path```.
###Code
from sys import path
path
###Output
_____no_output_____
###Markdown
Virtual environments let you tell Python to use another default path from the command line or command prompt. The ```virtualenv``` package. This package copies the minimal elements needed to make up a Python library into the specified directory, including its own copies of ```pip``` and ```setuptools```.
###Code
!pip install virtualenv
!mkdir prueba
!virtualenv prueba
###Output
_____no_output_____
###Markdown
Enabling the virtual environment. To enable the new virtual environment, you need to modify the environment variables from a terminal. These changes will only take effect in that terminal. For Linux and MacOS X: ```source <path-to-environment>/bin/activate``` For Windows: ```<path-to-environment>\bin\activate``` Once the environment is enabled, all packages installed with *pip* will be installed in the virtual environment's directory, leaving the system's main library intact. **Example:** To enable the virtual environment located in [```prueba```](prueba), run the following from a Linux terminal in the directory that contains this notebook. To find the directory where the notebook is located, use the following Jupyter magic command:
###Code
%pwd
###Output
_____no_output_____ |
source/measuring_activity_space.ipynb | ###Markdown
Activity Space With the Point GeoDataFrame, we can measure activity space by building geometric shapes using minimum bounding box methods. There are several ways to build a minimum bounding box, including buffer, convex hull, circle, envelope, etc., each with pros and cons depending on the geographic distribution of the Point GeoDataFrame. Currently, we support the buffer and convex hull methods for building minimum bounding boxes and calculating the corresponding activity space. The following examples demonstrate how to implement buffer- and convex hull-based activity space, using the example data from the last section. You can refer to [here](https://github.com/shuai-zhou/gps2space/blob/master/notebooks/createdata.ipynb) for how we compile the data. We first need to import the libraries we will be using for the examples.
###Code
%matplotlib inline
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
We then load the data and create spatial data using the `df_to_gdf` method as we did in the last section.
###Code
df = pd.read_csv('../data/example.csv')
df.head()
###Output
_____no_output_____
###Markdown
There are two persons, P1 and P2, and their locations along with timestamp.
###Code
from gps2space import geodf
gdf = geodf.df_to_gdf(df, x='longitude', y='latitude')
gdf.head()
pa = gpd.read_file('../data/pacounty.shp')
ax = pa.boundary.plot(figsize=(12, 12), edgecolor='black', linewidth=0.6)
gdf.plot(ax=ax, column='pid')
plt.show();
###Output
_____no_output_____
###Markdown
The figure shows the distribution of our data with two different colors representing P1 and P2, respectively. Buffer-based activity space We import the `space` module. The `space` module has a function `buffer_space` which takes four parameters:- gdf: This is your GeoDataFrame- dist: This is the buffer distance, the default value is 0 meter- dissolve: This is the level of aggregating from which you aggregate points to form polygon, the default value is week- proj: This is the EPSG identifier you want to use to project your spatial data, the default value is 2163**Please note:** Buffer distance and your projection are related. For raw Lat/Long coordinate pairs (often called unprojected data), the unit is degree. It is not usual to buffer geometry in degrees. You have to decide which projection system is most appropriate for your own data based on the geographical location. For example, **EPSG:2163** is commonly used in the United States, and the unit of distance is meter. See [here](https://epsg.io/) for more information about EPSG identifier.In this example, we will calculate activity space on a weekly basis. Before that, we need to create a column represents `week` from the timestamp. We will also create `year` and `month` just to show how you can obtain those information from timestamp. It is better to include `infer_datetime_format=True` because this will make `datetime` function much faster, especially when dealing with big data.
###Code
gdf['timestamp'] = pd.to_datetime(gdf['timestamp'], infer_datetime_format=True)
gdf['year'] = gdf['timestamp'].dt.year
gdf['month'] = gdf['timestamp'].dt.month
gdf['week'] = gdf['timestamp'].dt.week
gdf.head()
###Output
_____no_output_____
###Markdown
Now that we have the week column, we can calculate buffer-based activity space on a weekly basis using the `buffer_space` function, passing your choices for the four parameters mentioned before. In this example, we will use 100 meters as the buffer distance and project our data in **EPSG:2163**. **Please also note** that Pandas uses the [ISO week date system](https://en.wikipedia.org/wiki/ISO_week_date) to determine the week of a specific date. Sometimes the result is not intuitive, for example:`print(pd.Timestamp('01-01-2017 12:00:00').week)`This will give a result of week number 52, rather than week number 1. This is not necessarily wrong, see the [discussion](https://stackoverflow.com/questions/44372048/python-pandas-timestamp-week-returns-52-for-first-day-of-year/44372130) here. You can work around this issue (if it is an issue for you) by using `DateOffset` (see the [discussion](https://stackoverflow.com/questions/53175035/issues-in-getting-week-numbers-with-weeks-starting-on-sunday-in-python) here) like the following (although we do not recommend doing this):`gdf['week_new'] = (gdf['timestamp'] + pd.DateOffset(days=1)).dt.week`
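As a small runnable check of the week numbering described above (a sketch; it assumes a pandas version where `Timestamp.week` is still available):

```python
import pandas as pd

ts = pd.Timestamp('01-01-2017 12:00:00')
# 2017-01-01 is a Sunday, so under the ISO system it still belongs to week 52
# of the previous ISO year.
print(ts.week)
# Shifting by one day (the DateOffset workaround) moves it into ISO week 1.
print((ts + pd.DateOffset(days=1)).week)
```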
###Code
from gps2space import space
buff_space = space.buffer_space(gdf, dist=100, dissolve='week', proj=2163)
buff_space.head()
###Output
_____no_output_____
###Markdown
We can double-check what the unit of the **EPSG:2163** projection system is:
###Code
buff_space.crs.axis_info[0].unit_name
###Output
_____no_output_____
###Markdown
The result is "metre". Accordingly, the `buff_area` column represents the buffer-based activity space measured in square meters on a weekly basis. You probably noticed that this example did not separate P1 and P2 in calculating activity space. Currently, the `dissolve` parameter only accept one string, not a list of strings. To get activity space for each person on a weekly basis is easy, all you need to do is to concatenate `pid` and `week`, then dissolve by the newly created column.
###Code
gdf['person_week'] = gdf['pid'].astype(str) + '_' + gdf['week'].astype(str)
buff_space_person_week = space.buffer_space(gdf, dist=100, dissolve='person_week', proj=2163)
buff_space_person_week[['person_week','geometry','year','month','week','buff_area']].head()
###Output
_____no_output_____
###Markdown
Now you get the activity space for each person on a weekly basis. Likewise, you can easily get each person's activity space on a yearly basis by concatenating `pid` and `year`, or on a monthly basis by concatenating `pid` and `month`. You can select the columns you are interested in and save the GeoDataFrame to a spatial or non-spatial dataset. Here, we save the GeoDataFrame to a `shp` file and a `csv` file.
###Code
buff_space_person_week[['person_week','buff_area','geometry']].to_file('../data/buffer_space.shp')
buff_space_person_week[['person_week','buff_area']].to_csv('../data/buffer_space.csv')
###Output
_____no_output_____
###Markdown
Convex hull-based space We can also calculate the convex hull-based activity space using the `convex_space` function. The `convex_space` function takes three parameters:- gdf: This is your GeoDataFrame- group: This is the level of aggregation at which points are grouped into a polygon, the default value is week- proj: This is the EPSG identifier you want to use to project your spatial data, the default value is 2163In this example, we will dissolve the points by `person_week`
###Code
convex_space = space.convex_space(gdf, group='person_week', proj=2163)
convex_space.head()
###Output
_____no_output_____
###Markdown
The `convx_area` column represents the convex hull-based activity space measured in square meters in **EPSG:2163**. This dataset is not perfect: in Week 11 for P1 there is only one point, so a Point shape is constructed and 0 is returned for its area. Similarly, in Week 12 for P1, a Line shape is constructed rather than a Polygon and 0 is returned for its area. You can also save this GeoDataFrame to a spatial dataset or a non-spatial dataset just like what we have done for the buffer-based activity space measure. Which method to choose There are pros and cons to the buffer- and convex hull-based measures of activity space. Knowing how they work will help you understand the process and choose the appropriate method.The following figure shows the buffer-based activity space (in red) and the convex hull-based activity space (in blue) built from a set of Lat/Long coordinate pairs (in black). In essence, the buffer-based activity space is built by first drawing a circle around every point using the specified buffer distance and then dissolving all the buffers into a single feature to form a Polygon. The convex hull-based activity space lines up the outermost points and forms the polygon from them. The pro of the buffer-based activity space is that it works even with a single point, in which case the activity space is simply the area of the buffer circle. However, you have to specify the buffer distance, which is sometimes arbitrary and varies across disciplines.The convex hull-based activity space does not need any arbitrary parameter. However, if there are only two points, or the points are collinear, it is impossible to form an enclosing shape, and the returned activity space will be 0. In addition, if there are extreme points far beyond the point cluster, the convex hull-based activity space will be inflated.The choice of method depends on which one makes more sense for your research questions and which one is widely accepted in your field.Other than the buffer- and convex hull-based minimum bounding box, there are other methods, for example, circle, envelope, concave, etc. We may (or may not) include those methods in our package later on. Shared space Activity space can act as a building block for compiling shared space, which indicates interactions, by overlaying activity spaces at different scales. In the following example, we provide a workflow for building shared space from the activity space of P1 and P2 on a weekly basis. We use the buffer-based activity space and redo it with a buffer distance of 1000 meters to get more overlapping area between P1 and P2.
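To make the two constructions concrete, here is a minimal sketch (using `shapely`, which GeoPandas depends on; the coordinates and the 100 m buffer distance are made up for illustration) that builds both shapes from the same handful of projected points and compares their areas:

```python
from shapely.geometry import MultiPoint

# A few made-up coordinates in a projected CRS (units of meters).
points = MultiPoint([(0, 0), (120, 40), (60, 180), (200, 150), (90, 90)])

# Buffer-based: a 100 m circle around every point, dissolved into one shape.
buffer_shape = points.buffer(100)

# Convex hull-based: the polygon spanned by the outermost points.
hull_shape = points.convex_hull

print("buffer-based area (m^2):", round(buffer_shape.area))
print("convex hull-based area (m^2):", round(hull_shape.area))
```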
###Code
gdf.head()
buff1000m = space.buffer_space(gdf, dist=1000, dissolve='person_week', proj=2163)
buff1000m.head()
buff1000m['idx'] = buff1000m.index + 1
buff1000m.set_index('idx', inplace=True)
share_space_list = []
for idx, row in buff1000m.iterrows():
main_poly = buff1000m.iloc[idx-1: idx, :]
# print('\n' + 'Main polygon to be overlaid:', main_poly['person_week'].values.tolist()[:])
other_poly_list = buff1000m.index.tolist()
other_poly_list.remove(idx)
other_poly = buff1000m[buff1000m.index.isin(other_poly_list)]
# print('Polygons to be overlaid:', other_poly['person_week'].values.tolist()[:])
share_space = gpd.overlay(main_poly, other_poly, how='intersection')
share_space['share_space'] = share_space['geometry'].area
share_space_list.append(share_space)
df = pd.concat(share_space_list)
df.shape
df[['person_week_1','person_week_2','week_1','week_2','share_space']]
###Output
_____no_output_____ |
I Resolving Pytho with Data Science/#01. Data Tables & Basic Concepts of Programming/01session.ipynb | ###Markdown
01. Data Tables & Basic Concepts of Programming - Book + Private Lessons [Here ↗](https://sotastica.com/reservar)- Subscribe to my [Blog ↗](https://blog.pythonassembly.com/)- Let's keep in touch on [LinkedIn ↗](www.linkedin.com/in/jsulopz) 🙋 Define a Variable > Assign an `object` (numbers, text) to a `variable`.
###Code
x = "Hola"
x
x = Hola
x
pandas
pandas = 'hola'
pandas
import pandas
###Output
_____no_output_____
###Markdown
The Registry (_aka The Environment_) > Place where Python goes to **recognise what we type**.
###Code
type(89)
type(3.4)
type('342')
type([])
type(())
type({})
###Output
_____no_output_____
###Markdown
Use of Functions Predefined Functions in Python (_Built-in_ Functions) > https://docs.python.org/3/library/functions.html
###Code
x = "Hola mundo"
len(x)
###Output
_____no_output_____
###Markdown
Discipline to Search Solutions in Google > Apply the following steps when **looking for solutions in Google**:>> 1. **Necessity**: How to load an Excel in Python?> 2. **Search in Google**: by keywords> - `load excel python`> - ~~how to load excel in python~~> 3. **Solution**: What's the `function()` that loads an Excel in Python?> - A Function is to Programming what the Atom is to Physics.> - Every time you want to do something in programming> - **You will need a `function()`** to do it> - Therefore, you must **detect parentheses `()`**> - Out of all the words that you see on a website> - Because they indicate the presence of a `function()`. External Functions > Download [this Excel](https://github.com/sotastica/data/raw/main/internet_usage_spain.xlsx).> Apply the above discipline and make it happen 🙂> I want to see the table, c'mon 🙂
###Code
read_excel()
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import pandas as pepa
pepa.read_excel
read_excel()
pandas.read_excel
pandas.read_excel()
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit
pandas.read_excel(io = 'internet_usage_spain.xlsx')
pandas.read_excel(io = internet_usage_spain.xlsx, sheet_name=1)
pandas.read_exce(io = internet_usage_spain.xlsx, sheet_name=1)
'internet_usage_spain.xlsx'
pandas.read_excel(io = 'internet_usage_spain.xlsx', sheet_name=1)
type(pandas.read_excel(io = 'internet_usage_spain.xlsx', sheet_name=1))
###Output
_____no_output_____
###Markdown
The Elements of Programming > - `Library`: where the code of functions is stored.> - `Function`: executes several lines of code with one `word()`.> - `Parameter`: configures the function's behaviour.> - `Object`: a **data structure** that stores information. Code Syntax **What happens inside the computer when we run the code?**> In which order does Python read a line of code?> - From left to right.> - From top to bottom.> Which elements are being used in the previous line of code?
###Code
model
model.altura
model.normalize
'internet_usage_spain.xlsx'
'hola'
x= 'hola'
x
1
###Output
_____no_output_____
###Markdown
1. `library` 2. `.` *DOT NOTATION* to go inside the object/library 3. `function()` 4. passing `objects` to the `parameters` 5. we execute it 6. magic happens 7. the function returns an `object`
###Code
def juana_camina():
a = pandas.read_excel(io = 'internet_usage_spain.xlsx', sheet_name=1)
b = a.age.mean()
return b
juana_camina()
'hola'
type('internet_usage_spain.xlsx')
type(1)
pandas.read_excel(io = 5, sheet_name=1)
pandas.read_excel(io = 'internet_usage_spain.xlsx', sheet_name=1).sum()
pandas.read_excel(io = 'internet_usage_spain.xlsx', sheet_name=1).mean()
pandas.read_excel(io = 'internet_usage_spain.xlsx', sheet_name=1).education.value_counts()
pandas.read_excel(io = 'internet_usage_spain.xlsx', sheet_name=1).education.value_counts().plot(kind='bar')
pandas.read_excel(io = 'internet_usage_spain.xlsx', sheet_name=1).education.value_counts().plot(kind='pie')
pandas.read_excel(io = 'internet_usage_spain.xlsx', sheet_name=1).education.value_counts().plot(kind='pie')
df = pandas.read_excel(io = 'internet_usage_spain.xlsx', sheet_name=1)
###Output
_____no_output_____
###Markdown
Compute the mean of every column
###Code
df.mean()
df
type(df)
df.describe()
###Output
_____no_output_____
###Markdown
Functions inside Objects > - The `dog` makes `guau()`: `dog.guau()`> - The `cat` makes `miau()`: `cat.miau()`> - What could a `DataFrame` make? `object.` + `[tab key]`
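A tiny sketch of the same idea in plain Python (the `Dog` class below is made up just for illustration): objects carry their own functions, which we call through the dot notation.

```python
class Dog:
    def guau(self):            # a function that lives inside the object
        return 'guau!'

dog = Dog()
dog.guau()                     # object + dot + function()
```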
###Code
df
df.describe()
type(df)
df
df.values
type(df)
type(df.values)
df.values
df.values.describe()
df.describe().plot(kind='bar')
df.describe().plot(kind='pie', subplots=True)
df1 = df.describe()
df1.plot(kind='bar', )
df.describe().plot(kind='pie')
###Output
_____no_output_____
###Markdown
Conclusion | Types of Functions > 1. Built-in (Predefined) Functions> 2. External Functions from Libraries> 3. Functions within Objects
###Code
df = pandas.read_excel(io = 'internet_usage_spain.xlsx', sheet_name=1)
df.head()
###Output
_____no_output_____
###Markdown
Accessing `Objects` > Objects are **data structures** that store information. > Which **syntax** do we use to access the information? Dot Notation `.`
###Code
df.describe()
df.age.describe
df.age.describe()
###Output
_____no_output_____
###Markdown
Square Brackets `[]`
###Code
df.age
df[age]
df['age']
df['age'].describe()
###Output
_____no_output_____
###Markdown
Filter & Masking All people older than 80 years
###Code
df.age()
pepa = df.age > 80
pepa
df[pepa]
###Output
_____no_output_____
###Markdown
All people with no studies
###Code
mask = df.education == 'No studies'
mask
df[mask]
###Output
_____no_output_____
###Markdown
All people older than 80 years and with no studies
###Code
df[mask & pepa]
###Output
_____no_output_____ |
nbs/01_environment.ipynb | ###Markdown
Environment> Set variables for: username and password; nifi endpoints; nifi ids.
###Code
#export
from os import environ  # read credentials and endpoints from environment variables

class Credentials:
user = environ['CLOUDERA_USER']
password = environ['CLOUDERA_PASS']
credentials = (user, password)
#export
class NifiEndpoint:
cluster = environ['CLOUDERA_CLUSTER']
nifi_rest = cluster + environ['CLOUDERA_NIFI_REST']
processors = nifi_rest + "processors/"
connections = nifi_rest + "connections/"
flowfile_queues = nifi_rest + "flowfile-queues/"
#export
class DataFlowIds:
""" Dataflow uuids. Every valid DataFlow must have initial-middle-final
    processors and initial-final connections
"""
def __init__(
self,
pipeline: dict,
) -> None:
self.in_connection = pipeline['in_connection']['Id']
self.in_processor = pipeline['in_processor']['Id']
self.middle_processor = pipeline['middle_processor']['Id']
self.out_connection = pipeline['out_connection']['Id']
self.out_processor = pipeline['out_processor']['Id']
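# Minimal usage sketch (not exported): the dict below is a made-up example with
# placeholder ids, just to show the shape of the `pipeline` argument expected
# by DataFlowIds.
example_pipeline = {
    'in_connection': {'Id': 'uuid-in-conn'},
    'in_processor': {'Id': 'uuid-in-proc'},
    'middle_processor': {'Id': 'uuid-mid-proc'},
    'out_connection': {'Id': 'uuid-out-conn'},
    'out_processor': {'Id': 'uuid-out-proc'},
}
example_ids = DataFlowIds(example_pipeline)
assert example_ids.middle_processor == 'uuid-mid-proc'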
#hide
from nbdev.export import notebook2script; notebook2script()
###Output
Converted 01_environment.ipynb.
Converted 02_rest.ipynb.
Converted 03_dataflow.ipynb.
Converted 04_source_to_refined.ipynb.
Converted 09_tools.ipynb.
Converted index.ipynb.
|
archive/aug-2020-numpy/numpy_attempt.ipynb | ###Markdown
Define constants
###Code
r = 0.5
rs = 0.9
z_range = 10
n = 4000
inc = z_range / n
galton_r = 0.511
galton_rs = 0.902
###Output
_____no_output_____
###Markdown
Create the population
###Code
%%time
# Parent Generation
px = np.linspace(-z_range/2, z_range/2, n)
py = np.exp(-px**2/2) / (2*np.pi)**0.5
parea = np.trapz(py, px)
psd = np.sqrt(sum(py*px**2) / sum(py))
%%time
# Individual Offspring distributions
oxs = rs*px + r*px.reshape(-1, 1)
oys = py * py.reshape(-1, 1)
ox = px # remember these are defined to be the same
oys = py * py.reshape(-1, 1)
###Output
_____no_output_____
###Markdown
Correlation coefficient
###Code
%%time
# Offspring Generation
oys_interp = np.array([np.interp(ox, oxs[i], oys[i],
left=0, right=0) for i in range(n)])
oy = oys_interp.sum(axis=0)
oarea = np.trapz(oy, ox)
oy = oy * parea / oarea # parea is essential equal to one
osd = np.sqrt(sum(oy*ox**2) / sum(oy))
plt.plot(px, py)
plt.plot(ox, oy);
###Output
_____no_output_____
###Markdown
Example of the edge
###Code
plt.plot(px, oys_interp[0])
plt.plot(oxs[0], oys[0], linestyle='--');
###Output
_____no_output_____
###Markdown
Possibilities* Get expectation of the parent phenotype score as a function of offspring phenotype score.* Get five distributions of the parent phenotypes for each of the five offspring quintiles. (Attributable.)* Do the reverse as above - get five distributions of the offspring phenotypes for each of the five parent quintiles. (Destined.)* Generate the standard deviation over all different pairs of `(r, rs)`. With 2.3 seconds per run, and 3,600 points (r from 0.1 to 1.6 and rs from 0.1 to 1.6, increments of 0.025) that would take 2.3 hours. We could do it in an hour with smaller `n`. Parameter space
###Code
def get_sd(r, rs):
# Individual Offspring distributions
oxs = rs*px + r*px.reshape(-1, 1)
# Offspring Generation
oys_interp = np.array([np.interp(ox, oxs[i], oys[i],
left=0, right=0) for i in range(n)])
oy = oys_interp.sum(axis=0)
oarea = np.trapz(oy, ox)
oy = oy * parea / oarea # parea is essential equal to one
osd = np.sqrt(sum(oy*ox**2) / sum(oy))
return osd
n_grid = 64
r_space = np.linspace(0.025, 1.6, n_grid)
rv, rsv = np.meshgrid(r_space, r_space)
sdv = np.zeros((n_grid, n_grid))
for i in range(n_grid):
for j in range(n_grid):
print(i, j)
sdv[i, j] = get_sd(rv[i, j], rsv[i, j])
r = 0.5
np.sqrt(1-r**2)
get_sd(r, np.sqrt(1-r**2))
get_sd(r, 0.91)
df = pd.read_csv('r_columns_rs_rows.csv', index_col=0)
df.columns = r_space
sdv = df.to_numpy()
dec = 3
np.array_equal(np.round(sdv, decimals=dec).T, np.round(sdv, decimals=dec))
dec = 4
np.round(sdv, decimals=dec)[
np.where((np.round(sdv, decimals=dec).T != np.round(sdv, decimals=dec)))]
rv[np.where((np.round(sdv, decimals=dec).T != np.round(sdv, decimals=dec)))]
sdv
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(rv, rsv, sdv, cmap='viridis',
linewidth=0, antialiased=False)
ax.set_xlabel('$r$')
ax.set_ylabel('$r_s$')
ax.set_zlabel('SD ratio')
plt.show()
plt.style.use('default')
fig = plt.figure(figsize=(10, 6))
ax = fig.gca(projection='3d')
surf = ax.plot_surface(rv, rsv, abs(sdv - 1), cmap='viridis',
linewidth=0, antialiased=False)
fig.colorbar(surf, shrink=0.5, aspect=5)
ax.set_xlabel('$r$', fontsize=1)
ax.set_ylabel('$r_s$', fontsize=1)
ax.set_zlabel('SD Diff.', fontsize=1)
plt.tight_layout()
plt.savefig('parameters-surface-unsmoothed.png', dpi=300)
plt.show()
###Output
_____no_output_____
###Markdown
Add smoothing
###Code
from scipy.interpolate import interp2d
f = interp2d(rv, rsv, sdv, kind='cubic')
r_space2 = np.linspace(0.025, 1.6, 10*n_grid)
sdv2 = f(r_space2, r_space2)
rv2, rsv2 = np.meshgrid(r_space2, r_space2)
fig = plt.figure(figsize=(10, 6))
ax = fig.gca(projection='3d')
surf = ax.plot_surface(rv2, rsv2, abs(sdv2 - 1), cmap='viridis',
linewidth=0, antialiased=False)
fig.colorbar(surf, shrink=0.5, aspect=5)
ax.set_xlabel('$r$', fontsize=12)
ax.set_ylabel('$r_s$', fontsize=12)
ax.set_zlabel('SD Diff.', fontsize=12)
plt.tight_layout()
plt.savefig('parameters-surface-cubic_smoothing.png', dpi=300)
plt.show()
###Output
_____no_output_____
###Markdown
Make circle of radius 1
###Code
x_circle = np.arange(0.025, 1, 0.0005)
y_circle = np.sqrt(1 - x_circle**2)
###Output
_____no_output_____
###Markdown
Colormesh
###Code
fig = plt.figure()
ax = fig.gca()
ax.pcolormesh(rv, rsv, abs(sdv - 1),
linewidth=0, antialiased=False)
plt.scatter(galton_r, galton_rs, color='r', s=20,
label='Measured Parameters\nin Galton Height Data')
plt.plot(x_circle, y_circle, label='Circle of Radius One', linestyle=':')
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.gca().set_aspect('equal', adjustable='box')
ax.set_xlabel('$r$')
ax.set_ylabel('$r_s$')
plt.legend()
plt.tight_layout()
plt.savefig('parameters-colormesh-unsmoothed.png', dpi=300)
###Output
_____no_output_____
###Markdown
Add smoothing
###Code
fig = plt.figure()
ax = fig.gca()
ax.pcolormesh(rv2, rsv2, abs(sdv2 - 1),
linewidth=0, antialiased=False)
plt.scatter(galton_r, galton_rs, color='r', s=20,
label='Measured Parameters\nin Galton Height Data')
plt.plot(x_circle, y_circle, label='Circle of Radius One', linestyle=':')
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.gca().set_aspect('equal', adjustable='box')
ax.set_xlabel('$r$')
ax.set_ylabel('$r_s$')
plt.legend()
plt.tight_layout()
plt.savefig('parameters-colormesh-cubic_smoothing.png', dpi=300)
plt.show()
###Output
_____no_output_____
###Markdown
Get minimum path directly
###Code
sdva = abs(sdv-1)
sdva2 = abs(sdv2-1)
plt.imshow(sdva)
np.argmin(sdva, axis=0)
np.argmin(sdva, axis=1)
###Output
_____no_output_____
###Markdown
Compare minimum path to circle of radius one
###Code
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
ax1.plot(x_circle, y_circle, label='Circle of Radius One', linestyle='-')
ax2.plot(x_circle, y_circle, label='Circle of Radius One', linestyle='-')
ax1.scatter(rv[np.argmin(sdva, axis=1)][0,:][:40],
rsv[np.argmin(sdva, axis=0)][:,0][:40], alpha=0.4, linewidth=1.5, c='green',
label='Model Simulation')
ax2.plot(rv2[np.argmin(sdva2, axis=1)][0,:][:397],
rsv2[np.argmin(sdva2, axis=0)][:,0][:397],
linewidth=5, alpha=0.4, c='green',
label='Model Simulation\nwith Cubic Smoothing')
ax1.set_aspect('equal', adjustable='box')
ax2.set_aspect('equal', adjustable='box')
ax1.set(xlabel='$r$', ylabel='$r_s$')
ax2.set(xlabel='$r$', ylabel='$r_s$')
ax1.legend()
ax2.legend()
ax1.grid(alpha=0.4)
ax2.grid(alpha=0.4)
# plt.tight_layout()
plt.savefig('parameters-compare_to_circle.png', dpi=300)
plt.show()
rv[np.argmin(sdva, axis=1)][0,:][:40]
rsv[np.argmin(sdva, axis=0)][:,0][:40]
(np.argmin(abs(sdv - 1), axis=1) == np.argmin(abs(sdv - 1), axis=0)).all()
a = np.array([[4, 6, 7],
[1, 5, 2],
[2, 2, 0]])
np.argmin(a, axis=0)
np.argmin(a, axis=1)
a[np.argmin(a, axis=0), np.argmin(a, axis=1)]
###Output
_____no_output_____
###Markdown
Amazing result! $\sigma_2 = \sigma_1 \, \sqrt{r^2 + r_s^2}$ $z_{g} = r \, z_{p}$ $\sigma_g = r_s \, \sigma_1$ * $r$ is the regression coefficient between the expectation of an individual offspring distribution ($g$) and its mid-parental phenotype. Note that $z$ is relative to the parental generation in both cases. * $r_s$ is the ratio of the standard deviation of the individual offspring distribution ($g$) to the standard deviation of the parental (1st) generation.Together, these two constants have been shown through computer simulation to determine the standard deviation of the offspring (2nd) generation with remarkable simplicity. The mid-parental values generally have a smaller spread than the parental generation as a whole because of regression to the mean in mating selection. This model does not take mating selection into account, or assumes it is the same from one generation to the next. Through a simple linear transformation, the mid-parental values can be stretched to have the same standard deviation as the parental generation, or the offspring generation values can be shrunk by the same factor. This does not affect the correlation coefficient between mid-parental phenotypes and the expectation of their individual offspring distribution. This model uses field-measured parameter values in which the correlation coefficient between the parental generation and expected parameter values will equal the regression coefficient ($r$) (as in the model). However, for certain parameter values, the offspring generation will not have approximately the same standard deviation as the parent generation (as in the field measurements), which means the regression coefficient may no longer equal the correlation coefficient.
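As a quick numerical sanity check of this closed form (a sketch using the Galton parameter values defined at the top of this notebook, which can be compared against `get_sd(galton_r, galton_rs)`):

```python
import numpy as np

# Predicted ratio of offspring SD to parental SD from sigma_2 = sigma_1 * sqrt(r^2 + r_s^2),
# using the measured Galton parameters r = 0.511 and r_s = 0.902.
predicted_ratio = np.sqrt(0.511**2 + 0.902**2)
print(predicted_ratio)  # ~1.04, i.e. nearly equal spread across generations
```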
###Code
fig = plt.figure(figsize=(10, 6))
ax = fig.gca(projection='3d')
surf = ax.plot_surface(rv, rsv, abs(np.sqrt(rv**2 + rsv**2)-1), cmap='viridis',
linewidth=0, antialiased=False)
fig.colorbar(surf, shrink=0.5, aspect=5)
ax.set_xlabel('$r$', fontsize=12)
ax.set_ylabel('$r_s$', fontsize=12)
ax.set_zlabel('SD Diff.', fontsize=12)
plt.title('$ |{1 - \sqrt{r^2 + r_s^2}}|$')
plt.tight_layout()
# plt.savefig('parameters-surface-cubic_smoothing.png', dpi=300)
plt.show()
fig = plt.figure()
ax = fig.gca()
ax.pcolormesh(rv, rsv, abs(np.sqrt(rv**2 + rsv**2) - 1),
linewidth=0, antialiased=False)
plt.scatter(galton_r, galton_rs, color='r', s=20,
label='Measured Parameters\nin Galton Height Data')
plt.plot(x_circle, y_circle, label='Circle of Radius One', linestyle=':')
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.gca().set_aspect('equal', adjustable='box')
ax.set_xlabel('$r$')
ax.set_ylabel('$r_s$')
plt.title('$ |{1 - \sqrt{r^2 + r_s^2}}|$')
plt.legend()
plt.tight_layout();
np.sqrt(rv**2 + rsv**2).shape
(sdv - np.sqrt(rv**2 + rsv**2)).mean()
(sdv - np.sqrt(rv**2 + rsv**2)).std()
###Output
_____no_output_____
###Markdown
Symmetry of $r$ and $r_s$
###Code
get_sd(r=0.1, rs=1)
get_sd(r=1, rs=0.1)
get_sd(r=0.2, rs=0.4)
get_sd(r=0.4, rs=0.2)
get_sd(r=0.5, rs=0.6)
get_sd(rs=0.5, r=0.6)
r1, r2 = 0.5, 0.6
a1 = np.sort((r1*px + r2*px.reshape(-1, 1)).ravel())
a2 = np.sort((r2*px + r1*px.reshape(-1, 1)).ravel())
(a1 == a2).all()
(r1*px + r2*px.reshape(-1, 1)).T
((r2*px + r1*px.reshape(-1, 1)) == (r1*px + r2*px.reshape(-1, 1)).T).all()
a = np.array([1, 2, 5])
a + a.reshape(-1, 1)
px
0.8*px.reshape(-1, 1)[0] + 0.1*px
plt.plot(0.8*px.reshape(-1, 1)[0] + 0.1*px)
plt.plot(0.1*px.reshape(-1, 1)[0] + 0.8*px)
0.1*px.reshape(-1, 1)[0] + 0.8*px
0.1*px.reshape(-1, 1)[0] + 0.8*px
px
plt.plot((0.1*px.reshape(-1, 1)[0] + 0.8*px) - 0.8*px.reshape(-1, 1)[0] + 0.1*px)
0.1*px + 0.8*px.reshape(-1, 1)
0.8*px + 0.1*px.reshape(-1, 1)
###Output
_____no_output_____
###Markdown
Miscellaneous plots
###Code
plt.plot(px, py)
for i in range(len(oxs)):
plt.plot(oxs[i], oys[i])
plt.xlim(-4, 4)
plt.plot(px, oys_interp[0])
plt.plot(oxs[0], oys[0], linestyle='--')
###Output
_____no_output_____
###Markdown
Object oriented - perhaps later
###Code
class Population:
def __init__(self, r, rs, z_range, n):
# Parent generation x and y
self.px = np.linspace(-z_range / 2, z_range/2, n)
self.py = np.exp(-px**2/2) / (2*np.pi)**0.5
# Individual Offspring dists. x and y
self.oxs = rs*px + r*px.reshape(-1, 1)
self.oys = py * py.reshape(-1, 1)
gen = Population(r, rs, z_range, n)
###Output
_____no_output_____ |
15_PDEs/.ipynb_checkpoints/15_PDEs-1-checkpoint.ipynb | ###Markdown
15 Partial Differential Equations — 1 Solving Laplace's or Poisson's equation**Poisson's equation** for the electric potential $\Phi(\mathbf{r})$ and the charge density $\rho(\mathbf{r})$:$$\nabla^2 \Phi(x, y, z) = -4\pi\rho(x, y, z)\\$$ For a region of space without charges ($\rho = 0$) this reduces to **Laplace's equation**$$\nabla^2 \Phi(x, y, z) = 0$$ Solutions depend on the **boundary conditions**: * the *value of the potential* on the *boundary* or * the *electric field* (i.e. the derivative of the potential, $\mathbf{E} = -\nabla\Phi$ *normal to the surface* ($\mathbf{n}\cdot\mathbf{E}$), which directly follows from the charge distribution). Example: 2D Laplace equation$$\frac{\partial^2 \Phi(x,y)}{\partial x^2} + \frac{\partial^2 \Phi(x,y)}{\partial y^2} = 0$$("elliptic PDE") Boundary conditions:* square area surrounded by wires* three wires at ground (0 V), one wire at 100 V Finite difference algorithm for Poisson's equationDiscretize space on a lattice (2D) and solve for $\Phi$ on each lattice site. Taylor-expansion of the four neighbors of $\Phi(x, y)$:\begin{align}\Phi(x \pm \Delta x, y) &= \Phi(x, y) \pm \Phi_x \Delta x + \frac{1}{2} \Phi_{xx} \Delta x^2 + \dots\\\Phi(x, y \pm \Delta y) &= \Phi(x, y) \pm \Phi_y \Delta y + \frac{1}{2} \Phi_{yy} \Delta y^2 + \dots\\\end{align} Add equations in pairs: odd terms cancel, and **central difference approximation** for 2nd order partial derivatives (to $\mathcal{O}(\Delta^4)$):\begin{align}\Phi_{xx}(x,y) = \frac{\partial^2 \Phi}{\partial x^2} & \approx \frac{\Phi(x+\Delta x,y) + \Phi(x-\Delta x,y) - 2\Phi(x,y)}{\Delta x^2} \\\Phi_{yy}(x,y) = \frac{\partial^2 \Phi}{\partial y^2} &\approx \frac{\Phi(x,y+\Delta y) + \Phi(x,y-\Delta y) - 2\Phi(x,y)}{\Delta y^2}\end{align} Take $x$ and $y$ grids of equal spacing $\Delta$: Discretized Poisson equation$$\begin{split}\Phi(x+\Delta x,y) + \Phi(x-\Delta x,y) +\Phi(x,y+\Delta y) &+ \\ +\, \Phi(x,y-\Delta y) - 4\Phi(x,y) &= -4\pi\rho(x,y)\,\Delta^2 \end{split}$$ Defines a system of $N_x \times N_y$ simultaneous algebraic equations for $\Phi_{ij}$ to be solved. Can be solved directly via matrix approaches (and then is the best solution) but can be unwieldy for large grids. Alternatively: **iterative solution**:$$\begin{split}4\Phi(x,y) &= \Phi(x+\Delta x,y) + \Phi(x-\Delta x,y) +\\ &+ \Phi(x,y+\Delta y) + \Phi(x,y-\Delta y) + 4\pi\rho(x,y)\,\Delta^2\end{split}$$ Or written for lattice sites $(i, j)$ where $$x = x_0 + i\Delta\quad\text{and}\quad y = y_0 + j\Delta, \quad 0 \leq i,j < N_\text{max}$$ $$\Phi_{i,j} = \frac{1}{4}\Big(\Phi_{i+1,j} + \Phi_{i-1,j} + \Phi_{i,j+1} + \Phi_{i,j-1}\Big) + \pi\rho_{i,j} \Delta^2$$ * Converged solution at $(i, j)$ will be the average potential from the four neighbor sites + charge density contribution.* *Not a direct solution*: iterate and hope for convergence. Jacobi methodDo not change $\Phi_{i,j}$ until a complete sweep has been completed. Gauss-Seidel methodImmediately use updated new values for $\Phi_{i-1, j}$ and $\Phi_{i, j-1}$ (if starting from $\Phi_{1, 1}$).Leads to *accelerated convergence* and therefore *less round-off error* (but distorts symmetry of boundary conditions... hopefully irrelevant when converged but check!) Solution via relaxation (Gauss-Seidel) Solve the box-wire problem on a lattice.Note: $\rho=0$ inside the box. Note for Jupyter notebook use:* For interactive 3D plots, select ``` %matplotlib notebook ```* For standard inline figures (e.g. for exporting the notebook to LaTeX/PDF or html) use ``` %matplotlib inline ```
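For contrast with the Gauss-Seidel sweep implemented below, a minimal Jacobi-style update for the same wire-on-a-box problem (a sketch; every interior site is updated from the previous iterate, here via NumPy slicing) could look like this:

```python
import numpy as np

Nmax, Max_iter = 100, 70
Phi = np.zeros((Nmax, Nmax))
Phi[:, 0] = 100          # wire at y=0 held at 100 V

for _ in range(Max_iter):
    Phi_old = Phi.copy()
    # Jacobi: each interior site becomes the average of its four neighbors,
    # all taken from the previous iteration.
    Phi[1:-1, 1:-1] = 0.25 * (Phi_old[2:, 1:-1] + Phi_old[:-2, 1:-1]
                              + Phi_old[1:-1, 2:] + Phi_old[1:-1, :-2])
```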
###Code
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#%matplotlib inline
%matplotlib notebook
%matplotlib inline
###Output
_____no_output_____
###Markdown
Wire on a box: Solution of Laplace's equation with the Gauss-Seidel algorithm
###Code
Nmax = 100
Max_iter = 70
Phi = np.zeros((Nmax, Nmax), dtype=np.float64)
# initialize boundaries
# everything starts out zero so nothing special for the grounded wires
Phi[:, 0] = 100 # wire at y=0 at 100 V
for n_iter in range(Max_iter):
for xi in range(1, Nmax-1):
for yj in range(1, Nmax-1):
Phi[xi, yj] = 0.25*(Phi[xi+1, yj] + Phi[xi-1, yj]
+ Phi[xi, yj+1] + Phi[xi, yj-1])
###Output
_____no_output_____
###Markdown
Visualization of the potential
###Code
# plot Phi(x,y)
x = np.arange(Nmax)
y = np.arange(Nmax)
X, Y = np.meshgrid(x, y)
Z = Phi[X, Y]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(X, Y, Z, rstride=2, cstride=2)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel(r'potential $\Phi$ (V)')
ax.view_init(elev=40, azim=20)
###Output
_____no_output_____
###Markdown
Nicer plot (use this code for other projects):
###Code
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(X, Y, Z, cmap=plt.cm.coolwarm, rstride=2, cstride=2, alpha=0.3)
cset = ax.contourf(X, Y, Z, zdir='z', offset=-50, cmap=plt.cm.coolwarm)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel(r'potential $\Phi$ (V)')
ax.set_zlim(-50, 100)
ax.view_init(elev=40, azim=20)
cb = fig.colorbar(surf, shrink=0.5, aspect=5)
cb.set_label(r"potential $\Phi$ (V)")
###Output
_____no_output_____ |
demo_script.ipynb | ###Markdown
###Code
### installing dependencies
! pip install matplotlib
! pip install altair
! pip install pandas
! pip install numpy
# unzipping the demo dataset file
! unzip demo_dataset_30K_individuals.zip
# changing working directory
%cd demo_dataset_30K_individuals
########################################################################
##################### CONFIGURABLE VARIABLES #########################
########################################################################
regenerate_graphs = True
load_graphs_in_RAM = True
store_graphs_folder_name = 'SAMPLE_POPULATION'
individual_sample_file_name = 'AS_individual_30K'
########################################################################
########## INDEXING FOR DIFFERENT ITEMS for easy separation ###########
########################################################################
import time
whole_note_book_start_time = time.time()
index_start = {}
index_start['PID'] = 0
index_start['NODE'] = 10000000
index_start['HHID'] = 20000000
index_start['BUSLINE_TRIPNUM'] = 30000000
index_start['MRTLINE_TRIPNUM'] = 40000000
index_start['WORK_NODE'] = 50000000
index_start['EDUCATION_NODE'] = 60000000
index_start['SHOPPING_NODE'] = 70000000
index_start['OTHER_NODE'] = 80000000
####################################################
########## PREPROCESSING NODE IDS SECTION #########
####################################################
node_lat_lon = {}
node_dict = {}
index = index_start['NODE']
with open('AS_node_latlon.csv') as f:
for row in f:
listed = row.strip().split(',')
node_lat_lon[int(listed[0])] = [float(listed[1]), float(listed[2])]
if int(listed[0]) not in node_dict:
node_dict[int(listed[0])] = index
index += 1
voronoi_area = {}
with open('AS_node_area_using_grids.csv') as f:
for row in f:
listed = row.strip().split(',')
voronoi_area[int(listed[0])] = float(listed[1])
###############################################################################
########## PREPROCESSING SECTION FOR CREATING SOME ENCODING OF IDs #############
###############################################################################
import csv
import random
hhidlist = []
hhid = {}
with open(individual_sample_file_name) as f:
next(f)
for row in f:
listed = row.strip().split(',')
hhidlist.append(int(listed[1])) # this index is 3 in case of AI
pid = listed[0] + "-1"
hhid[pid] = int(listed[1]) # this index is 3 in case of AI
hhidlist = list(set(hhidlist))
hhidlist = set(hhidlist)
pid_list = []
with open(individual_sample_file_name) as f:
next(f)
with open('ind_small','w') as f2:
csvwriter = csv.writer(f2)
for row in f:
listed = row.strip().split(',')
pid = listed[0] + "-1"
csvwriter.writerow(listed)
pid_list.append(pid)
pid_list = list(set(pid_list))
pid_list = set(pid_list)
pidDict = {}
pid_list = list(pid_list)
reverse_pidDict = {}
for i in range(len(pid_list)):
pidDict[pid_list[i]] = i
reverse_pidDict[i] = pid_list[i]
modes_share = {}
with open('AS_BC_das.csv') as f:
with open('das_file','w') as f2:
csvwriter = csv.writer(f2)
for row in f:
listed = row.strip().split(',')
pid = listed[0]
if pid in pidDict:
csvwriter.writerow(listed)
with open('traveltime.csv') as f:
with open('ttfile','w') as f2:
csvwriter = csv.writer(f2)
for row in f:
listed = row.strip().split(',')
pid = listed[0]
if pid in pidDict:
csvwriter.writerow(listed)
node_wise_A = {}
for node in node_dict:
if node in voronoi_area:
node_wise_A[node_dict[node]] = voronoi_area[node]
else:
node_wise_A[node_dict[node]] = sum(voronoi_area.values())/len(voronoi_area)
hhid_unique_ids = []
with open('ind_small') as f:
for row in f:
listed = row.strip().split(',')
hhid_unique_ids.append(int(listed[1]))
hhid_unique_ids = list(set(hhid_unique_ids))
hhidDict = {}
reverse_hhidDict = {}
for i in range(len(hhid_unique_ids)):
hhidDict[hhid_unique_ids[i]] = i + index_start['HHID']
reverse_hhidDict[i + index_start['HHID']] = hhid_unique_ids[i]
age = {}
hhid = {}
with open('ind_small') as f:
for row in f:
listed = row.strip().split(',')
pid = listed[0]+"-1"
age[pidDict[pid]] = int(listed[9])
hhid[pidDict[pid]] = hhidDict[int(listed[1])]
hhid_pid_list = {}
for pid in hhid:
if hhid[pid] not in hhid_pid_list:
hhid_pid_list[hhid[pid]] = [pid]
else:
hhid_pid_list[hhid[pid]].append(pid)
###################################################################################
########## STORING INDIVIDUAL MAPS FOR REPEATABILITY OF GRAPH ###################
###################################################################################
import pickle
with open(store_graphs_folder_name+'/running_statevectors/hhid.pickle', 'wb') as handle:
pickle.dump(hhid, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(store_graphs_folder_name+'/running_statevectors/age.pickle', 'wb') as handle:
pickle.dump(age, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(store_graphs_folder_name+'/running_statevectors/pidDict.pickle', 'wb') as handle:
pickle.dump(pidDict, handle, protocol=pickle.HIGHEST_PROTOCOL)
##############################################################
############### GENERATE HOME LOCATION GRAPH #################
##############################################################
import csv
import time
import pickle
import numpy as np
globalTime = time.time()
if regenerate_graphs:
hopsize = 1
count = 0
for sttt in range(0,hopsize):
forwardDict = {}
backwardDict = {}
for pid in hhid:
if count % 10000 == 0:
print (count, 'PIDs processed')
count += 1
for time_slots in range(sttt,288,hopsize):
if time_slots not in forwardDict:
forwardDict[time_slots] = {}
forwardDict[time_slots][pid] = hhid[pid]
if time_slots not in backwardDict:
backwardDict[time_slots] = {}
if hhid[pid] in backwardDict[time_slots]:
backwardDict[time_slots][hhid[pid]].append(pid)
else:
backwardDict[time_slots][hhid[pid]] = [pid]
for time_slots in range(sttt,288,hopsize) :
d = {}
d['forward'] = forwardDict[time_slots]
d['backward'] = backwardDict[time_slots]
with open(store_graphs_folder_name+'/HOME_dicts/home_dict_'+str(time_slots)+'.pickle', 'wb') as handle:
pickle.dump(d, handle, protocol=pickle.HIGHEST_PROTOCOL)
print ("Graphs saved ", (sttt,288,hopsize))
###########################################
########## PRE-PROCESSING SECTION #########
###########################################
import csv
import time
import matplotlib.pyplot as plt
import pickle
import numpy as np
import gc
# Filename: Activity_schedule
"""
create a list of PIDs (persons) from traveltime.csv
We also create two dictionaries;
(1) PID->(node->[start_time, end_time])
(2) PID->(lat_lon) for home location and plotting purposes (For this we take help of node-> lat_lon table )
"""
das_mem = []
with open('das_file') as f:
for row in f:
listed = row.strip().split(',')
stop_node = int(listed[5])
stop_zone = int(listed[6])
prev_stop_node = int(listed[11])
prev_stop_zone = int(listed[12])
## nodes with missing data
if stop_node == 0 or stop_node == 8165 or prev_stop_node == 0 or prev_stop_node == 8165:
continue
das_mem.append(row.strip().split(','))
###########################################
########## GENERATING ACTIVITY GRAPHS ####
###########################################
countMissing = 0
notMissing = 0
debug = []
index = 0
numpy_das = []
import csv
import time
import matplotlib.pyplot as plt
import pickle
import numpy as np
import gc
globalTime = time.time()
if regenerate_graphs:
hopsize = 6
for sttt in range(0,hopsize):
forwardDict = {}
backwardDict = {}
count = 1
relevant_ts = set(range(sttt,288,hopsize))
for listed in das_mem:
count += 1
if count % 100000 == 0:
print (count, "lines_processed in ",time.time()-globalTime )
listed = list(listed)
# listed = row.strip().split(',')
stop_type = listed[4]
if stop_type == 'Home':
continue # this is taken care of by the HHID
stop_node = int(listed[5])
node_seq = node_dict[stop_node]
if stop_type == 'Work':
node_seq += index_start['WORK_NODE']
if stop_type == 'Education':
node_seq += index_start['EDUCATION_NODE']
if stop_type == 'Shop':
node_seq += index_start['SHOPPING_NODE']
if stop_type == 'Other':
node_seq += index_start['OTHER_NODE']
pid = listed[0]
stop_arrival_time = float(listed[9])
stop_departure_time = float(listed[10])
start_5_min_slot = int((int(stop_arrival_time) * 60 + (stop_arrival_time - int(stop_arrival_time) -0.25 )*60) // 5 ) # % (1440//5)
end_5_min_slot = int((int(stop_departure_time) * 60 + (stop_departure_time - int(stop_departure_time) -0.25 )*60) // 5 ) # % (1440//5)
if end_5_min_slot >= (1440//5):
end_5_min_slot = 1440//5 -1
activity_dur = (end_5_min_slot - start_5_min_slot)
listed[9] = start_5_min_slot # + round(6*np.random.rand())
listed[10] = end_5_min_slot # + round(6*np.random.rand())
if listed[10] == listed[9]:
listed[10] += 1
if listed[10] >= (1440//5):
listed[10] = 1440//5 -1
if listed[9] >= (1440//5):
listed[9] = 1440//5 -1
for time_slots in range(listed[9] , listed[10] ):
if time_slots not in relevant_ts:
continue
if time_slots not in forwardDict:
forwardDict[time_slots] = {}
forwardDict[time_slots][pidDict[pid]] = node_seq # cantor(time_slots, ) # str(time_slots)+'_dummy_'+str(stop_node)
if time_slots not in backwardDict:
backwardDict[time_slots] = {}
if node_seq in backwardDict[time_slots]:
backwardDict[time_slots][node_seq].append(pidDict[pid])
else:
backwardDict[time_slots][node_seq] = [pidDict[pid]]
for time_slots in range(sttt,288,hopsize) :
if time_slots not in forwardDict:
forwardDict[time_slots] = {}
if time_slots not in backwardDict:
backwardDict[time_slots] = {}
d = {}
d['forward'] = forwardDict[time_slots]
d['backward'] = backwardDict[time_slots]
with open(store_graphs_folder_name+'/ACT_dicts/act_dict_'+str(time_slots)+'.pickle', 'wb') as handle:
pickle.dump(d, handle, protocol=pickle.HIGHEST_PROTOCOL)
print ("Graphs saved ", (sttt,288,hopsize))
###Output
_____no_output_____
###Markdown

###Code
########################################################
########## PREPROCESSING SECTION for TRANSIT #########
########################################################
import numpy as np
import csv
import time
"""
Table: journey_time
Column name Data Type Description
0 pt_stop_id character (50) PT stop ID
1 pt_line_id character (50) PT line ID
2 trip_id integer Run number ID of that PT line
3 pc_occ double precision Percentage of occupancy of that vehicle at that stop
4 sequence_id integer Sequence ID of the stop in the route of the PT line
5 arrival_time time without time zone Arrival time of the vehicle at that stop
6 waiting_time time without time zone Waiting/dwell time of vehicle at that stop
"""
# ! gunzip journeytime.csv.gz
# ! head -n 1000 journeytime.csv
# creating a map with
# key: (<bus_line>, <stop id>, <time_slot>)
# value: (<bus_line>, <trip_id_of_bus>)
M_JT = {} # Map from journeytime.csv file
a = []
with open('journeytime.csv') as f:
for row in f:
listed = row.strip().split(',')
arr_t = (listed[5])
hh,mm,ss = arr_t.split(':')
ss = int(hh) * 3600 + int(mm) * 60 + int(ss)
M_JT[(listed[1], (listed[0]), int(ss//(300)))] = ( listed[1], int(listed[2])) # 5 minute intervals: 60*5 = 300 seconds
print (len(M_JT.keys()))
#####################################################
########## PREPROCESSING FOR TRANSIT GRAPHS #########
#####################################################
"""
S_keys : a dictionary to map all spatial keys to numbers ;
for travel modes in [ON_MRT, ON_BUS], the spatial identifier is (<Bus/MRT Line>,<>)
"""
import time
startTime = time.time()
S_keys = {}
count_mismatches = 0
count_matches = 0
max_slide = 0
index_bus = index_start['BUSLINE_TRIPNUM']
index_mrt = index_start['MRTLINE_TRIPNUM']
unique_bus_mrt_lines = []
pt_location_wise_A = {}
validLinesCount = 0
ccc = 0
with open('ttfile') as f:
for row in f:
listed = row.strip().split(',')
if listed[7] not in [ 'ON_MRT', 'ON_BUS','WAIT_MRT', 'WAIT_BUS']:
continue
pid = listed[0]
arr_t = (listed[8])
hh,mm,ss = arr_t.split(':')
ss = int(hh) * 3600 + int(mm) * 60 + int(ss)
strt_t = ss - int(listed[9])
arr_five_min_int = ss // 300
start_five_min_int = strt_t // 300
st_key = (listed[10], listed[3], arr_five_min_int)
# print (st_key)
problematicRow = False
if st_key not in M_JT:
tt = st_key[2]
counter = 0
while((st_key[0], st_key[1], tt)) not in M_JT:
tt -= 1
counter += 1
if counter > 1:
problematicRow = True
break
if max_slide < st_key[2] - tt :
max_slide = st_key[2] - tt
st_key = (st_key[0], st_key[1], tt)
if problematicRow:
continue
else:
validLinesCount += 1
if st_key not in M_JT:
continue
pt_line_trip_id = M_JT[st_key]
if pt_line_trip_id not in S_keys:
if listed[7] in [ 'ON_MRT', 'WAIT_MRT']: # , 'WAIT_BUS']:'ON_BUS'
S_keys[pt_line_trip_id] = index_mrt
index_mrt += 1
if listed[7] in [ 'ON_BUS', 'WAIT_BUS']: # , 'WAIT_BUS']:'ON_BUS'
S_keys[pt_line_trip_id] = index_bus
index_bus += 1
# if listed[7] in ['ON_BUS', 'WAIT_BUS']:
# pt_location_wise_A[index] = ( 1/4 * 11.95 * 2.55 ) # taken from https://www.dimensions.guide/element/city-transit-buses
# if listed[7] == ['ON_MRT', 'WAIT_MRT']:
# pt_location_wise_A[index] = (1/5 * 54.91) # average of the values present at https://en.wikipedia.org/wiki/MBTA_subway
unique_bus_mrt_lines.append(listed[10])
print (" Maximum slide: possibility of errors! ", max_slide)
print (" Time taken to read through traveltime.csv file ", time.time() - startTime)
print (" Number of (bus/MRT_line, trip_number) combo ", len(S_keys))
print (" Number of unique MRT/Bus lines ", len(set(unique_bus_mrt_lines)))
print ("Number of valid lines processed ", validLinesCount)
##############################################
########## GENERATING TRANSIT GRAPHS #########
##############################################
"""
Creating ST graph for PT
"""
if regenerate_graphs:
starTime = time.time()
for sttt in range(0,12):
forwardDict = {}
backwardDict = {}
with open('ttfile') as f:
for row in f:
listed = row.strip().split(',')
if listed[7] not in [ 'ON_MRT', 'ON_BUS', 'WAIT_MRT', 'WAIT_BUS']:
continue
pid = listed[0]
arr_t = (listed[8])
hh,mm,ss = arr_t.split(':')
ss = int(hh) * 3600 + int(mm) * 60 + int(ss)
strt_t = ss - int(listed[9])
arr_five_min_int = ss // 300
start_five_min_int = strt_t // 300
st_key = (listed[10], listed[3], arr_five_min_int)
# print (st_key)
problematicRow = False
if st_key not in M_JT:
tt = st_key[2]
counter = 0
while((st_key[0], st_key[1], tt)) not in M_JT:
tt -= 1
counter += 1
if counter > 1:
problematicRow = True
break
if max_slide < st_key[2] - tt :
max_slide = st_key[2] - tt
st_key = (st_key[0], st_key[1], tt)
if problematicRow:
continue
else:
validLinesCount += 1
if st_key not in M_JT:
continue
pt_line_trip_id = M_JT[st_key]
for time_slots in range(start_five_min_int , arr_five_min_int + 1): # +1 because it can make a difference in PT
if time_slots not in range(sttt,288,12):
continue
if time_slots not in forwardDict:
forwardDict[time_slots] = {}
forwardDict[time_slots][pidDict[pid]] = S_keys[pt_line_trip_id] # str(time_slots)+'_dummy_'+str(stop_node)
if time_slots not in backwardDict:
backwardDict[time_slots] = {}
if S_keys[pt_line_trip_id] in backwardDict[time_slots]:
backwardDict[time_slots][S_keys[pt_line_trip_id]].append(pidDict[pid])
else:
backwardDict[time_slots][S_keys[pt_line_trip_id]] = [pidDict[pid]]
for time_slots in range(sttt,288,12) :
if time_slots not in forwardDict:
forwardDict[time_slots] = {}
if time_slots not in backwardDict:
backwardDict[time_slots] = {}
d = {}
d['forward'] = forwardDict[time_slots]
d['backward'] = backwardDict[time_slots]
with open(store_graphs_folder_name+'/PT_dicts/pt_dict_'+str(time_slots)+'.pickle', 'wb') as handle:
pickle.dump(d, handle, protocol=pickle.HIGHEST_PROTOCOL)
print ("Graphs saved ", range(sttt,288,12))
print ("Time taken to create all PT graphs ", time.time() - startTime)
####################################################################
########## GENERATING UNION OF HOME/ACT and TRANSIT GRAPHS #########
####################################################################
# UNION of act and PT graphs
# Since the resolution of PT graph is higher, In cases of clash, the PT graph gets precedence
from multiprocessing import Pool
import multiprocessing
def process_one_t(t):
with open(store_graphs_folder_name+'/HOME_dicts/home_dict_'+str(t)+'.pickle', 'rb') as handle:
g_act = pickle.load(handle)
# converting g_act["backward"] to set for faster removal
for dummy in g_act["backward"]:
g_act["backward"][dummy] = set(g_act["backward"][dummy])
# print ("HH |V|,|E| = ",len(g_act["forward"]), len(g_act["backward"]))
aa = []
with open(store_graphs_folder_name+'/ACT_dicts/act_dict_'+str(t)+'.pickle', 'rb') as handle:
g_pt = pickle.load(handle)
for dummy in g_pt["backward"]:
aa.append(dummy)
# print ("ACT |V|,|E| = ",len(g_pt["forward"]), len(g_pt["backward"]))
# removing clashes
mark_for_removal = []
for pid in g_act["forward"]:
if pid in g_pt["forward"]:
g_act["backward"][g_act["forward"][pid]].remove(pid)
mark_for_removal.append(pid)
for pid in mark_for_removal:
del g_act["forward"][pid]
# taking union
g_union = g_act
for pid in g_pt["forward"]:
assert( pid not in g_union["forward"])
g_union["forward"][pid] = g_pt["forward"][pid]
for dummy in g_pt["backward"]:
assert( dummy not in g_union["backward"])
g_union["backward"][dummy] = g_pt["backward"][dummy]
# print ("UNION_HHID_ACT |V|,|E| = ",len(g_union["forward"]), len(g_union["backward"]))
####### UNION OF RESULT WITH PT #######
g_act = g_union
for dummy in g_act["backward"]:
g_act["backward"][dummy] = set(g_act["backward"][dummy])
aa = []
with open(store_graphs_folder_name+'/PT_dicts/pt_dict_'+str(t)+'.pickle', 'rb') as handle:
g_pt = pickle.load(handle)
# print ("PT |V|,|E| = ",len(g_pt["forward"]) , len(g_pt["backward"]))
for dummy in g_pt['backward']:
aa.append(dummy)
# removing clashes
mark_for_removal = []
for pid in g_act["forward"]:
if pid in g_pt["forward"]:
g_act["backward"][g_act["forward"][pid]].remove(pid)
mark_for_removal.append(pid)
for pid in mark_for_removal:
del g_act["forward"][pid]
# print ("ACT_REDUCTION |V|,|E| = ",len(g_act["forward"]) + len(g_act["backward"]), len(g_act["forward"]))
# taking union
g_union = g_act
for pid in g_pt["forward"]:
assert( pid not in g_union["forward"])
g_union["forward"][pid] = g_pt["forward"][pid]
for dummy in g_pt["backward"]:
assert( dummy not in g_union["backward"])
g_union["backward"][dummy] = g_pt["backward"][dummy]
# print ("UNION_ALL |V|,|E| = ",len(g_union["forward"]), len(g_union["backward"]))
with open(store_graphs_folder_name+'/UNION_dicts/union_dict_'+str(t)+'.pickle', 'wb') as handle:
pickle.dump(g_union, handle, protocol=pickle.HIGHEST_PROTOCOL)
# print ("UNION completed for time slot ",t)
pool = Pool(1) # Create a multiprocessing Pool
res = pool.map(process_one_t, range(0,288))
################################################################
########## LOADING GRAPHS IN RAM FOR FASTER PROCESSING #########
################################################################
if load_graphs_in_RAM:
import time
import pickle
## saving load time if there is a server
G_loaded = {}
for i in range(288):
time.sleep(0.1)
with open(store_graphs_folder_name+'/UNION_dicts/union_dict_'+str(i)+'.pickle', 'rb') as handle:
G_loaded[i] = pickle.load(handle)
###Output
_____no_output_____
###Markdown
Estimating $\theta ' $ using the equation below$$R_0 = \frac{1}{X}\sum_m^X\sum_n^S ( 1 - e^{-\Theta'\sum_m \tau_{nm}})$$
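A small sketch of one way to evaluate the right-hand side for a candidate $\Theta'$ (the exposure matrix below is random dummy data; in the calibration cell that follows, the analogous quantity is obtained by dividing the number of newly exposed individuals by the 1000 seed infections):

```python
import numpy as np

def estimate_r0(theta_prime, tau, X):
    # tau[n, m]: accumulated exposure of susceptible n to seed infectious m
    # over the simulated window; X is the number of seed infections.
    infection_prob = 1.0 - np.exp(-theta_prime * tau.sum(axis=1))
    return infection_prob.sum() / X

# Dummy example with made-up numbers.
rng = np.random.default_rng(0)
tau_dummy = rng.random((5000, 1000)) * 1e-6
print(estimate_r0(0.27, tau_dummy, X=1000))
```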
###Code
#############################################################################################
########## CALIBRATION SECTION(MANUAL TWEAKING OF PARAMETER calibrated_theta_prime) #########
#############################################################################################
###### The only difference from the actual simulation is that here the E->I_a transition is not allowed
calibrated_theta_prime = 10000 # 0.27 actual # 0.385- no PT
est_r_0 = []
# 1: S
# 2: E
# 3: I_s
# 4: I_a
# 5: R
# 6: D
d_D_mu_sigma = {}
with open('age_wise_mu_and_sigma_for_D.csv') as f:
next(f)
for row in f:
listed = row.strip().split(',')
d_D_mu_sigma[int(listed[0])] = {'mu':float(listed[1]), 'sigma':float(listed[2])}
age_map = {0:0, 1:0, 2:1, 3:1, 4:2, 5:2, 6:3, 7:3, 8:4, 9:4, 10:5, 11:5, 12:6, 13:6, 14:7, 15:7, 16:7, 17:7}
for initial_infections in [1000,1000,1000,1000]:
st_dict = {} # keeping track of total stats
stateVector = np.random.rand(len(pidDict),2) * 0
stateVector [:,1] = 0
rho_binary = np.random.rand(len(pidDict),1) * 0 # binary values to decide whether the person is going to go to I_s or not
count_missing_age = 0
count_missing_pt_areas = 0
for i in range(rho_binary.shape[0]):
if i not in age:
print ("Missing PID index in age dict ",i)
continue
age_ = age_map[age[i]]
if age_ > 2:
rho = 0.8
else:
rho = 0.4
r = np.random.rand()
if r < rho:
rho_binary[i,0] = 10000 # marked for progression to I_S
# everyone is susceptible
stateVector[:,:] = [1,0]
n_X = initial_infections
for i in range(len(pidDict)):
if np.random.rand() < initial_infections/len(pidDict):
stateVector[i,:] = [4,0]
startTime = time.time()
nodes_for_plotting = []
backup_states = []
pids_to_be_removed_from_population = set([])
ilist = []
infections_per_node = {}
recoveries_per_node = {}
deaths_per_node = {}
for ii in range(730):
infections_per_node[ii] = {}
recoveries_per_node[ii] = {}
deaths_per_node[ii] = {}
for day_num in range(5) : # days
for t in range(0,288): # 1 day
if not load_graphs_in_RAM:
with open('UNION_dicts/union_dict_'+str(t)+'.pickle', 'rb') as handle:
G = pickle.load(handle)
else:
G = G_loaded[t]
numberOfInfectiousNeigboursTracker = {} ## key:location ## value count
indices_of_infected = (np.where(stateVector[:,0] == 4)[0])
for pid in indices_of_infected:
if pid in pids_to_be_removed_from_population:
continue
if pid not in G['forward']:
continue
if G['forward'][pid] not in numberOfInfectiousNeigboursTracker :
numberOfInfectiousNeigboursTracker[G['forward'][pid]] = 1
else:
numberOfInfectiousNeigboursTracker[G['forward'][pid]] += 1
for dummy in G['backward']:
if dummy not in numberOfInfectiousNeigboursTracker:
continue
if dummy < 20000000 or dummy >= 50000000: ### NODE ACTIVITY
assert(not(dummy>=50000000 and dummy<60000000 )) # should never happen
if dummy >= 60000000 and dummy < 70000000: # WORK
area_of_this_dummy = node_wise_A[dummy - index_start['WORK_NODE']]
elif dummy >= 70000000 and dummy < 80000000: # EDUCATION :
area_of_this_dummy = node_wise_A[dummy - index_start['EDUCATION_NODE']]
elif dummy >= 80000000 and dummy < 90000000: # SHOPPING :
area_of_this_dummy = node_wise_A[dummy - index_start['SHOPPING_NODE']]
elif dummy >= 90000000 and dummy < 100000000: # OTHER :
area_of_this_dummy = node_wise_A[dummy - index_start['OTHER_NODE']]
if area_of_this_dummy > 13000000: # vornoi threshold for area
area_of_this_dummy = 13000000
sigma = area_of_this_dummy * 0.0572 #
sigma_x = sigma
sigma_y = sigma
infectious_ppl = np.random.multivariate_normal([0,0], ([sigma_x,0],[0,sigma_y]), numberOfInfectiousNeigboursTracker[dummy])
p_n = np.random.multivariate_normal([0,0], ([sigma_x,0],[0,sigma_y]), 1)
d = np.sum((p_n - infectious_ppl)**2, axis = 1)
mean_dist = np.mean(d**0.5)
ilist.append(mean_dist)
elif dummy >= 30000000 and dummy < 50000000: ## PT
if dummy < 40000000: # BUS
L = 2.759 * (4**0.333) # (A_bus * 1/4) ** 0.5
else: # MRT
L = 3.314 * (25**0.333)# (A_mrt_coach * 1/5) ** 0.5
mean_dist = L * 0.5014
elif dummy < 30000000 and dummy >= 20000000: ##HOME
mean_dist = 6.5
summation_i_tau = 1/((mean_dist) ** 3) * numberOfInfectiousNeigboursTracker[dummy]
Phi_n_t = 1 - np.exp( - calibrated_theta_prime * summation_i_tau)
if numberOfInfectiousNeigboursTracker[dummy] > 0:
for pid in G['backward'][dummy]:
if pid in pids_to_be_removed_from_population:
continue
if np.random.rand() < Phi_n_t and stateVector[pid,0] == 1:
stateVector[pid,0] = 2
if dummy not in infections_per_node[day_num]:
infections_per_node[day_num][dummy] = 1
else:
infections_per_node[day_num][dummy] += 1
# getting total stats from state vector
st_dict['S',day_num+1] = len(np.where(stateVector[:,0] == 1)[0])
st_dict['E',day_num+1] = len(np.where(stateVector[:,0] == 2)[0])
st_dict['I_s',day_num+1] = len(np.where(stateVector[:,0] == 3)[0])
st_dict['I_a',day_num+1] = len(np.where(stateVector[:,0] == 4)[0])
st_dict['R',day_num+1] = len(np.where(stateVector[:,0] == 5)[0])
st_dict['D',day_num+1] = len(np.where(stateVector[:,0] == 6)[0])
ll = []
for ii in range(len(ilist)):
if ilist[ii] < 200:
ll.append(ilist[ii])
backup_states.append(np.array(stateVector))
for i in range(stateVector.shape[0]):
# I_s -> R or I_a -> R
if stateVector[i,0] == 3 or stateVector[i,0] == 4 :
d_I = np.random.lognormal(1.96, 0.42 ** 0.5, 1) # mu = 1.96; sigma_square = 0.42
Y_n_d = 1 - np.exp(-1/d_I)
if np.random.rand() < Y_n_d:
stateVector[i,0] = 5
hhid_ = hhid[i]
if hhid_ not in recoveries_per_node[day_num]:
recoveries_per_node[day_num][hhid_] = 1
else:
recoveries_per_node[day_num][hhid_] += 1
### actual updates of states
for i in range(stateVector.shape[0]):
# I_s -> D
if stateVector[i,0] == 3 :
age_ = age_map[age[i]]
d_D = np.random.lognormal(d_D_mu_sigma[age_]['mu'], d_D_mu_sigma[age_]['sigma'], 1) # average of 30 days of hospitalisation before death
Mu_n_d = 1 - np.exp(-1/d_D)
if np.random.rand() < Mu_n_d:
stateVector[i,0] = 6
hhid_ = hhid[i]
if hhid_ not in deaths_per_node[day_num]:
deaths_per_node[day_num][hhid_] = 1
else:
deaths_per_node[day_num][hhid_] += 1
# E -> I_a ### NOT ALLOWED DURING CALIBRATING
# for i in range(stateVector.shape[0]):
# if rho_binary[i] != 1: # good news
# if stateVector[i,0] == 2 :
# d_L = np.random.lognormal(1.62, 0.42 ** 0.5, 1) # mu = 1.62; sigma_square = 0.42
# K_n_d = 1 - np.exp(-1/d_L)
# if np.random.rand() < K_n_d:
# stateVector[i,0] = 4
for i in range(stateVector.shape[0]):
# E -> I_s
if rho_binary[i] == 1: # bad news
if stateVector[i,0] == 2 :
d_L = np.random.lognormal(1.62, 0.42 ** 0.5, 1) # mu = 1.62; sigma_square = 0.42
K_n_d = 1 - np.exp(-1/d_L)
if np.random.rand() < K_n_d:
stateVector[i,0] = 3
pids_to_be_removed_from_population.add(i)
print ("S: " , st_dict['S',day_num+1], "E: ", st_dict['E',day_num+1], "I_s: ",st_dict['I_s',day_num+1], "I_a: ", st_dict['I_a',day_num+1], "R: ",st_dict['R',day_num+1], "D: ", st_dict['D',day_num+1])
print ("R_0 at ",day_num+1," days ", st_dict['E',day_num+1]/1000)
###Output
_____no_output_____
###Markdown
The case fatality rate and the rate of progression to the symptomatic stage are largely age-dependent; we use the values from the age-specific parameter table (embedded in the original notebook as "Screenshot 2020-05-11 at 1.37.10 PM").
Node id's start at:
* index_start['PID'] = 0
* index_start['NODE'] = 1000000
* index_start['HHID'] = 2000000
* index_start['BUSLINE_TRIPNUM'] = 3000000
* index_start['MRTLINE_TRIPNUM'] = 4000000
* index_start['WORK_NODE'] = 5000000
* index_start['EDUCATION_NODE'] = 6000000
* index_start['SHOPPING_NODE'] = 7000000
* index_start['OTHER_NODE'] = 8000000
###Code
#############################################################################################
############################## ACTUAL SIMULATION SECTION ###################################
#############################################################################################
###### process and count new infections
import matplotlib.pyplot as plt
est_r_0 = []
import numpy as np
import multiprocessing
# 1: S
# 2: E
# 3: I_s
# 4: I_a
# 5: R
# 6: D
master_start_time = time.time()
d_D_mu_sigma = {}
with open('age_wise_mu_and_sigma_for_D.csv') as f:
next(f)
for row in f:
listed = row.strip().split(',')
d_D_mu_sigma[int(listed[0])] = {'mu':float(listed[1]), 'sigma':float(listed[2])}
age_map = {0:0, 1:0, 2:1, 3:1, 4:2, 5:2, 6:3, 7:3, 8:4, 9:4, 10:5, 11:5, 12:6, 13:6, 14:7, 15:7, 16:7, 17:7}
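# Illustrative sketch (not used by the simulation below): the I -> R, I_s -> D and E -> I
# updates in the loop all follow the same pattern: sample a duration d from a lognormal and
# convert it into a per-day transition probability 1 - exp(-1/d). The helper name is
# hypothetical and for explanation only.
def daily_transition_probability(mu, sigma):
    """Sample a lognormal duration and return the implied one-day transition probability."""
    d = np.random.lognormal(mu, sigma)
    return 1 - np.exp(-1 / d)
# e.g. daily_transition_probability(1.62, 0.42 ** 0.5) is the E -> I update; recovery and
# death use the same pattern with their own parameters.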
for initial_infections in [200]:
st_dict = {} # keeping track of total stats
stateVector = np.random.rand(len(pidDict),2) * 0
stateVector [:,1] = 0
rho_binary = np.random.rand(len(pidDict),1) * 0 # binary values to decide whether the person is going to go to I_s or not
count_missing_age = 0
count_missing_pt_areas = 0
for i in range(rho_binary.shape[0]):
if i not in age:
print ("Missing PID index in age dict ",i)
continue
age_ = age_map[age[i]]
if age_ > 2:
rho = 0.8
else:
rho = 0.4
r = np.random.rand()
if r < rho:
rho_binary[i,0] = 1 # marked for progression to I_S
# everyone is susceptible
stateVector[:,:] = [1,0]
n_X = initial_infections
for i in range(len(pidDict)):
if np.random.rand() < initial_infections/len(pidDict):
stateVector[i,:] = [4,0]
startTime = time.time()
nodes_for_plotting = []
calibrated_theta_prime = 10000 # calibrated values for reference: 0.27 (actual), 0.4 (NO PT)
backup_states = []
pids_to_be_removed_from_population = set([])
ilist = []
infections_per_node = {}
recoveries_per_node = {}
deaths_per_node = {}
for ii in range(730):
infections_per_node[ii] = {}
recoveries_per_node[ii] = {}
deaths_per_node[ii] = {}
for day_num in range(60) : # days
for t in range(0,288): # 1 day
if not load_graphs_in_RAM:
with open('UNION_dicts/union_dict_'+str(t)+'.pickle', 'rb') as handle:
G = pickle.load(handle)
else:
G = G_loaded[t]
numberOfInfectiousNeigboursTracker = {} ## key:location ## value count
indices_of_infected = (np.where(stateVector[:,0] == 4)[0])
for pid in indices_of_infected:
if pid in pids_to_be_removed_from_population:
continue
if pid not in G['forward']:
continue
if G['forward'][pid] not in numberOfInfectiousNeigboursTracker :
numberOfInfectiousNeigboursTracker[G['forward'][pid]] = 1
else:
numberOfInfectiousNeigboursTracker[G['forward'][pid]] += 1
for dummy in G['backward']:
if dummy not in numberOfInfectiousNeigboursTracker:
continue
if dummy < 20000000 or dummy >= 50000000: ### NODE ACTIVITY
assert(not(dummy>=50000000 and dummy<60000000 )) # should never happen
if dummy >= 60000000 and dummy < 70000000: # WORK
area_of_this_dummy = node_wise_A[dummy - index_start['WORK_NODE']]
elif dummy >= 70000000 and dummy < 80000000: # EDUCATION :
area_of_this_dummy = node_wise_A[dummy - index_start['EDUCATION_NODE']]
elif dummy >= 80000000 and dummy < 90000000: # SHOPPING :
area_of_this_dummy = node_wise_A[dummy - index_start['SHOPPING_NODE']]
elif dummy >= 90000000 and dummy < 100000000: # OTHER :
area_of_this_dummy = node_wise_A[dummy - index_start['OTHER_NODE']]
if area_of_this_dummy > 13000000:
area_of_this_dummy = 13000000
sigma = area_of_this_dummy * 0.0572 # 0.00040941176 # 0.048 * 0.005
sigma_x = sigma
sigma_y = sigma
infectious_ppl = np.random.multivariate_normal([0,0], ([sigma_x,0],[0,sigma_y]), numberOfInfectiousNeigboursTracker[dummy])
p_n = np.random.multivariate_normal([0,0], ([sigma_x,0],[0,sigma_y]), 1)
d = np.sum((p_n - infectious_ppl)**2, axis = 1)
mean_dist = np.mean(d**0.5)
ilist.append(mean_dist)
elif dummy >= 30000000 and dummy < 50000000: ## PT
if dummy < 40000000: # BUS
L = 2.759 * (4**0.333) # (A_bus * 1/4) ** 0.5
else: # MRT
L = 3.314 * (25 ** 0.333) # (A_mrt_coach * 1/5) ** 0.5
mean_dist = L * 0.5014
elif dummy < 30000000 and dummy >= 20000000: ##HOME
mean_dist = 6.5
summation_i_tau = 1/((mean_dist) ** 3) * numberOfInfectiousNeigboursTracker[dummy]
Phi_n_t = 1 - np.exp( - calibrated_theta_prime * summation_i_tau)
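# Phi_n_t is the per-5-minute exposure probability at this location:
# Phi_n_t = 1 - exp(-theta' * I_loc / mean_dist^3), where I_loc is the number of infectious
# co-present agents and mean_dist their expected distance to a susceptible individual.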
if numberOfInfectiousNeigboursTracker[dummy] > 0:
for pid in G['backward'][dummy]:
if pid in pids_to_be_removed_from_population:
continue
if np.random.rand() < Phi_n_t and stateVector[pid,0] == 1:
stateVector[pid,0] = 2
if dummy not in infections_per_node[day_num]:
infections_per_node[day_num][dummy] = 1
else:
infections_per_node[day_num][dummy] += 1
# getting total stats from state vector
st_dict['S',day_num+1] = len(np.where(stateVector[:,0] == 1)[0])
st_dict['E',day_num+1] = len(np.where(stateVector[:,0] == 2)[0])
st_dict['I_s',day_num+1] = len(np.where(stateVector[:,0] == 3)[0])
st_dict['I_a',day_num+1] = len(np.where(stateVector[:,0] == 4)[0])
st_dict['R',day_num+1] = len(np.where(stateVector[:,0] == 5)[0])
st_dict['D',day_num+1] = len(np.where(stateVector[:,0] == 6)[0])
backup_states.append(np.array(stateVector))
for i in range(stateVector.shape[0]):
# I_s -> R or I_a -> R
if stateVector[i,0] == 3 or stateVector[i,0] == 4 :
d_I = np.random.lognormal(1.96, 0.42 ** 0.5, 1) # mu = 1.96; sigma_square = 0.42
Y_n_d = 1 - np.exp(-1/d_I)
if np.random.rand() < Y_n_d:
stateVector[i,0] = 5
hhid_ = hhid[i]
if hhid_ not in recoveries_per_node[day_num]:
recoveries_per_node[day_num][hhid_] = 1
else:
recoveries_per_node[day_num][hhid_] += 1
### actual updates of states
for i in range(stateVector.shape[0]):
# I_s -> D
if stateVector[i,0] == 3 :
age_ = age_map[age[i]]
d_D = np.random.lognormal(d_D_mu_sigma[age_]['mu'], d_D_mu_sigma[age_]['sigma'], 1) # average of 30 days of hospitalisation before death
Mu_n_d = 1 - np.exp(-1/d_D)
if np.random.rand() < Mu_n_d:
stateVector[i,0] = 6
hhid_ = hhid[i]
if hhid_ not in deaths_per_node[day_num]:
deaths_per_node[day_num][hhid_] = 1
else:
deaths_per_node[day_num][hhid_] += 1
# E -> I_a
for i in range(stateVector.shape[0]):
if rho_binary[i] != 1: # good news
if stateVector[i,0] == 2 :
d_L = np.random.lognormal(1.62, 0.42 ** 0.5, 1) # mu = 1.62; sigma_square = 0.42
K_n_d = 1 - np.exp(-1/d_L)
if np.random.rand() < K_n_d:
stateVector[i,0] = 4
for i in range(stateVector.shape[0]):
# E -> I_s
if rho_binary[i] == 1: # bad news
if stateVector[i,0] == 2 :
d_L = np.random.lognormal(1.62, 0.42 ** 0.5, 1) # mu = 1.62; sigma_square = 0.42
K_n_d = 1 - np.exp(-1/d_L)
if np.random.rand() < K_n_d:
stateVector[i,0] = 3
pids_to_be_removed_from_population.add(i)
with open(store_graphs_folder_name+'/running_statevectors/stateVector_at_day'+str(day_num)+'.pickle', 'wb') as handle:
pickle.dump(stateVector, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(store_graphs_folder_name+'/running_statevectors/infections_per_node_day'+str(day_num)+'.pickle', 'wb') as handle:
pickle.dump(infections_per_node, handle, protocol=pickle.HIGHEST_PROTOCOL)
print ("Count ``of missing PT area: ", count_missing_pt_areas)
print ("S: " , st_dict['S',day_num+1], "E: ", st_dict['E',day_num+1], "I_s: ",st_dict['I_s',day_num+1], "I_a: ", st_dict['I_a',day_num+1], "R: ",st_dict['R',day_num+1], "D: ", st_dict['D',day_num+1])
print ("At the end of Day ", day_num+1, time.time() - master_start_time, '\n\n\n\n\n\n')
############ PLOTTING OF RESULTS FOLLOWS FROM HERE ##############
import matplotlib as mpl
import pickle
import matplotlib.pyplot as plt
import numpy as np
mpl.rc_file_defaults()
backup_states_loaded = backup_states # modified for demo script; since everything is in RAM
font = {'weight' : 'bold',
'size' : 13}
mpl.rc('font', **font)
S = []
E = []
I = []
R = []
D = []
I_a = []
E_plus_I = []
I_s = []
for i in range(1,60):
stateVector = backup_states_loaded[i-1]
S.append(len(np.where(stateVector[:,0] == 1)[0]))
E.append(len(np.where(stateVector[:,0] == 2)[0]))
I.append(len(np.where(stateVector[:,0] == 3)[0]) + len(np.where(stateVector[:,0] == 4)[0]))
R.append(len(np.where(stateVector[:,0] == 5)[0]))
D.append(len(np.where(stateVector[:,0] == 6)[0]))
I_a.append(len(np.where(stateVector[:,0] == 4)[0]))
I_s.append(len(np.where(stateVector[:,0] == 3)[0]))
E_plus_I.append(I[-1] + E[-1])
plt.plot(range(len(S)), S,'b', label='S')
plt.plot(range(len(E)), E,'#F16913', label='E')
plt.plot(range(len(I)), I,color='red',linestyle='--', label='I = '+r'$ I^S$' + r'+$ I^A$')
plt.plot(range(len(R)), R,'green', label='R')
plt.plot(range(len(D)), D,'#525252', label='D')
plt.plot(range(len(E_plus_I)), E_plus_I,'--',color='#a65628', label='E + I')
plt.plot(range(len(I_a)), I_a,color='#E7298A', label=r'$ I^A$')
plt.plot(range(len(I_s)), I_s,color='#CB181D',label=r'$ I^S$')
plt.yscale('linear')
plt.xlabel("Day", fontsize=20)
plt.ylabel("Number of individuals", fontsize=20)
plt.legend(loc = 'best')
plt.tight_layout()
# plt.grid()
plt.savefig("output_files/overall_state_theta_SEIR.png", dpi=600)
plt.show()
plt.plot(range(len(S)), S,'b', label='S')
plt.plot(range(len(E)), E,'#F16913', label='E')
plt.plot(range(len(I)), I,color='red', linestyle='--',label='I = '+r'$ I^S$' + r'+$ I^A$')
plt.plot(range(len(R)), R,'green', label='R')
plt.plot(range(len(D)), D,color='#525252', label='D')
plt.plot(range(len(E_plus_I)), E_plus_I,'--',color='#a65628', label='E + I')
plt.plot(range(len(I_a)), I_a, color='#E7298A',label=r'$ I^A$')
plt.plot(range(len(I_s)), I_s, color='#CB181D', label=r'$ I^S$')
plt.yscale('log')
# plt.grid()
plt.xlabel("Day", fontsize=20)
plt.ylabel("Number of individuals ", fontsize=20)
plt.legend(loc = 'upper right', fontsize=8)
plt.tight_layout()
plt.savefig("output_files/overall_state_theta_SEIR_log_scale.png", dpi=600)
plt.show()
# plt.plot(range(len(S)), S,'b', label='S')
plt.plot(range(len(E)), E,'#F16913', label='E')
plt.plot(range(len(I)), I,color='red',linestyle='--', label='I = '+r'$ I^S$' + r'+$ I^A$')
# plt.plot(range(len(R)), R,'green', label='R')
plt.plot(range(len(D)), D,color='#525252', label='D')
plt.plot(range(len(E_plus_I)), E_plus_I,'--',color='#a65628', label='E + I')
plt.plot(range(len(I_a)), I_a,color='#E7298A', label=r'$ I^A$')
plt.plot(range(len(I_s)), I_s,color='#CB181D', label=r'$ I^S$')
plt.yscale('linear')
plt.xlabel("Day",fontsize=20)
plt.ylabel("Number of individuals ",fontsize=20)
plt.legend(loc = 'best')
plt.tight_layout()
# plt.grid()
plt.savefig("output_files/overall_state_theta_SEIR_S_R_removed.png", dpi=600)
plt.show()
# plt.plot(range(len(S)), S,'b', label='S')
plt.plot(range(50), E[:50],'#F16913', label='E')
plt.plot(range(50), I[:50],color='red',linestyle='--', label='I = '+r'$I^S$' + r'+$I^A$')
# plt.plot(range(len(R)), R,'green', label='R')
plt.plot(range(50), D[:50],color='#525252', label='D')
plt.plot(range(50), E_plus_I[:50],'--',color='#a65628', label='E + I')
plt.plot(range(50), I_a[:50],color='#E7298A', label=r'$ I^A$')
plt.plot(range(50), I_s[:50],color='#CB181D', label=r'$ I^S$')
# plt.grid()
plt.yscale('linear')
plt.xlabel("Day",fontsize=20)
plt.ylabel("Number of individuals ", fontsize=20)
plt.legend(loc = 'best')
plt.tight_layout()
plt.savefig("output_files/overall_state_theta_SEIR_S_R_removed_zoomed_towards_outbreak.png", dpi=600)
plt.show()
#### R_t
S = []
E = []
I = []
R = []
D = []
I_a = []
E_plus_I = []
I_s = []
r_t = []
Inf_rate = []
Death_rate = []
for i in range(2,60):
stateVector = backup_states_loaded[i-1]
stateVector_prev = backup_states_loaded[i-2]
denominator = (len(np.where(stateVector_prev[:,0] == 4)[0]))
count = 0
for j in range(stateVector.shape[0]):
if stateVector[j][0] == 2 and stateVector_prev[j][0] == 1:
count += 1
numerator = count
if denominator != 0 :
r_t.append(numerator/denominator)
else:
r_t.append(0)
Inf_rate.append(numerator)
new_deaths_today = len(np.where(stateVector[:,0] == 6)[0]) - len(np.where(stateVector_prev[:,0] == 6)[0])
Death_rate.append(new_deaths_today)
# plt.plot(range(len(r_t)), r_t,'--', label='R_t')
r_t_backup = list(r_t)
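# The loop above estimates R(t) as a crude ratio: new S -> E transitions on day t divided by
# the number of asymptomatic infectious (I_a) agents on day t-1. Inf_rate and Death_rate hold
# the raw daily counts of new exposures and new deaths respectively.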
font = {'weight' : 'bold',
'size' : 13}
mpl.rc('font', **font)
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
r_t_sm = np.convolve(r_t, np.ones((1,))/1, mode='same')
Death_rate_sm = np.convolve(Death_rate, np.ones((1,))/1, mode='same')
Inf_rate_sm = np.convolve(Inf_rate, np.ones((1,))/1, mode='same')
# Death_rate_sm /= (2500000*100000)
# Inf_rate_sm/= (2500000*100000) # if population wise normalisation is enabled. removed for demo version
# days set to 60 for demo version
ax1.plot(range((58)), r_t_sm[:58],'orange', label='R(t)')
ax2.plot(range((58)), Inf_rate_sm[:58], 'red', label = 'Infection rate')
ax2.plot(range((58)), Death_rate_sm[:58], '#525252', label = 'Death rate')
ax2.set_yscale('log')
ax1.set_xlabel('Day', fontsize=20)
ax1.set_ylabel(r'$R_t$(linear scale)', color='orange', fontsize=20)
ax2.set_ylabel('Infection rate/Death rate', fontsize=20, color='#525252')
plt.tight_layout()
plt.legend(loc='best', fontsize=10)
# plt.grid()
plt.savefig('output_files/R_t.png',dpi=600)
plt.show()
# ax2.set_ylabel('Y2 data', color='b')
############################# smoothed
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
r_t_sm = np.convolve(r_t, np.ones((5,))/5, mode='same')
Death_rate_sm = np.convolve(Death_rate, np.ones((5,))/5, mode='same')
Inf_rate_sm = np.convolve(Inf_rate, np.ones((5,))/5, mode='same')
# Death_rate_sm /= (2500000*100000)
# Inf_rate_sm/= (2500000*100000)
ax1.plot(range((58)), r_t_sm[:58],'orange', label='R(t)')
ax2.plot(range((58)), Inf_rate_sm[:58], 'red', label = 'Infection rate')
ax2.plot(range((58)), Death_rate_sm[:58], '#525252', label = 'Death rate')
ax2.set_yscale('log')
ax1.set_xlabel('Day', fontsize=20)
ax1.set_ylabel(r'$R_t$(linear scale)', color='orange', fontsize=20)
ax2.set_ylabel('Infection rate/Death rate', fontsize=20, color='#525252')
plt.legend(loc='center right', fontsize=10)
# plt.grid()
plt.tight_layout()
plt.savefig('output_files/R_t smoothed.png',dpi=600)
plt.show()
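# The smoothing above is a plain 5-day moving average; a minimal sketch of the same operation
# (mode='same' keeps the series length, but the first/last two points are averaged with
# implicit zeros and so are biased low):
def smooth5(series):
    return np.convolve(series, np.ones(5) / 5, mode='same')
# e.g. smooth5(Inf_rate) reproduces Inf_rate_sm from the smoothed figure above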
# ax2.set_ylabel('Y2 data', color='b')
hhid_to_node_lat_lon= {}
c = 0
with open('das_file') as f:
for row in f:
listed = row.strip().split(',')
if listed[4] != 'Home':
continue
stop_node = int(listed[5])
hhid_to_node_lat_lon[hhid[pidDict[(listed[0])]]] = node_lat_lon[stop_node] + [stop_node]
## generating spatial file
from datetime import datetime
from datetime import timedelta
import os
import time
import datetime as dt
def createListOfDates(startDate, endDate): # parameters: startDate, endDate in yyyy-mm-dd format
date1 = startDate
date2 = endDate
start = datetime.strptime(date1, '%Y-%m-%d')
end = datetime.strptime(date2, '%Y-%m-%d')
step = timedelta(days=1)
dateList = []
while start <= end:
dateList.append (str(start.date()))
start += step
return dateList
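# Example: createListOfDates('2020-03-01', '2020-03-03') returns
# ['2020-03-01', '2020-03-02', '2020-03-03'] (both endpoints inclusive, daily step).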
listOfDates = createListOfDates('2020-03-01','2022-11-10')
### assign a lat lon to every person
pid_to_lat_lon = {}
countMissing = 0
for pid in range(stateVector.shape[0]):
try:
base_lon, base_lat = node_lat_lon[hhid_to_node_lat_lon[hhid[pid]][2]]
except:
countMissing += 1
continue
if np.random.rand() < 0.5:
pid_lon = base_lon + np.random.normal() * 0.001
else:
pid_lon = base_lon - np.random.normal() * 0.001
if np.random.rand() < 0.5:
pid_lat = base_lat + np.random.normal() * 0.001
else:
pid_lat = base_lat - np.random.normal() * 0.001
pid_to_lat_lon[pid] = [(base_lat), (base_lon)]
# pid_to_lat_lon[pid] = [(pid_lat), (pid_lon)]
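# NOTE: the jittered pid_lat/pid_lon computed above are currently unused; the stored
# coordinates are the household node's base lat/lon (the jittered assignment is left commented out above).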
print ("Count of missing pids ",countMissing)
with open('output_files/node_wise_state.csv','w') as f:
csvwriter = csv.writer(f)
csvwriter.writerow(['SEIR','datetime', 'lat', 'lon'])
for j in range(0,60,7):
spatial_seir = {}
stateVector = backup_states_loaded[j]
missingCount = 0
for i in range(stateVector.shape[0]):
try:
csvwriter.writerow([stateVector[i][0]]+ [listOfDates[j] + ' 00:00'] + pid_to_lat_lon[i])
except:
missingCount += 1
continue
print (missingCount, ' @ day_num = ',j)
n = 60
infections_at_work = [0] * n
infections_at_school = [0] * n
infections_at_shopping = [0] * n
infections_at_other = [0] * n
infections_at_PT = [0] * n
infections_at_BUS = [0] * n
infections_at_MRT = [0] * n
infections_at_Home = [0] * n
for i in range(n):
for dummy in infections_per_node[i]:
if dummy >= 60000000 and dummy < 70000000: # WORK
infections_at_work[i] += (infections_per_node[i][dummy])
if dummy >= 70000000 and dummy < 80000000: # EDUCATION
infections_at_school[i] += (infections_per_node[i][dummy])
if dummy >= 80000000 and dummy < 90000000: # SHOPPING
infections_at_shopping[i] += (infections_per_node[i][dummy])
if dummy >= 90000000 and dummy < 100000000: # OTHER :
infections_at_other[i] += (infections_per_node[i][dummy])
if dummy >= 30000000 and dummy < 50000000: # PT
infections_at_PT[i] += (infections_per_node[i][dummy])
if dummy < 40000000:
infections_at_BUS[i] += (infections_per_node[i][dummy])
else:
infections_at_MRT[i] += (infections_per_node[i][dummy])
if dummy < 30000000 and dummy >= 20000000: ##HOME
infections_at_Home[i] += (infections_per_node[i][dummy])
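# Sketch (hypothetical helper, mirroring the node-id range checks used above): map a location
# node id to its activity label.
def activity_of_node(dummy):
    if 20000000 <= dummy < 30000000:
        return 'HOME'
    if 30000000 <= dummy < 40000000:
        return 'BUS'
    if 40000000 <= dummy < 50000000:
        return 'MRT'
    if 60000000 <= dummy < 70000000:
        return 'WORK'
    if 70000000 <= dummy < 80000000:
        return 'EDUCATION'
    if 80000000 <= dummy < 90000000:
        return 'SHOPPING'
    if 90000000 <= dummy < 100000000:
        return 'OTHER'
    return 'UNKNOWN'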
colormap = {'H':'#1b9e77','W':'#d95f02','E':'#7570b3','S':'#e7298a','O':'#66a61e','B':'#e6ab02','M':'#a6761d','Bus+MRT':'#666666'}
plt.plot(range(len(infections_at_Home)), infections_at_Home, label='Home',c=colormap['H'])
plt.plot(range(len(infections_at_PT)), infections_at_PT, label='Transit',c=colormap['Bus+MRT'])
plt.plot(range(len(infections_at_BUS)), infections_at_BUS, label='Bus',c=colormap['B'])
plt.plot(range(len(infections_at_MRT)), infections_at_MRT, label='MRT',c=colormap['M'])
plt.plot(range(len(infections_at_work)), infections_at_work, label='Work',c=colormap['W'])
plt.plot(range(len(infections_at_school)), infections_at_school, label='Education',c=colormap['E'])
plt.plot(range(len(infections_at_shopping)), infections_at_shopping, label='Shopping',c=colormap['S'])
plt.plot(range(len(infections_at_other)), infections_at_other, label='Other',c=colormap['O'])
plt.yscale('log')
plt.xlabel('Day')
plt.tight_layout()
plt.xlabel('Day', fontsize=20)
plt.savefig('output_files/activity_wise_infections_logscale.png',dpi=600)
plt.show()
plt.plot(range(len(infections_at_Home)), infections_at_Home, label='Home',c=colormap['H'])
plt.plot(range(len(infections_at_PT)), infections_at_PT, label='Transit',c=colormap['Bus+MRT'])
plt.plot(range(len(infections_at_BUS)), infections_at_BUS, label='Bus',c=colormap['B'])
plt.plot(range(len(infections_at_MRT)), infections_at_MRT, label='MRT',c=colormap['M'])
plt.plot(range(len(infections_at_work)), infections_at_work, label='Work',c=colormap['W'])
plt.plot(range(len(infections_at_school)), infections_at_school, label='Education',c=colormap['E'])
plt.plot(range(len(infections_at_shopping)), infections_at_shopping, label='Shopping',c=colormap['S'])
plt.plot(range(len(infections_at_other)), infections_at_other, label='Other',c=colormap['O'])
plt.tight_layout()
plt.xlabel('Day')
plt.xlabel('Day', fontsize=20)
plt.savefig('output_files/activity_wise_infections_linear_scale.png',dpi=600)
plt.show()
infections_at_Home = np.convolve(infections_at_Home, np.ones((5,))/5, mode='same')
infections_at_PT = np.convolve(infections_at_PT, np.ones((5,))/5, mode='same')
infections_at_BUS = np.convolve(infections_at_BUS, np.ones((5,))/5, mode='same')
infections_at_MRT = np.convolve(infections_at_MRT, np.ones((5,))/5, mode='same')
infections_at_work = np.convolve(infections_at_work, np.ones((5,))/5, mode='same')
infections_at_school = np.convolve(infections_at_school, np.ones((5,))/5, mode='same')
infections_at_shopping = np.convolve(infections_at_shopping, np.ones((5,))/5, mode='same')
infections_at_other = np.convolve(infections_at_other, np.ones((5,))/5, mode='same')
plt.plot(range(len(infections_at_Home)), infections_at_Home, label='Home',c=colormap['H'])
plt.plot(range(len(infections_at_PT)), infections_at_PT, label='Transit',c=colormap['Bus+MRT'])
plt.plot(range(len(infections_at_BUS)), infections_at_BUS, label='Bus',c=colormap['B'])
plt.plot(range(len(infections_at_MRT)), infections_at_MRT, label='MRT',c=colormap['M'])
plt.plot(range(len(infections_at_work)), infections_at_work, label='Work',c=colormap['W'])
plt.plot(range(len(infections_at_school)), infections_at_school, label='Education',c=colormap['E'])
plt.plot(range(len(infections_at_shopping)), infections_at_shopping, label='Shopping',c=colormap['S'])
plt.plot(range(len(infections_at_other)), infections_at_other, label='Other',c=colormap['O'])
plt.yscale('log')
plt.legend(fontsize=10)
plt.tight_layout()
plt.xlabel('Day', fontsize=20)
plt.savefig('output_files/activity_wise_infections_logscale_smoothed.png',dpi=600)
plt.show()
plt.plot(range(len(infections_at_Home)), infections_at_Home, label='Home',c=colormap['H'])
plt.plot(range(len(infections_at_PT)), infections_at_PT, label='Transit',c=colormap['Bus+MRT'])
plt.plot(range(len(infections_at_BUS)), infections_at_BUS, label='Bus',c=colormap['B'])
plt.plot(range(len(infections_at_MRT)), infections_at_MRT, label='MRT',c=colormap['M'])
plt.plot(range(len(infections_at_work)), infections_at_work, label='Work',c=colormap['W'])
plt.plot(range(len(infections_at_school)), infections_at_school, label='Education',c=colormap['E'])
plt.plot(range(len(infections_at_shopping)), infections_at_shopping, label='Shopping',c=colormap['S'])
plt.plot(range(len(infections_at_other)), infections_at_other, label='Other',c=colormap['O'])
plt.legend(fontsize=10)
plt.tight_layout()
plt.xlabel('Day', fontsize=20)
plt.savefig('output_files/activity_wise_infections_linear_scale_smoothed.png',dpi=600)
plt.show()
#### double smoothing
infections_at_Home = np.convolve(infections_at_Home, np.ones((5,))/5, mode='same')
infections_at_PT = np.convolve(infections_at_PT, np.ones((5,))/5, mode='same')
infections_at_BUS = np.convolve(infections_at_BUS, np.ones((5,))/5, mode='same')
infections_at_MRT = np.convolve(infections_at_MRT, np.ones((5,))/5, mode='same')
infections_at_work = np.convolve(infections_at_work, np.ones((5,))/5, mode='same')
infections_at_school = np.convolve(infections_at_school, np.ones((5,))/5, mode='same')
infections_at_shopping = np.convolve(infections_at_shopping, np.ones((5,))/5, mode='same')
infections_at_other = np.convolve(infections_at_other, np.ones((5,))/5, mode='same')
plt.plot(range(len(infections_at_Home)), infections_at_Home, label='Home',c=colormap['H'])
plt.plot(range(len(infections_at_PT)), infections_at_PT, label='Transit',c=colormap['Bus+MRT'])
plt.plot(range(len(infections_at_BUS)), infections_at_BUS, label='Bus',c=colormap['B'])
plt.plot(range(len(infections_at_MRT)), infections_at_MRT, label='MRT',c=colormap['M'])
plt.plot(range(len(infections_at_work)), infections_at_work, label='Work',c=colormap['W'])
plt.plot(range(len(infections_at_school)), infections_at_school, label='Education',c=colormap['E'])
plt.plot(range(len(infections_at_shopping)), infections_at_shopping, label='Shopping',c=colormap['S'])
plt.plot(range(len(infections_at_other)), infections_at_other, label='Other',c=colormap['O'])
plt.yscale('log')
plt.xlabel('Day', fontsize=20)
# plt.legend(fontsize=10)
plt.tight_layout()
plt.savefig('output_files/activity_wise_infections_logscale_double_smoothed.png',dpi=600)
plt.show()
plt.plot(range(len(infections_at_Home)), infections_at_Home, label='Home',c=colormap['H'])
plt.plot(range(len(infections_at_PT)), infections_at_PT, label='Transit',c=colormap['Bus+MRT'])
plt.plot(range(len(infections_at_BUS)), infections_at_BUS, label='Bus',c=colormap['B'])
plt.plot(range(len(infections_at_MRT)), infections_at_MRT, label='MRT',c=colormap['M'])
plt.plot(range(len(infections_at_work)), infections_at_work, label='Work',c=colormap['W'])
plt.plot(range(len(infections_at_school)), infections_at_school, label='Education',c=colormap['E'])
plt.plot(range(len(infections_at_shopping)), infections_at_shopping, label='Shopping',c=colormap['S'])
plt.plot(range(len(infections_at_other)), infections_at_other, label='Other',c=colormap['O'])
plt.legend(fontsize=20)
plt.xlabel('Day', fontsize=20)
plt.tight_layout()
plt.savefig('output_files/activity_wise_infections_linear_scale_double_smoothed.png',dpi=600)
plt.show()
import copy
age_backup = copy.deepcopy(age)
age_labels = {'0':'0-9', '1':'10-19','2':'20-29 ','3':'30-39','4':'40-49','5':'50-59','6':'60-69','7':'>70'}
age_map = {0:0, 1:0, 2:1, 3:1, 4:2, 5:2, 6:3, 7:3, 8:4, 9:4, 10:5, 11:5, 12:6, 13:6, 14:7, 15:7, 16:7, 17:7}
import matplotlib as mpl
import pickle
import matplotlib.pyplot as plt
import numpy as np
mpl.style.use('seaborn')
age_count_percentage = {}
for j in range(len(backup_states_loaded[0])):
age_ = age_map[age[j]]
if age_ in age_count_percentage:
age_count_percentage [age_] += 1
else:
age_count_percentage [age_] = 1
for key in range(8):
print (key, round(age_count_percentage[key]*100/sum(age_count_percentage.values()),2),'%')
agewise = np.random.rand(4,4,8) * 0 # 4 weeks; 4 states (E/I_a/I_s/D); 8 age categories
count = 0
weekrange = range(4,8)
for t in weekrange:
day_ = t*7+ 1
stateVector = backup_states_loaded[day_-1]
stateVectorPrev = backup_states_loaded[day_-1]
for j in range(stateVector.shape[0]):
age_ = age_map[age[j]]
state = stateVector[j][0]
if state == 2:
agewise[count,0,age_] += 1 # E
if state == 4:
agewise[count,1,age_] += 1 # I a
if state == 3:
agewise[count,2,age_] += 1 # I s
if state == 6:
agewise[count,3,age_] += 1 # D
count += 1
print (np.sum(agewise))
import pandas as pd
import numpy as np
import altair as alt
age_Cat_labels = ['0-9','10-19','20-29','30-39','40-49','50-59','60-69','>70']
indexList = []
for i in weekrange:
indexList.append('Week '+str(i+1))
dflist = []
for i in range(8):
dflist.append(pd.DataFrame(agewise[:,:,i],index=indexList,columns=['a E','b I_a','c I_s','d D']))
def prep_df(df, name):
df = df.stack().reset_index()
df.columns = ['c1', 'c2', 'values']
df['Age category'] = name
return df
for i in range(8):
dflist[i] = prep_df(dflist[i], age_Cat_labels[i] + ' years' )
df = pd.concat(dflist)
print (df.head())
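# prep_df melts each per-age-category table from wide to long form: 'c1' holds the week label,
# 'c2' the compartment column (E/I_a/I_s/D) and 'values' the count, so the Altair chart below
# facets by week (c1), puts the compartments on the x-axis (c2) and stacks counts by age category.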
alt.Chart(df).mark_bar().encode(
x=alt.X('c2:N', title=None),
y=alt.Y('sum(values):Q', axis=alt.Axis(grid=False, title=None)),
column=alt.Column('c1:N', title=None),
color=alt.Color('Age category:N', scale=alt.Scale(range=list((['#f7f4f9','#e7e1ef','#d4b9da','#c994c7','#df65b0','#e7298a','#ce1256','#91003f']))))
).configure_view(
strokeOpacity=0
)
###Output
_____no_output_____
###Markdown
###Code
import matplotlib.pyplot as plt
def plot_graph_properties(G_loaded, filename ,which_graphs, dpi_=300):
# get average degree of graph
########## NON WEIGHTED DEGREE ###########
average_degree = [0]*288
for i in range(288):
neighbour_count = 0
node_count = 0
n = 0
for dummy in G_loaded[i]["backward"]:
if 'ACT' == which_graphs:
if not (dummy < 20000000 or dummy >= 50000000):
continue
if 'PT' == which_graphs:
if not (dummy >= 30000000 and dummy < 50000000):
continue
if 'WORK' == which_graphs:
if not (dummy >= 60000000 and dummy < 70000000):
continue
if 'EDUCATION' == which_graphs:
if not (dummy >= 70000000 and dummy < 80000000):
continue
if 'SHOPPING' == which_graphs:
if not (dummy >= 80000000 and dummy < 90000000):
continue
if 'OTHER' == which_graphs:
if not (dummy >= 90000000 and dummy < 100000000):
continue
n = len(G_loaded[i]["backward"][dummy])
neighbour_count += (n - 1) * n/2
node_count += n
if node_count != 0:
average_degree[i] = neighbour_count / node_count
else:
average_degree[i] = 0
y_axis_stuff = average_degree[:-1]
x_axis_stuff = list(range(287))
with open('output_files/non_weighted_degree_'+which_graphs+'.pickle', 'wb') as handle:
pickle.dump([x_axis_stuff, y_axis_stuff], handle, protocol=pickle.HIGHEST_PROTOCOL)
############# WEIGHTED DEGREE ##############
average_degree = [0]*288
error_count = 0
for i in range(288):
neighbour_count = 0
node_count = 0
for dummy in G_loaded[i]["backward"]:
n = len(G_loaded[i]["backward"][dummy])
if 'ACT' == which_graphs:
if not (dummy < 20000000 or dummy >= 50000000):
continue
if 'PT' == which_graphs:
if not (dummy >= 30000000 and dummy < 50000000):
continue
if 'WORK' == which_graphs:
if not (dummy >= 60000000 and dummy < 70000000):
continue
if 'EDUCATION' == which_graphs:
if not (dummy >= 70000000 and dummy < 80000000):
continue
if 'SHOPPING' == which_graphs:
if not (dummy >= 80000000 and dummy < 90000000):
continue
if 'OTHER' == which_graphs:
if not (dummy >= 90000000 and dummy < 100000000):
continue
if dummy < 20000000 or dummy >= 50000000: ### NODE ACTIVITY
assert(not(dummy>=50000000 and dummy<60000000 )) # should never happen
if dummy >= 60000000 and dummy < 70000000: # WORK
area_of_this_dummy = node_wise_A[dummy - index_start['WORK_NODE']]
elif dummy >= 70000000 and dummy < 80000000: # EDUCATION :
area_of_this_dummy = node_wise_A[dummy - index_start['EDUCATION_NODE']]
elif dummy >= 80000000 and dummy < 90000000: # SHOPPING :
area_of_this_dummy = node_wise_A[dummy - index_start['SHOPPING_NODE']]
elif dummy >= 90000000 and dummy < 100000000: # OTHER :
area_of_this_dummy = node_wise_A[dummy - index_start['OTHER_NODE']]
r = (area_of_this_dummy / 3.14) ** 0.5
mean_dist = 128*r/(45*3.14)
elif dummy >= 30000000 and dummy < 50000000: ## PT
if dummy < 40000000: # BUS
L = 2.759 * 4 # (A_bus * 1/4) ** 0.5
else: # MRT
L = 3.314 * 25 # (A_mrt_coach * 1/5) ** 0.5
mean_dist = L * 0.5014
elif dummy < 30000000 and dummy >= 20000000: ##HOME
mean_dist = 6.5
neighbour_count += ((n - 1) * (1/mean_dist)) ## assuming same distances(expected)
node_count += n
if node_count != 0:
average_degree[i] = neighbour_count / node_count
else:
average_degree[i] = 0
y_axis_stuff = average_degree[:-1]
x_axis_stuff = list(range(287))
with open('output_files/weighted_degree_'+which_graphs+'.pickle', 'wb') as handle:
pickle.dump([x_axis_stuff, y_axis_stuff], handle, protocol=pickle.HIGHEST_PROTOCOL)
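# Sketch (illustrative, not called above): under the co-presence "clique" assumption used in
# plot_graph_properties, a location with n co-present agents contributes n*(n-1)/2 contact
# pairs; the non-weighted series pickled above is this pair count divided by the number of
# co-present agents in each 5-minute interval.
def pairs_per_person(occupancies):
    """occupancies: list of co-present counts n, one per location (toy input)."""
    pairs = sum(n * (n - 1) / 2 for n in occupancies)
    people = sum(occupancies)
    return pairs / people if people else 0.0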
import matplotlib.pyplot as plt
def plot_graph_properties_distribution(G_loaded, filename ,which_graphs, dpi_=300):
# get average degree of graph
############# WEIGHTED DEGREE ##############
average_degree = [0]*288
error_count = 0
distribution = {}
for i in range(288):
distribution[i] = []
for i in range(288):
neighbour_count = 0
node_count = 0
for dummy in G_loaded[i]["backward"]:
n = len(G_loaded[i]["backward"][dummy])
if 'PT' == which_graphs:
if not (dummy >= 30000000 and dummy < 50000000):
continue
if 'WORK' == which_graphs:
if not (dummy >= 60000000 and dummy < 70000000):
continue
if 'EDUCATION' == which_graphs:
if not (dummy >= 70000000 and dummy < 80000000):
continue
if 'SHOPPING' == which_graphs:
if not (dummy >= 80000000 and dummy < 90000000):
continue
if 'OTHER' == which_graphs:
if not (dummy >= 90000000 and dummy < 100000000):
continue
if dummy < 20000000 or dummy >= 50000000: ### NODE ACTIVITY
assert(not(dummy>=50000000 and dummy<60000000 )) # should never happen
if dummy >= 60000000 and dummy < 70000000: # WORK
area_of_this_dummy = node_wise_A[dummy - index_start['WORK_NODE']]
elif dummy >= 70000000 and dummy < 80000000: # EDUCATION :
area_of_this_dummy = node_wise_A[dummy - index_start['EDUCATION_NODE']]
elif dummy >= 80000000 and dummy < 90000000: # SHOPPING :
area_of_this_dummy = node_wise_A[dummy - index_start['SHOPPING_NODE']]
elif dummy >= 90000000 and dummy < 100000000: # OTHER :
area_of_this_dummy = node_wise_A[dummy - index_start['OTHER_NODE']]
r = (area_of_this_dummy / 3.14) ** 0.5
mean_dist = 128*r/(45*3.14)
elif dummy >= 30000000 and dummy < 50000000: ## PT
if dummy < 40000000: # BUS
L = 2.759 * 4 # (A_bus * 1/4) ** 0.5
else: # MRT
L = 3.314 * 25 # (A_mrt_coach * 1/5) ** 0.5
mean_dist = L * 0.5014
elif dummy < 30000000 and dummy >= 20000000: ##HOME
mean_dist = 6.5
neighbour_count += ((n - 1) * (1/mean_dist)) ## assuming same distances(expected)
node_count += n
if n > 2:
distribution[i].append([(1/mean_dist)]* (n-1))
y_axis_stuff = average_degree[:-1]
with open('output_files/weighted_degree_distribution'+which_graphs+'.pickle', 'wb') as handle:
pickle.dump(distribution, handle, protocol=pickle.HIGHEST_PROTOCOL)
import matplotlib.pyplot as plt
def plot_graph_properties_distribution_non_weighted(G_loaded, filename ,which_graphs, dpi_=300):
# get average degree of graph
average_degree = [0]*288
error_count = 0
distribution = {}
for i in range(288):
distribution[i] = []
for i in range(288):
neighbour_count = 0
node_count = 0
for dummy in G_loaded[i]["backward"]:
n = len(G_loaded[i]["backward"][dummy])
if 'PT' == which_graphs:
if not (dummy >= 30000000 and dummy < 50000000):
continue
if 'WORK' == which_graphs:
if not (dummy >= 60000000 and dummy < 70000000):
continue
if 'EDUCATION' == which_graphs:
if not (dummy >= 70000000 and dummy < 80000000):
continue
if 'SHOPPING' == which_graphs:
if not (dummy >= 80000000 and dummy < 90000000):
continue
if 'OTHER' == which_graphs:
if not (dummy >= 90000000 and dummy < 100000000):
continue
if n > 2:
distribution[i].append(n - 1)
y_axis_stuff = average_degree[:-1]
with open('output_files/non_weighted_degree_distribution'+which_graphs+'.pickle', 'wb') as handle:
pickle.dump(distribution, handle, protocol=pickle.HIGHEST_PROTOCOL)
plot_graph_properties(G_loaded, '_AS_OTHER_DEMO_POP_',which_graphs='OTHER', dpi_ = 600)
plot_graph_properties(G_loaded, '_AS_WORK_DEMO_POP_',which_graphs='WORK', dpi_ = 600)
plot_graph_properties(G_loaded, '_AS_EDUCATION_DEMO_POP_',which_graphs='EDUCATION', dpi_ = 600)
plot_graph_properties(G_loaded, '_AS_SHOPPING_DEMO_POP_',which_graphs='SHOPPING', dpi_ = 600)
plot_graph_properties(G_loaded, '_AS_PT_DEMO_POP_',which_graphs='PT', dpi_ = 600)
plot_graph_properties(G_loaded, '_AS_UNION_DEMO_POP_',which_graphs='UNION', dpi_ = 600)
plot_graph_properties_distribution(G_loaded, '_AS_OTHER_DEMO_POP_',which_graphs='OTHER', dpi_ = 600)
plot_graph_properties_distribution(G_loaded, '_AS_WORK_DEMO_POP_',which_graphs='WORK', dpi_ = 600)
plot_graph_properties_distribution(G_loaded, '_AS_EDUCATION_DEMO_POP_',which_graphs='EDUCATION', dpi_ = 600)
plot_graph_properties_distribution(G_loaded, '_AS_SHOPPING_DEMO_POP_',which_graphs='SHOPPING', dpi_ = 600)
plot_graph_properties_distribution(G_loaded, '_AS_PT_DEMO_POP_',which_graphs='PT', dpi_ = 600)
plot_graph_properties_distribution(G_loaded, '_AS_UNION_DEMO_POP_',which_graphs='UNION', dpi_ = 600)
plot_graph_properties_distribution_non_weighted(G_loaded, '_AS_OTHER_DEMO_POP_',which_graphs='OTHER', dpi_ = 600)
plot_graph_properties_distribution_non_weighted(G_loaded, '_AS_WORK_DEMO_POP_',which_graphs='WORK', dpi_ = 600)
plot_graph_properties_distribution_non_weighted(G_loaded, '_AS_EDUCATION_DEMO_POP_',which_graphs='EDUCATION', dpi_ = 600)
plot_graph_properties_distribution_non_weighted(G_loaded, '_AS_SHOPPING_DEMO_POP_',which_graphs='SHOPPING', dpi_ = 600)
plot_graph_properties_distribution_non_weighted(G_loaded, '_AS_PT_DEMO_POP_',which_graphs='PT', dpi_ = 600)
plot_graph_properties_distribution_non_weighted(G_loaded, '_AS_UNION_DEMO_POP_',which_graphs='UNION', dpi_ = 600)
# plot curves for graph
mpl.style.use('seaborn')
# mpl.rc_file_defaults()
colormap = {'H':'#1b9e77','W':'#d95f02','E':'#7570b3','S':'#e7298a','O':'#66a61e','B':'#e6ab02','M':'#a6761d','Bus+MRT':'#666666'}
with open('output_files/weighted_degree_WORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
plt.plot(range(len(WORK[1])), WORK[1], c = colormap['W'], label='Work')
with open('output_files/weighted_degree_EDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
plt.plot(range(len(EDUCATION[1])), EDUCATION[1], c = colormap['E'], label='Education')
with open('output_files/weighted_degree_SHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
plt.plot(range(len(SHOPPING[1])), SHOPPING[1], c = colormap['S'], label='Shopping')
with open('output_files/weighted_degree_OTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
plt.plot(range(len(OTHER[1])), OTHER[1], c = colormap['O'], label='Other')
with open('output_files/weighted_degree_PT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
plt.plot(range(len(PT[1])), PT[1], c = colormap['Bus+MRT'], label='Transit')
with open('output_files/weighted_degree_UNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
plt.plot(range(len(UNION[1])), UNION[1], c = 'b', label='UNION')
plt.legend()
# plt.grid()
plt.yscale('log')
plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Average weighted degree')
plt.savefig('weighted_degrees.png',dpi=600)
plt.show()
with open('output_files/weighted_degree_WORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
vals = np.convolve(WORK[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(WORK[1])),vals, c = colormap['W'], label='Work')
with open('output_files/weighted_degree_EDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
vals = np.convolve(EDUCATION[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(EDUCATION[1])), vals, c = colormap['E'], label='Education')
with open('output_files/weighted_degree_SHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
vals = np.convolve(SHOPPING[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(SHOPPING[1])), vals, c = colormap['S'], label='Shopping')
with open('output_files/weighted_degree_OTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
vals = np.convolve(OTHER[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(OTHER[1])), vals, c = colormap['O'], label='Other')
with open('output_files/weighted_degree_PT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
vals = np.convolve(PT[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(PT[1])), vals, c = colormap['Bus+MRT'], label='Transit')
with open('output_files/weighted_degree_UNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
vals = np.convolve(UNION[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(UNION[1])), vals, c = 'b', label='UNION')
plt.legend()
plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Average weighted degree')
# plt.grid()
plt.yscale('log')
plt.savefig('output_files/weighted_degrees_smoothed.png',dpi=600)
plt.show()
colormap = {'H':'#1b9e77','W':'#d95f02','E':'#7570b3','S':'#e7298a','O':'#66a61e','B':'#e6ab02','M':'#a6761d','Bus+MRT':'#666666'}
with open('output_files/weighted_degree_WORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
plt.plot(range(len(WORK[1])), WORK[1], c = colormap['W'], label='Work')
with open('output_files/weighted_degree_EDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
plt.plot(range(len(EDUCATION[1])), EDUCATION[1], c = colormap['E'], label='Education')
with open('output_files/weighted_degree_SHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
plt.plot(range(len(SHOPPING[1])), SHOPPING[1], c = colormap['S'], label='Shopping')
with open('output_files/weighted_degree_OTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
plt.plot(range(len(OTHER[1])), OTHER[1], c = colormap['O'], label='Other')
with open('output_files/weighted_degree_PT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
plt.plot(range(len(PT[1])), PT[1], c = colormap['Bus+MRT'], label='Transit')
with open('output_files/weighted_degree_UNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
plt.plot(range(len(UNION[1])), UNION[1], c = 'b', label='UNION')
plt.legend()
plt.yscale('linear')
plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Average weighted degree')
# plt.grid()
plt.savefig('output_files/weighted_degrees_linear.png',dpi=600)
plt.show()
with open('output_files/weighted_degree_WORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
vals = np.convolve(WORK[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(WORK[1])),vals, c = colormap['W'], label='Work')
with open('output_files/weighted_degree_EDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
vals = np.convolve(EDUCATION[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(EDUCATION[1])), vals, c = colormap['E'], label='Education')
with open('output_files/weighted_degree_SHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
vals = np.convolve(SHOPPING[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(SHOPPING[1])), vals, c = colormap['S'], label='Shopping')
with open('output_files/weighted_degree_OTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
vals = np.convolve(OTHER[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(OTHER[1])), vals, c = colormap['O'], label='Other')
with open('output_files/weighted_degree_PT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
vals = np.convolve(PT[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(PT[1])), vals, c = colormap['Bus+MRT'], label='Transit')
with open('output_files/weighted_degree_UNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
vals = np.convolve(UNION[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(UNION[1])), vals, c = 'b', label='UNION')
plt.legend()
plt.yscale('linear')
plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Average weighted degree')
# plt.grid()
plt.savefig('output_files/weighted_degrees_smoothed_linear.png',dpi=600)
plt.show()
# plot curves for graph
# mpl.style.use('seaborn')
mpl.rc_file_defaults()
colormap = {'H':'#1b9e77','W':'#d95f02','E':'#7570b3','S':'#e7298a','O':'#66a61e','B':'#e6ab02','M':'#a6761d','Bus+MRT':'#666666'}
with open('output_files/non_weighted_degree_WORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
plt.plot(range(len(WORK[1])), WORK[1], c = colormap['W'], label='Work')
with open('output_files/non_weighted_degree_EDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
plt.plot(range(len(EDUCATION[1])), EDUCATION[1], c = colormap['E'], label='Education')
with open('output_files/non_weighted_degree_SHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
plt.plot(range(len(SHOPPING[1])), SHOPPING[1], c = colormap['S'], label='Shopping')
with open('output_files/non_weighted_degree_OTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
plt.plot(range(len(OTHER[1])), OTHER[1], c = colormap['O'], label='Other')
with open('output_files/non_weighted_degree_PT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
plt.plot(range(len(PT[1])), PT[1], c = colormap['Bus+MRT'], label='Transit')
with open('output_files/non_weighted_degree_UNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
plt.plot(range(len(UNION[1])), UNION[1], c = 'b', label='UNION')
plt.legend()
# plt.grid()
plt.yscale('log')
plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Average degree')
plt.savefig('output_files/non_weighted_degrees.png',dpi=600)
plt.show()
with open('output_files/non_weighted_degree_WORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
vals = np.convolve(WORK[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(WORK[1])),vals, c = colormap['W'], label='Work')
with open('output_files/non_weighted_degree_EDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
vals = np.convolve(EDUCATION[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(EDUCATION[1])), vals, c = colormap['E'], label='Education')
with open('output_files/non_weighted_degree_SHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
vals = np.convolve(SHOPPING[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(SHOPPING[1])), vals, c = colormap['S'], label='Shopping')
with open('output_files/non_weighted_degree_OTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
vals = np.convolve(OTHER[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(OTHER[1])), vals, c = colormap['O'], label='Other')
with open('output_files/non_weighted_degree_PT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
vals = np.convolve(PT[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(PT[1])), vals, c = colormap['Bus+MRT'], label='Transit')
with open('output_files/non_weighted_degree_UNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
vals = np.convolve(UNION[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(UNION[1])), vals, c = 'b', label='UNION')
plt.legend()
plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Average degree')
# plt.grid()
plt.yscale('log')
plt.savefig('output_files/non_weighted_degrees_smoothed.png',dpi=600)
plt.show()
colormap = {'H':'#1b9e77','W':'#d95f02','E':'#7570b3','S':'#e7298a','O':'#66a61e','B':'#e6ab02','M':'#a6761d','Bus+MRT':'#666666'}
with open('output_files/non_weighted_degree_WORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
plt.plot(range(len(WORK[1])), WORK[1], c = colormap['W'], label='Work')
with open('output_files/non_weighted_degree_EDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
plt.plot(range(len(EDUCATION[1])), EDUCATION[1], c = colormap['E'], label='Education')
with open('output_files/non_weighted_degree_SHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
plt.plot(range(len(SHOPPING[1])), SHOPPING[1], c = colormap['S'], label='Shopping')
with open('output_files/non_weighted_degree_OTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
plt.plot(range(len(OTHER[1])), OTHER[1], c = colormap['O'], label='Other')
with open('output_files/non_weighted_degree_PT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
plt.plot(range(len(PT[1])), PT[1], c = colormap['Bus+MRT'], label='Transit')
with open('output_files/non_weighted_degree_UNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
plt.plot(range(len(UNION[1])), UNION[1], c = 'b', label='UNION')
plt.legend()
plt.yscale('linear')
plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Average degree')
# plt.grid()
plt.savefig('output_files/non_weighted_degrees_linear.png',dpi=600)
plt.show()
with open('output_files/non_weighted_degree_WORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
vals = np.convolve(WORK[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(WORK[1])),vals, c = colormap['W'], label='Work')
with open('output_files/non_weighted_degree_EDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
vals = np.convolve(EDUCATION[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(EDUCATION[1])), vals, c = colormap['E'], label='Education')
with open('output_files/non_weighted_degree_SHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
vals = np.convolve(SHOPPING[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(SHOPPING[1])), vals, c = colormap['S'], label='Shopping')
with open('output_files/non_weighted_degree_OTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
vals = np.convolve(OTHER[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(OTHER[1])), vals, c = colormap['O'], label='Other')
with open('output_files/non_weighted_degree_PT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
vals = np.convolve(PT[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(PT[1])), vals, c = colormap['Bus+MRT'], label='Transit')
with open('output_files/non_weighted_degree_UNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
vals = np.convolve(UNION[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(UNION[1])), vals, c = 'b', label='UNION')
plt.legend()
plt.yscale('linear')
plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Average degree')
# plt.grid()
plt.savefig('output_files/non_weighted_degrees_smoothed_linear.png',dpi=600)
plt.show()
# max size of component every minute
colormap = {'H':'#1b9e77','W':'#d95f02','E':'#7570b3','S':'#e7298a','O':'#66a61e','B':'#e6ab02','M':'#a6761d','Bus+MRT':'#666666'}
with open('output_files/non_weighted_degree_distributionWORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
for key in WORK:
if len(WORK[key]) != 0:
WORK[key] = max(WORK[key])
else:
WORK[key] = 0
a = []
for key in range(288):
a.append(WORK[key])
plt.plot(range(len(a)),a,c=colormap['W'],label = 'Work')
with open('output_files/non_weighted_degree_distributionEDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
for key in EDUCATION:
if len(EDUCATION[key]) != 0:
EDUCATION[key] = max(EDUCATION[key])
else:
EDUCATION[key] = 0
a = []
for key in range(288):
a.append(EDUCATION[key])
plt.plot(range(len(a)),a,c=colormap['E'],label = 'Education')
with open('output_files/non_weighted_degree_distributionSHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
for key in SHOPPING:
if len(SHOPPING[key]) != 0:
SHOPPING[key] = max(SHOPPING[key])
else:
SHOPPING[key] = 0
a = []
for key in range(288):
a.append(SHOPPING[key])
plt.plot(range(len(a)),a,c=colormap['S'],label = 'Shopping')
with open('output_files/non_weighted_degree_distributionOTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
for key in OTHER:
if len(OTHER[key]) != 0:
OTHER[key] = max(OTHER[key])
else:
OTHER[key] = 0
a = []
for key in range(288):
a.append(OTHER[key])
plt.plot(range(len(a)),a,c=colormap['O'],label = 'Other')
with open('output_files/non_weighted_degree_distributionPT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
for key in PT:
if len(PT[key]) != 0:
PT[key] = max(PT[key])
else:
PT[key] = 0
a = []
for key in range(288):
a.append(PT[key])
plt.plot(range(len(a)),a,c=colormap['Bus+MRT'],label = 'Transit')
with open('output_files/non_weighted_degree_distributionUNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
for key in UNION:
if len(UNION[key]) != 0:
UNION[key] = max(UNION[key])
else:
UNION[key] = 0
a = []
for key in range(288):
a.append(UNION[key])
plt.plot(range(len(a)),a,c='b',label = 'UNION')
plt.legend()
plt.yscale('linear')
plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Maximum size of clique')
# plt.grid()
plt.savefig('output_files/max_size_clique.png',dpi=600)
plt.show()
########################################. SMOOTHED ###########
with open('output_files/non_weighted_degree_distributionWORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
for key in WORK:
if len(WORK[key]) != 0:
WORK[key] = max(WORK[key])
else:
WORK[key] = 0
a = []
for key in range(288):
a.append(WORK[key])
a = np.convolve(a, np.ones((5,))/5, mode='same')
plt.plot(range(len(a)),a,c=colormap['W'],label = 'Work')
with open('output_files/non_weighted_degree_distributionEDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
for key in EDUCATION:
if len(EDUCATION[key]) != 0:
EDUCATION[key] = max(EDUCATION[key])
else:
EDUCATION[key] = 0
a = []
for key in range(288):
a.append(EDUCATION[key])
a = np.convolve(a, np.ones((5,))/5, mode='same')
plt.plot(range(len(a)),a,c=colormap['E'],label = 'Education')
with open('output_files/non_weighted_degree_distributionSHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
for key in SHOPPING:
if len(SHOPPING[key]) != 0:
SHOPPING[key] = max(SHOPPING[key])
else:
SHOPPING[key] = 0
a = []
for key in range(288):
a.append(SHOPPING[key])
a = np.convolve(a, np.ones((5,))/5, mode='same')
plt.plot(range(len(a)),a,c=colormap['S'],label = 'Shopping')
with open('output_files/non_weighted_degree_distributionOTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
for key in OTHER:
if len(OTHER[key]) != 0:
OTHER[key] = max(OTHER[key])
else:
OTHER[key] = 0
a = []
for key in range(288):
a.append(OTHER[key])
a = np.convolve(a, np.ones((5,))/5, mode='same')
plt.plot(range(len(a)),a,c=colormap['O'],label = 'Other')
with open('output_files/non_weighted_degree_distributionPT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
for key in PT:
if len(PT[key]) != 0:
PT[key] = max(PT[key])
else:
PT[key] = 0
a = []
for key in range(288):
a.append(PT[key])
a = np.convolve(a, np.ones((5,))/5, mode='same')
plt.plot(range(len(a)),a,c=colormap['Bus+MRT'],label = 'Transit')
with open('output_files/non_weighted_degree_distributionUNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
for key in UNION:
if len(UNION[key]) != 0:
UNION[key] = max(UNION[key])
else:
UNION[key] = 0
a = []
for key in range(288):
a.append(UNION[key])
a = np.convolve(a, np.ones((5,))/5, mode='same')
plt.plot(range(len(a)),a,c='b',label = 'UNION')
plt.legend()
plt.yscale('linear')
plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Maximum size of clique')
# plt.grid()
plt.savefig('output_files/max_size_clique_smoothed.png',dpi=600)
plt.show()
import pickle
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# create xticks
timeofdayticks = []
for i in range(288//2):
if i%12 == 0:
timeofdayticks.append(str(i//12)+':00 AM')
else:
timeofdayticks.append(' ')
for i in range(288//2,288):
if i%12==0:
timeofdayticks.append(str(i//12 -12 )+':00 PM')
else:
timeofdayticks.append(' ')
print (timeofdayticks)
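# timeofdayticks labels every 12th 5-minute slot (i.e. each full hour) and leaves the rest
# blank; note that with the integer arithmetic above, noon is rendered as '0:00 PM'.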
# plot curves for graph
# mpl.style.use('seaborn')
import matplotlib as mpl
mpl.rc_file_defaults()
font = {'weight' : 'bold',
'size' : 13}
mpl.rc('font', **font)
# max size of component every minute
colormap = {'H':'#1b9e77','W':'#d95f02','E':'#7570b3','S':'#e7298a','O':'#66a61e','B':'#e6ab02','M':'#a6761d','Bus+MRT':'#666666'}
with open('output_files/non_weighted_degree_distributionWORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
for key in WORK:
if len(WORK[key]) != 0:
WORK[key] = max(WORK[key])
else:
WORK[key] = 0
a = []
for key in range(288):
a.append(WORK[key])
plt.plot(range(len(a)),a,c=colormap['W'],label = 'Work')
with open('output_files/non_weighted_degree_distributionEDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
for key in EDUCATION:
if len(EDUCATION[key]) != 0:
EDUCATION[key] = max(EDUCATION[key])
else:
EDUCATION[key] = 0
a = []
for key in range(288):
a.append(EDUCATION[key])
plt.plot(range(len(a)),a,c=colormap['E'],label = 'Education')
with open('output_files/non_weighted_degree_distributionSHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
for key in SHOPPING:
if len(SHOPPING[key]) != 0:
SHOPPING[key] = max(SHOPPING[key])
else:
SHOPPING[key] = 0
a = []
for key in range(288):
a.append(SHOPPING[key])
plt.plot(range(len(a)),a,c=colormap['S'],label = 'Shopping')
with open('output_files/non_weighted_degree_distributionOTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
for key in OTHER:
if len(OTHER[key]) != 0:
OTHER[key] = max(OTHER[key])
else:
OTHER[key] = 0
a = []
for key in range(288):
a.append(OTHER[key])
plt.plot(range(len(a)),a,c=colormap['O'],label = 'Other')
with open('output_files/non_weighted_degree_distributionPT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
for key in PT:
if len(PT[key]) != 0:
PT[key] = max(PT[key])
else:
PT[key] = 0
a = []
for key in range(288):
a.append(PT[key])
plt.plot(range(len(a)),a,c=colormap['Bus+MRT'],label = 'Transit')
with open('output_files/non_weighted_degree_distributionUNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
for key in UNION:
if len(UNION[key]) != 0:
UNION[key] = max(UNION[key])
else:
UNION[key] = 0
a = []
for key in range(288):
a.append(UNION[key])
plt.plot(range(len(a)),a,c='b',label = 'UNION')
plt.legend()
plt.yscale('linear')
# plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Maximum size of clique',fontsize=20)
# plt.grid()
locs, labels = plt.xticks() # Get the current locations and labels.
plt.xticks(np.arange(0, 1, step=0.2)) # Set label locations.
plt.xticks(np.arange(288), timeofdayticks, rotation=90) # Set text labels.
plt.tight_layout()
plt.savefig('output_files/max_size_clique.png',dpi=600)
plt.show()
########################################. SMOOTHED ###########
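# Same plots as above, smoothed with a 5-point moving average (np.convolve with a box kernel).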
with open('output_files/non_weighted_degree_distributionWORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
for key in WORK:
if len(WORK[key]) != 0:
WORK[key] = max(WORK[key])
else:
WORK[key] = 0
a = []
for key in range(288):
a.append(WORK[key])
a = np.convolve(a, np.ones((5,))/5, mode='same')
plt.plot(range(len(a)),a,c=colormap['W'],label = 'Work')
with open('output_files/non_weighted_degree_distributionEDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
for key in EDUCATION:
if len(EDUCATION[key]) != 0:
EDUCATION[key] = max(EDUCATION[key])
else:
EDUCATION[key] = 0
a = []
for key in range(288):
a.append(EDUCATION[key])
a = np.convolve(a, np.ones((5,))/5, mode='same')
plt.plot(range(len(a)),a,c=colormap['E'],label = 'Education')
with open('output_files/non_weighted_degree_distributionSHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
for key in SHOPPING:
if len(SHOPPING[key]) != 0:
SHOPPING[key] = max(SHOPPING[key])
else:
SHOPPING[key] = 0
a = []
for key in range(288):
a.append(SHOPPING[key])
a = np.convolve(a, np.ones((5,))/5, mode='same')
plt.plot(range(len(a)),a,c=colormap['S'],label = 'Shopping')
with open('output_files/non_weighted_degree_distributionOTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
for key in OTHER:
if len(OTHER[key]) != 0:
OTHER[key] = max(OTHER[key])
else:
OTHER[key] = 0
a = []
for key in range(288):
a.append(OTHER[key])
a = np.convolve(a, np.ones((5,))/5, mode='same')
plt.plot(range(len(a)),a,c=colormap['O'],label = 'Other')
with open('output_files/non_weighted_degree_distributionPT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
for key in PT:
if len(PT[key]) != 0:
PT[key] = max(PT[key])
else:
PT[key] = 0
a = []
for key in range(288):
a.append(PT[key])
a = np.convolve(a, np.ones((5,))/5, mode='same')
plt.plot(range(len(a)),a,c=colormap['Bus+MRT'],label = 'Transit')
with open('output_files/non_weighted_degree_distributionUNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
for key in UNION:
if len(UNION[key]) != 0:
UNION[key] = max(UNION[key])
else:
UNION[key] = 0
a = []
for key in range(288):
a.append(UNION[key])
a = np.convolve(a, np.ones((5,))/5, mode='same')
plt.plot(range(len(a)),a,c='b',label = 'UNION')
plt.legend()
plt.yscale('linear')
# plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Maximum size of clique', fontsize=20)
# plt.grid()
locs, labels = plt.xticks() # Get the current locations and labels.
plt.xticks(np.arange(0, 1, step=0.2)) # Set label locations.
plt.xticks(np.arange(288), timeofdayticks, rotation=90) # Set text labels.
plt.tight_layout()
plt.savefig('output_files/max_size_clique_smoothed.png',dpi=600)
plt.show()
font = {'weight' : 'bold',
'size' : 13}
mpl.rc('font', **font)
colormap = {0:'#762a83',1:'#9970ab',2:'#c2a5cf',3:'#e7d4e8',4:'#d9f0d3',5:'#a6dba0',6:'#5aae61',7:'#1b7837'}
with open('output_files/non_weighted_degree_distributionWORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
with open('output_files/non_weighted_degree_distributionEDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
with open('output_files/non_weighted_degree_distributionSHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
with open('output_files/non_weighted_degree_distributionOTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
with open('output_files/non_weighted_degree_distributionPT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
with open('output_files/non_weighted_degree_distributionUNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
def zero_to_nan(values):
"""Replace every 0 with 'nan' and return a copy."""
return [float('nan') if x==0 else x for x in values]
# combine W/E/S/O to get activity
act = {}
for key in WORK:
act[key] = WORK[key] + EDUCATION[key] + SHOPPING[key] + OTHER[key] ## appending all lists to create one big list
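# Degree histograms of the UNION layer at three-hour intervals, plotted with log-scaled counts.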
for key in range(0,288,12*3):
counts_, bins_ = np.histogram(UNION[key], 10)
plt.plot(bins_[:-1], (counts_), c=colormap[key//(12*3)], label=timeofdayticks[key])
plt.yscale('log')
plt.legend(fontsize=10)
plt.tight_layout()
# locs, labels = plt.xticks() # Get the current locations and labels.
# plt.xticks(np.arange(0, 1, step=0.2)) # Set label locations.
# plt.xticks(np.arange(288), timeofdayticks, rotation=90) # Set text labels.
plt.savefig('output_files/Histograms_at_3_hour_intervals'+str(key)+'.png', dpi = 600)
plt.show()
colormap = {'H':'#1b9e77','W':'#d95f02','E':'#7570b3','S':'#e7298a','O':'#66a61e','B':'#e6ab02','M':'#a6761d','Bus+MRT':'#666666'}
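# Average degree over the day for each layer; each pickle appears to store the per-interval
# average-degree series under key 1 (plotted on a log scale here, with linear and smoothed variants below).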
with open('output_files/non_weighted_degree_WORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
plt.plot(range(len(WORK[1])), WORK[1], c = colormap['W'], label='Work')
with open('output_files/non_weighted_degree_EDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
plt.plot(range(len(EDUCATION[1])), EDUCATION[1], c = colormap['E'], label='Education')
with open('output_files/non_weighted_degree_SHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
plt.plot(range(len(SHOPPING[1])), SHOPPING[1], c = colormap['S'], label='Shopping')
with open('output_files/non_weighted_degree_OTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
plt.plot(range(len(OTHER[1])), OTHER[1], c = colormap['O'], label='Other')
with open('output_files/non_weighted_degree_PT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
plt.plot(range(len(PT[1])), PT[1], c = colormap['Bus+MRT'], label='Transit')
with open('output_files/non_weighted_degree_UNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
plt.plot(range(len(UNION[1])), UNION[1], c = 'b', label='UNION')
plt.legend()
locs, labels = plt.xticks() # Get the current locations and labels.
plt.xticks(np.arange(0, 1, step=0.2)) # Set label locations.
plt.xticks(np.arange(288), timeofdayticks, rotation=90) # Set text labels.
# plt.grid()
plt.yscale('log')
# plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Average degree', fontsize=20)
plt.tight_layout()
plt.savefig('output_files/non_weighted_degrees.png',dpi=600)
plt.show()
with open('output_files/non_weighted_degree_WORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
vals = np.convolve(WORK[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(WORK[1])),vals, c = colormap['W'], label='Work')
with open('output_files/non_weighted_degree_EDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
vals = np.convolve(EDUCATION[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(EDUCATION[1])), vals, c = colormap['E'], label='Education')
with open('output_files/non_weighted_degree_SHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
vals = np.convolve(SHOPPING[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(SHOPPING[1])), vals, c = colormap['S'], label='Shopping')
with open('output_files/non_weighted_degree_OTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
vals = np.convolve(OTHER[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(OTHER[1])), vals, c = colormap['O'], label='Other')
with open('output_files/non_weighted_degree_PT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
vals = np.convolve(PT[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(PT[1])), vals, c = colormap['Bus+MRT'], label='Transit')
with open('output_files/non_weighted_degree_UNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
vals = np.convolve(UNION[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(UNION[1])), vals, c = 'b', label='UNION')
plt.legend()
# plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Average degree', fontsize=20)
# plt.grid()
locs, labels = plt.xticks() # Get the current locations and labels.
plt.xticks(np.arange(0, 1, step=0.2)) # Set label locations.
plt.xticks(np.arange(288), timeofdayticks, rotation=90) # Set text labels.
plt.yscale('log')
plt.tight_layout()
plt.savefig('output_files/non_weighted_degrees_smoothed.png',dpi=600)
plt.show()
colormap = {'H':'#1b9e77','W':'#d95f02','E':'#7570b3','S':'#e7298a','O':'#66a61e','B':'#e6ab02','M':'#a6761d','Bus+MRT':'#666666'}
with open('output_files/non_weighted_degree_WORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
plt.plot(range(len(WORK[1])), WORK[1], c = colormap['W'], label='Work')
with open('output_files/non_weighted_degree_EDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
plt.plot(range(len(EDUCATION[1])), EDUCATION[1], c = colormap['E'], label='Education')
with open('output_files/non_weighted_degree_SHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
plt.plot(range(len(SHOPPING[1])), SHOPPING[1], c = colormap['S'], label='Shopping')
with open('output_files/non_weighted_degree_OTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
plt.plot(range(len(OTHER[1])), OTHER[1], c = colormap['O'], label='Other')
with open('output_files/non_weighted_degree_PT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
plt.plot(range(len(PT[1])), PT[1], c = colormap['Bus+MRT'], label='Transit')
with open('output_files/non_weighted_degree_UNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
plt.plot(range(len(UNION[1])), UNION[1], c = 'b', label='UNION')
plt.legend()
plt.yscale('linear')
# plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Average degree', fontsize=20)
# plt.grid()
locs, labels = plt.xticks() # Get the current locations and labels.
plt.xticks(np.arange(0, 1, step=0.2)) # Set label locations.
plt.xticks(np.arange(288), timeofdayticks, rotation=90) # Set text labels.
plt.tight_layout()
plt.savefig('output_files/non_weighted_degrees_linear.png',dpi=600)
plt.show()
with open('output_files/non_weighted_degree_WORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
vals = np.convolve(WORK[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(WORK[1])),vals, c = colormap['W'], label='Work')
with open('output_files/non_weighted_degree_EDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
vals = np.convolve(EDUCATION[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(EDUCATION[1])), vals, c = colormap['E'], label='Education')
with open('output_files/non_weighted_degree_SHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
vals = np.convolve(SHOPPING[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(SHOPPING[1])), vals, c = colormap['S'], label='Shopping')
with open('output_files/non_weighted_degree_OTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
vals = np.convolve(OTHER[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(OTHER[1])), vals, c = colormap['O'], label='Other')
with open('output_files/non_weighted_degree_PT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
vals = np.convolve(PT[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(PT[1])), vals, c = colormap['Bus+MRT'], label='Transit')
with open('output_files/non_weighted_degree_UNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
vals = np.convolve(UNION[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(UNION[1])), vals, c = 'b', label='UNION')
plt.legend()
plt.yscale('linear')
# plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Average degree', fontsize = 20)
# plt.grid()
locs, labels = plt.xticks() # Get the current locations and labels.
plt.xticks(np.arange(0, 1, step=0.2)) # Set label locations.
plt.xticks(np.arange(288), timeofdayticks, rotation=90) # Set text labels.
plt.tight_layout()
plt.savefig('output_files/non_weighted_degrees_smoothed_linear.png',dpi=600)
plt.show()
colormap = {'H':'#1b9e77','W':'#d95f02','E':'#7570b3','S':'#e7298a','O':'#66a61e','B':'#e6ab02','M':'#a6761d','Bus+MRT':'#666666'}
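# Same set of plots using the weighted-degree pickles: log scale first, then smoothed, linear and smoothed-linear variants.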
with open('output_files/weighted_degree_WORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
plt.plot(range(len(WORK[1])), WORK[1], c = colormap['W'], label='Work')
with open('output_files/weighted_degree_EDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
plt.plot(range(len(EDUCATION[1])), EDUCATION[1], c = colormap['E'], label='Education')
with open('output_files/weighted_degree_SHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
plt.plot(range(len(SHOPPING[1])), SHOPPING[1], c = colormap['S'], label='Shopping')
with open('output_files/weighted_degree_OTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
plt.plot(range(len(OTHER[1])), OTHER[1], c = colormap['O'], label='Other')
with open('output_files/weighted_degree_PT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
plt.plot(range(len(PT[1])), PT[1], c = colormap['Bus+MRT'], label='Transit')
with open('output_files/weighted_degree_UNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
plt.plot(range(len(UNION[1])), UNION[1], c = 'b', label='UNION')
# plt.legend(fontsize =10)
plt.legend(fontsize =10)
# plt.grid()
plt.yscale('log')
# plt.xlabel('Time of day(5 minute intervals)', fontsize=20)
# plt.ylabel('Average weighted degree', fontsize=20)
plt.ylabel('Average weighted degree', fontsize=20)
locs, labels = plt.xticks() # Get the current locations and labels.
plt.xticks(np.arange(0, 1, step=0.2)) # Set label locations.
plt.xticks(np.arange(288), timeofdayticks, rotation=90) # Set text labels.
plt.tight_layout()
plt.savefig('output_files/weighted_degrees.png',dpi=600)
plt.show()
# In[47]:
# plot curves for graph
# mpl.style.use('seaborn')
import matplotlib as mpl
# font = {'weight' : 'bold',
# 'size' : 13}
# mpl.rc('font', **font)
# mpl.rc_file_defaults()
colormap = {'H':'#1b9e77','W':'#d95f02','E':'#7570b3','S':'#e7298a','O':'#66a61e','B':'#e6ab02','M':'#a6761d','Bus+MRT':'#666666'}
with open('output_files/weighted_degree_WORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
plt.plot(range(len(WORK[1])), WORK[1], c = colormap['W'], label='Work')
with open('output_files/weighted_degree_EDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
plt.plot(range(len(EDUCATION[1])), EDUCATION[1], c = colormap['E'], label='Education')
with open('output_files/weighted_degree_SHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
plt.plot(range(len(SHOPPING[1])), SHOPPING[1], c = colormap['S'], label='Shopping')
with open('output_files/weighted_degree_OTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
plt.plot(range(len(OTHER[1])), OTHER[1], c = colormap['O'], label='Other')
with open('output_files/weighted_degree_PT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
plt.plot(range(len(PT[1])), PT[1], c = colormap['Bus+MRT'], label='Transit')
with open('output_files/weighted_degree_UNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
plt.plot(range(len(UNION[1])), UNION[1], c = 'b', label='UNION')
# plt.legend(fontsize =10)
plt.legend(fontsize =10)
# plt.grid()
plt.yscale('log')
# plt.xlabel('Time of day(5 minute intervals)', fontsize=20)
# plt.ylabel('Average weighted degree', fontsize=20)
plt.ylabel('Average weighted degree', fontsize=20)
plt.tight_layout()
locs, labels = plt.xticks() # Get the current locations and labels.
plt.xticks(np.arange(0, 1, step=0.2)) # Set label locations.
plt.xticks(np.arange(288), timeofdayticks, rotation=90) # Set text labels.
plt.savefig('output_files/weighted_degrees.png',dpi=600)
plt.show()
# In[66]:
with open('output_files/weighted_degree_WORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
vals = np.convolve(WORK[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(WORK[1])),vals, c = colormap['W'], label='Work')
with open('output_files/weighted_degree_EDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
vals = np.convolve(EDUCATION[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(EDUCATION[1])), vals, c = colormap['E'], label='Education')
with open('output_files/weighted_degree_SHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
vals = np.convolve(SHOPPING[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(SHOPPING[1])), vals, c = colormap['S'], label='Shopping')
with open('output_files/weighted_degree_OTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
vals = np.convolve(OTHER[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(OTHER[1])), vals, c = colormap['O'], label='Other')
with open('output_files/weighted_degree_PT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
vals = np.convolve(PT[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(PT[1])), vals, c = colormap['Bus+MRT'], label='Transit')
with open('output_files/weighted_degree_UNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
vals = np.convolve(UNION[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(UNION[1])), vals, c = 'b', label='UNION')
plt.legend(loc='lower center', fontsize=10)
# plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Average weighted degree', fontsize =20)
# plt.grid()
locs, labels = plt.xticks() # Get the current locations and labels.
plt.xticks(np.arange(0, 1, step=0.2)) # Set label locations.
plt.xticks(np.arange(288), timeofdayticks, rotation=90) # Set text labels.
plt.yscale('log')
plt.ylim(1e-5,1)
plt.tight_layout()
plt.savefig('output_files/weighted_degrees_smoothed.png',dpi=600)
plt.show()
colormap = {'H':'#1b9e77','W':'#d95f02','E':'#7570b3','S':'#e7298a','O':'#66a61e','B':'#e6ab02','M':'#a6761d','Bus+MRT':'#666666'}
with open('output_files/weighted_degree_WORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
plt.plot(range(len(WORK[1])), WORK[1], c = colormap['W'], label='Work')
with open('output_files/weighted_degree_EDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
plt.plot(range(len(EDUCATION[1])), EDUCATION[1], c = colormap['E'], label='Education')
with open('output_files/weighted_degree_SHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
plt.plot(range(len(SHOPPING[1])), SHOPPING[1], c = colormap['S'], label='Shopping')
with open('output_files/weighted_degree_OTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
plt.plot(range(len(OTHER[1])), OTHER[1], c = colormap['O'], label='Other')
with open('output_files/weighted_degree_PT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
plt.plot(range(len(PT[1])), PT[1], c = colormap['Bus+MRT'], label='Transit')
with open('output_files/weighted_degree_UNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
plt.plot(range(len(UNION[1])), UNION[1], c = 'b', label='UNION')
plt.legend()
plt.yscale('linear')
# plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Average weighted degree',fontsize=20)
# plt.grid()
locs, labels = plt.xticks() # Get the current locations and labels.
plt.xticks(np.arange(0, 1, step=0.2)) # Set label locations.
plt.xticks(np.arange(288), timeofdayticks, rotation=90) # Set text labels.
plt.tight_layout()
plt.savefig('output_files/weighted_degrees_linear.png',dpi=600)
plt.show()
with open('output_files/weighted_degree_WORK'+'.pickle','rb') as handle:
WORK = pickle.load(handle)
vals = np.convolve(WORK[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(WORK[1])),vals, c = colormap['W'], label='Work')
with open('output_files/weighted_degree_EDUCATION'+'.pickle','rb') as handle:
EDUCATION = pickle.load(handle)
vals = np.convolve(EDUCATION[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(EDUCATION[1])), vals, c = colormap['E'], label='Education')
with open('output_files/weighted_degree_SHOPPING'+'.pickle','rb') as handle:
SHOPPING = pickle.load(handle)
vals = np.convolve(SHOPPING[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(SHOPPING[1])), vals, c = colormap['S'], label='Shopping')
with open('output_files/weighted_degree_OTHER'+'.pickle','rb') as handle:
OTHER = pickle.load(handle)
vals = np.convolve(OTHER[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(OTHER[1])), vals, c = colormap['O'], label='Other')
with open('output_files/weighted_degree_PT'+'.pickle','rb') as handle:
PT = pickle.load(handle)
vals = np.convolve(PT[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(PT[1])), vals, c = colormap['Bus+MRT'], label='Transit')
with open('output_files/weighted_degree_UNION'+'.pickle','rb') as handle:
UNION = pickle.load(handle)
vals = np.convolve(UNION[1], np.ones((5,))/5, mode='same')
plt.plot(range(len(UNION[1])), vals, c = 'b', label='UNION')
plt.legend()
locs, labels = plt.xticks() # Get the current locations and labels.
plt.xticks(np.arange(0, 1, step=0.2)) # Set label locations.
plt.xticks(np.arange(288), timeofdayticks, rotation=90) # Set text labels.
plt.yscale('linear')
# plt.xlabel('Time of day(5 minute intervals)')
plt.ylabel('Average weighted degree', fontsize=20)
# plt.grid()
plt.tight_layout()
plt.savefig('output_files/weighted_degrees_smoothed_linear.png',dpi=600)
plt.show()
# In[184]:
###Output
_____no_output_____
###Markdown
The headers for the table activity_Schedule are shown below:
###Code
# get mode shares from DAS
modes_share = {}
das = []
with open('das_file') as f:
for row in f:
listed = row.strip().split(',')
stop_mode = listed[7]
if stop_mode in modes_share:
modes_share[stop_mode] += 1
else:
modes_share[stop_mode] = 1
das.append(listed)
s = sum(modes_share.values())
for mode in modes_share:
print ('\'',mode,'\':', (modes_share[mode]), ',', end='')
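# The raw counts can also be expressed as fractions of the total (illustrative; 's' is the total computed above).
for mode in modes_share:
    print(mode, round(modes_share[mode] / s, 4))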
import collections
node_lat_lon = {}
node_dict = {}
with open('AS_node_latlon.csv') as f:
for row in f:
listed = row.strip().split(',')
node_lat_lon[int(listed[0])] = [float(listed[1]), float(listed[2])]
node_wise_dict_of_act = {}
for i in range(len(das)):
if int(das[i][5]) in node_wise_dict_of_act:
node_wise_dict_of_act[int(das[i][5])].append(das[i][4])
else:
node_wise_dict_of_act[int(das[i][5])]= [das[i][4]]
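# Tally how often each activity type occurs at every node; missing activity types are filled in with zero below.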
for key in node_wise_dict_of_act:
counter=collections.Counter(node_wise_dict_of_act[key])
if 'Home' not in counter:
counter['Home'] = 0
if 'Shop' not in counter:
counter['Shop'] = 0
if 'Education' not in counter:
counter['Education'] = 0
if 'Other' not in counter:
counter['Other'] = 0
if 'Work' not in counter:
counter['Work'] = 0
node_wise_dict_of_act[key] = counter
with open('output_files/node_wise_city_characeristics.csv','w') as f:
csvwriter = csv.writer(f)
csvwriter.writerow(['lat','lon','Home','Work','Education','Shopping','Other'])
for key in node_wise_dict_of_act:
csvwriter.writerow([node_lat_lon[key][1], node_lat_lon[key][0], node_wise_dict_of_act[key]['Home'], node_wise_dict_of_act[key]['Work'], node_wise_dict_of_act[key]['Education'], node_wise_dict_of_act[key]['Shop'], node_wise_dict_of_act[key]['Other']])
print (time.time() - whole_note_book_start_time, 'seconds')
###Output
_____no_output_____
###Markdown
Import data and tilt seriesIn this example we simulate data from a simple blob phantom for demonstrative purposes. Here we use the axis ordering etc. which is default in astra. The following example gives a more practical overview.
###Code
from skimage.data import binary_blobs
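# (numpy as np, matplotlib.pyplot as plt and the reconstruction toolkit imported as rtr are assumed
#  to be available from an earlier cell of this notebook)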
# Simulate 2D 'blob' phantom
blobs = binary_blobs(length=128, n_dim=2, volume_fraction=0.1, seed=1)
# Make specimen thin in third dimension
groundtruth = np.concatenate([np.zeros((128,10,128)), np.tile(blobs.reshape(128,1,128), (1,10,1)),
np.zeros((128,12,128))], axis=1).astype('f4')
# Simulate data with X-ray transform:
# Define tomography geometry
angles, detector = np.linspace(0, np.pi, 30), (128,128)
# create null data consistent with desired geometry
null_data = rtr.tomo_data(np.zeros((detector[0], len(angles), detector[1])), angles, degrees=False)
Xray3 = null_data.getOperator(vol_shape=groundtruth.shape, backend='astra')
volume, proj_geom = Xray3.vshape, Xray3.sshape
raw_data = (Xray3 * groundtruth.ravel()).reshape(proj_geom)
# Normalise data
raw_data -= raw_data.min()
raw_data /= raw_data.max()
# Create custom data object
data = rtr.tomo_data(raw_data, angles, degrees=False)
###Output
_____no_output_____
###Markdown
A more typical example would be to load data from files, as in the code below. This is then cast to a custom data-object, the syntax of which is:- `stack_dim` dimension should be the same length as the number of projections - `tilt_axis` should be the dimension in which the scan is being performed slice-wise.- `degrees` is a flag which allows switching from radiansAfter initialisation, `data` object will have shape equal to `(number of slices, number of projections, width of detector)`
###Code
# from skimage.io import imread
# angles = np.loadtxt('myangles.txt')
# raw_data = imread('mydata.tif').astype('float32')
# data = rtr.tomo_data(raw_data, angles, degrees=True,
# tilt_axis=1, stack_dim=0)
###Output
_____no_output_____
###Markdown
Creating projector for reconstruction`backend` and `GPU` are just default values so they could be left out.Possible backends are `'astra'` or `'skimage'`. The volume shape can be chosen freely although each individual pixel of the detector will be physically 'square'. This allows us to control the thickness of the reconstruction.
###Code
# vol_shape = (n. slices, thickness, width of detector)
vol_shape = (data.shape[0],32,data.shape[2])
projector = data.getOperator(vol_shape=vol_shape, backend='astra', GPU=True)
###Output
_____no_output_____
###Markdown
Various reconstructionsThese algorithms are not well documented but should hopefully be self-explanatory. I will try to list the most relevant parameters for each algorithm here. FBP- `filter`: The filter used for inversion. The astra backend supports many filters, `'ram-lak'` is the default, see `help(fbp)` for more details. The skimage backend supports - ramp (default) - shepp-logan - cosine - hamming - hann- `min_val`: The reconstruction will be thresholded at this level after the back-projection- `max_val`: The reconstruction will be thresholded above this level after the back-projection
###Code
fbp = rtr.FBP(filter='Ram-Lak', min_val=0, max_val=None)
fbp_recon = fbp.run(data=data,op=projector)
###Output
_____no_output_____
###Markdown
General iterative methods: SIRTNote it is only available with astra backend- `iterations`: Number of iterations to perform, default is 100- `min_val`: The reconstruction will be thresholded at this level after the back-projection- `max_val`: The reconstruction will be thresholded above this level after the back-projection
###Code
sirt = rtr.SIRT()
sirt_recon = sirt.run(data=data,op=projector,iterations=10,
min_val=0,max_val=None)
###Output
_____no_output_____
###Markdown
SART- `iterations`: Number of iterations to perform, default is 100- `min_val`: The reconstruction will be thresholded at this level after the back-projection- `max_val`: The reconstruction will be thresholded above this level after the back-projection
###Code
sart = rtr.SART()
sart_recon = sart.run(data=data,op=projector,iterations=20,
min_val=0,max_val=None)
###Output
_____no_output_____
###Markdown
WaveletsImplements a variational model of the form $\min_{u} \frac{1}{2}|op\cdot u-data|_2^2 + weight*|Wu|_1$ where $W$ is a wavelet transform.- `weight`: The weight of regularisation, `weight=1` will be over-smoothed and `weight=0` will be noisy.- `wavelet`: Choice of wavelet family, default is `wavelet='haar'`. Other popular choices are `'db1','db2',...` which refer to the Daubechies wavelets.
###Code
alg = rtr.Wavelet(vol_shape, wavelet='db2')
wave_recon = alg.run(data=data, op=projector, maxiter=100, weight=3e-3,
callback=('primal', 'gap', 'step'))[0]
###Output
Iter Time primal gap step
0% 0s 2.291e-02 9.970e-01 0.000e+00
10% 1s 1.780e-03 3.766e-02 1.694e-02
20% 2s 1.710e-03 2.525e-02 6.969e-03
30% 3s 1.706e-03 2.491e-02 3.493e-03
40% 4s 1.706e-03 2.529e-02 8.027e-04
50% 4s 1.706e-03 2.529e-02 3.678e-04
60% 5s 1.706e-03 2.530e-02 3.677e-04
70% 6s 1.706e-03 2.532e-02 2.907e-04
80% 7s 1.706e-03 2.531e-02 2.724e-04
90% 7s 1.706e-03 2.529e-02 1.977e-04
100% 8s 1.706e-03 2.532e-02 2.441e-04
###Markdown
Iterative methods using PDHG algorithmThe following reconstruction algorithms are grouped together because they use the Primal-Dual Hybrid Gradient algorithm and so share many parameters in the `alg.run` method. See `help(rtr.PDHG)` for references. TVImplements a variational model of the form $\min_u \frac{1}{2}|op\cdot u-data|_2^2 + weight*|\nabla u|_1$- `order`: The order of derivative to use, i.e. `order=1` will result in piece-wise constant reconstructions and `order=2` corresponds to piece-wise linear or 'TV2' regularisation - `weight`: The weight of regularisation, `weight=1` will be over-smoothed and `weight=0` will be noisy.
###Code
alg, weight = rtr.TV(vol_shape, order=1), 0.1
###Output
_____no_output_____
###Markdown
TGVImplements a variational model of the form $\min_{u,v} \frac{1}{2}|op\cdot u-data|_2^2 + weight_1*|\nabla u - v|_1 + weight_2*|\nabla v|_1$- `weight`: Similar to before. TGV requires two weights, the first on the first order gradient penalty and the second on the second order gradient. If only one weight is provided then it is assumed that they are equal. Again, a good range is between 0 and 1.
###Code
# alg, weight = rtr.TGV(vol_shape), 0.1
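# As described above, TGV takes two weights (first- and second-order); assuming the run method
# accepts a pair of weights, an equivalent explicit call would be, e.g.:
# alg, weight = rtr.TGV(vol_shape), (0.1, 0.1)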
###Output
_____no_output_____
###Markdown
Performing PDHG reconstructionsAll iterative reconstructions are performed using the primal-dual hybrid gradient method. There are two parameters which should not affect the reconstruction but do affect the speed of convergence.- `balance`: This is technically the ratio between primal and dual step-sizes. The default is 1, somewhere between 0.1 and 10 is a reasonable range. - `steps`: Either `None`, `'backtrack'` (default), or `'adaptive'`. `None` is standard but occasionally produced a chequerboard-like artefact in reconstructions due to some quirks of the astra toolbox. This should be fixed now but is definitely fixed by `'backtrack'`. `'Adaptive'` should find a good value of *balance* automatically, but defining the best value is very subjective. If you're happy with your choice of *balance*, then choosing `None` will be slightly faster than the other two methods.The parameters for tracking and printing convergence metrics are:- `callback`: A list of printouts to show over time, if it's `None` (default) then it will not print anything. - `callback_freq`: How often to print (and compute) the values, default is every 10 iterations. The values I have given below should be plenty to see if the algorithms have converged. - `primal` is the value of the function you are minimising, if it stops going down then you have minimised the function- `gap` and `feasibility` are slightly better measures of how close you are to optimality, if both values are smaller than 1e-4 then the algorithm is probably converged.- `step` (not to be confused with `steps`) measures how much your reconstruction is changing between iterations, this is also a proxy for a measure of the size of the gradient which should be going to 0. Again, if this goes below about 1e-4/1e-5 then the reconstruction has probably converged.
###Code
maxiter = 100
balance = 1
steps = 'adaptive'
recon = alg.run(data=data,op=projector, maxiter=maxiter, weight=weight,
balance=balance, steps=steps,
callback=('primal','gap','violation','step'))[0]
if steps=='adaptive':
print('Optimal balance was: %.3f'% ((alg.s/alg.t)**.5))
###Output
Iter Time primal gap violation step
0% 1s 1.680e+03 1.000e+00 0 0.000e+00
10% 2s 4.114e+02 7.620e-03 1.216e-01 1.861e-01
20% 3s 3.957e+02 6.400e-02 4.881e-02 5.581e-02
30% 5s 3.779e+02 7.105e-02 3.277e-02 2.848e-02
40% 6s 3.541e+02 5.107e-02 2.783e-02 1.652e-02
50% 8s 3.466e+02 4.092e-02 1.451e-02 1.055e-02
60% 9s 3.415e+02 2.906e-02 1.681e-02 1.034e-02
70% 11s 3.394e+02 2.312e-02 9.108e-03 7.953e-03
80% 12s 3.373e+02 2.000e-02 7.648e-03 6.312e-03
90% 13s 3.355e+02 1.816e-02 6.049e-03 5.143e-03
100% 15s 3.344e+02 1.679e-02 6.874e-03 6.422e-03
Optimal balance was: 3.008
###Markdown
Plotting the reconstructionThe returned reconstruction is a standard numpy array and can be plotted or saved with standard tools. These plots are more to give a flavour of which coordinate axes of the reconstruction correspond to the frame of reference of the original data.
###Code
plt.figure('Plots of data/reprojection', figsize=(9.8,3)); params = {'vmin':data.min(), 'vmax':data.max(), 'aspect':'equal'}
plt.subplot(131)
plt.imshow(data[:,abs(angles).argmin()], **params)
plt.title('0-angle data')
plt.subplot(132)
plt.imshow(recon.sum(1), **params)
plt.title('y-sum of recon')
plt.subplot(133)
plt.imshow((projector*recon.reshape(-1)).reshape(data.shape)[:,abs(angles).argmin()], **params)
plt.title('0-angle projection of recon')
plt.show()
###Output
_____no_output_____
###Markdown
Comparison with filtered back-projectionNote that these weights have been chosen assuming there is some noise on the data, so the structures are blurred out a little. The FBP is exact for the sum down the y-axis because it is in the data.
###Code
f,ax = plt.subplots(3,3,gridspec_kw={'height_ratios': [1, 3,3]}, figsize=(9.8,7),num='Slice comparison')
aspect='equal'
for i in range(3):
    ax[0,i].set_title(('Wavelet','TV','FBP')[i]+' recon')
ax[i,0].set_ylabel('xyz'[i] + '-sum')
ax[i,0].imshow(wave_recon.sum(i), vmin=0, vmax=1, aspect=aspect)
ax[i,1].imshow(recon.sum(i), vmin=0, vmax=1, aspect=aspect)
ax[i,2].imshow(fbp_recon.sum(i), vmin=0, vmax=1, aspect=aspect)
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Saving output arrays
###Code
# recon.astype('float32').tofile('myfile.raw')
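# Alternatively, a NumPy .npy file keeps dtype and shape metadata (illustrative; 'myrecon.npy' is a placeholder name):
# np.save('myrecon.npy', recon)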
###Output
_____no_output_____ |
notebooks/aula_03.ipynb | ###Markdown
**EXERCISE: LINEAR REGRESSION MODEL AND CROSS-VALIDATION (K-FOLD)**The dataset used in this exercise contains real-estate data for houses on sale.
###Code
import pandas as pd
import numpy as np
from sklearn import linear_model as lm
from sklearn import datasets
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split, KFold, cross_val_score, cross_val_predict
#from mlxtend.evaluate import bias_variance_decomp
import matplotlib.pyplot as plt
import matplotlib_venn
###Output
_____no_output_____
###Markdown
Importing a library that is not in ColaboratoryTo import a library that's not in Colaboratory by default, you can use `!pip install` or `!apt-get install`.
###Code
# Installing libraries
#!pip install matplotlib-venn
#!pip install Bias
#!pip install mse
#!apt-get -qq install -y libfluidsynth1
###Output
_____no_output_____
###Markdown
**Importing the Dataframe from Google Drive**
###Code
#from google.colab import drive
#drive.mount('/content/drive')
data=pd.read_csv('https://raw.githubusercontent.com/hinessacaminha/mentoring-ml/main/exercicios/regressao/real-estate.csv')
data.head()
# removing the unneeded columns
data.drop(columns=['No', 'X1 transaction date'],inplace=True)
data.head()
###Output
_____no_output_____
###Markdown
a. Compute basic statistics using describe;
###Code
data.describe()
###Output
_____no_output_____
###Markdown
b. Check whether there is any missing data. If so, handle it in whatever way you find most appropriate for the context of the problem;
###Code
# if there is any missing data, isnull() combined with sum() will find it and count it
data.isnull()
data.isnull().sum()
###Output
_____no_output_____
###Markdown
c. Check whether there are duplicated records. If so, handle them appropriately.
###Code
data.duplicated()
###Output
_____no_output_____
###Markdown
d. Compute the correlation between the attributes using **pandas' corr function with the Pearson method**. What do you observe in the correlation matrix? Make a scatter plot and look for visual evidence that confirms the correlation values found in the matrix. ***The correlation ranges from -1 to 1, where:*** * 0.9 to 1, positive or negative, indicates a very strong correlation.* 0.7 to 0.9, positive or negative, indicates a strong correlation.* 0.5 to 0.7, positive or negative, indicates a moderate correlation.* 0.3 to 0.5, positive or negative, indicates a weak correlation.* 0 to 0.3, positive or negative, indicates a negligible correlation.
###Code
# the correlation ranges from -1 to 1
data.corr('pearson')
###Output
_____no_output_____
###Markdown
Continuing the question:
###Code
# scatter plots of each feature against the house price (Hinessa, help me understand these correlations in the plot)
plt.scatter(data['X2 house age'], data['Y house price of unit area'])
plt.scatter(data['X3 distance to the nearest MRT station'], data['Y house price of unit area'], color= 'green')
plt.scatter(data['X4 number of convenience stores'], data['Y house price of unit area'], color = 'red')
plt.scatter(data['X5 latitude'], data['Y house price of unit area'], color = 'pink')
plt.scatter(data['X6 longitude'], data['Y house price of unit area'], color = 'orange')
###Output
_____no_output_____
###Markdown
e. Check the data values and standardize them to the same scale. You can use scikit-learn's MinMaxScaler function for this.
###Code
data2 = data.values
X = data2[:, :-1]
Y = data2[:, -1]
scaler = MinMaxScaler()
scaler.fit(X, Y)
scaled = scaler.transform(X)
X2 = scaled[:, :-1]
###Output
_____no_output_____
###Markdown
**Creating the model**
###Code
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
lr = lm.LinearRegression()
model = lr.fit(x_train, y_train)
predicted = lr.predict(x_test)
from mlxtend.evaluate import bias_variance_decomp  # needed for the decomposition below (requires the mlxtend package)
mse, bias, var = bias_variance_decomp(lr, x_train, y_train, x_test, y_test, loss='mse')
print(mse, bias, var)
model.score(x_test, y_test)
r2_score(y_test, predicted)
mean_squared_error(y_test, predicted)
plt.scatter(y_test, predicted)
plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], color='blue')
plt.ylabel("Prediรงรฃo")
plt.xlabel("Real")
plt.show()
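# Illustrative sketch of the k-fold cross-validation mentioned in the exercise title
# (uses the KFold/cross_val_score imports above; 5 folds, shuffling and R2 scoring are assumptions):
kf = KFold(n_splits=5, shuffle=True, random_state=42)
cv_scores = cross_val_score(lm.LinearRegression(), X, Y, cv=kf, scoring='r2')
print('R2 per fold:', cv_scores, 'mean:', cv_scores.mean())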
###Output
_____no_output_____ |
notebooks/Importing data from statistical software packages.ipynb | ###Markdown
Importing data from statistical software packagesThere are also other commonly used statistical software packages: SAS, STATA and SPSS. Each of them has its own file format. haven`haven` is an extremely easy-to-use package to import data from three software packages: SAS, STATA and SPSS. Depending on the software, you use different functions:* **SAS:** `read_sas()`* **STATA:** `read_dta()` (or `read_stata()`, which are identical)* **SPSS:** `read_sav()` or `read_por()`, depending on the file type.We'll be working with data on the age, gender, income, and purchase level (0 = low, 1 = high) of 36 individuals (Source: SAS). The information is stored in a SAS file, `sales.sas7bdat`, which is available in your current working directory. You can also download the data [here](http://s3.amazonaws.com/assets.datacamp.com/production/course_1478/datasets/sales.sas7bdat). Import SAS with haven
###Code
library(haven)
sales = read_sas("http://s3.amazonaws.com/assets.datacamp.com/production/course_1478/datasets/sales.sas7bdat")
str(sales)
print(sales)
###Output
# A tibble: 431 x 4
purchase age gender income
<dbl> <dbl> <chr> <chr>
1 0 41 Female Low
2 0 47 Female Low
3 1 41 Female Low
4 1 39 Female Low
5 0 32 Female Low
6 0 32 Female Low
7 0 33 Female Low
8 0 45 Female Low
9 0 43 Female Low
10 0 40 Female Low
# ... with 421 more rows
###Markdown
Import STATA data with havenTo import Stata data, you can use `read_dta()`. The data is on yearly import and export numbers of sugar, both in USD and in weight. The data can be found at: http://assets.datacamp.com/production/course_1478/datasets/trade.dta
###Code
sugar = read_dta("http://assets.datacamp.com/production/course_1478/datasets/trade.dta")
str(sugar)
sugar$Date = as.Date(as_factor(sugar$Date))
str(sugar)
print(sugar)
sugar
plot(sugar$Import,sugar$Weight_I)
###Output
_____no_output_____
###Markdown
The import figures in USD and the import figures in weight are rather positively correlated.You can spot an increasing trend among the data points. This of course makes sense: the more sugar is traded, the higher the weight that's traded. Import SPSS data with havenThe `haven` package can also import data files from SPSS. Depending on the SPSS data file you're working with, you'll need either `read_sav()` - for **`.sav`** files - or `read_por()` - for **`.por`** files.We will work with data on four of the Big Five personality traits for 434 persons (Source: [University of Bath](http://staff.bath.ac.uk/pssiw/stats2/page16/page16.html)). The Big Five is a psychological concept including, originally, five dimensions of personality to classify human personality. The SPSS dataset is called [person.sav](http://s3.amazonaws.com/assets.datacamp.com/production/course_1478/datasets/person.sav)
###Code
traits = read_sav("http://s3.amazonaws.com/assets.datacamp.com/production/course_1478/datasets/person.sav")
summary(traits)
subset(traits, Agreeableness>40 & Extroversion>40)
###Output
_____no_output_____
###Markdown
The data below involves information on employees and their demographic and economic attributes (Source: [QRiE](http://cehd.gmu.edu/book/dimitrov/spss)). The data can be found on the following URL:http://s3.amazonaws.com/assets.datacamp.com/production/course_1478/datasets/employee.sav
###Code
work = read_sav("http://s3.amazonaws.com/assets.datacamp.com/production/course_1478/datasets/employee.sav")
summary(work)
summary(work$GENDER)
work$GENDER = as_factor(work$GENDER)
summary(work$GENDER)
###Output
_____no_output_____
###Markdown
Importing data on the US presidential elections in the year 2000. The data in `florida.dta` contains the total numbers of votes for each of the four candidates as well as the total number of votes per election area in the state of Florida (Source: [Florida Department of State](http://results.elections.myflorida.com/)). The file can be downloaded [here](http://s3.amazonaws.com/assets.datacamp.com/production/course_1478/datasets/florida.dta)
###Code
florida = read_dta("http://s3.amazonaws.com/assets.datacamp.com/production/course_1478/datasets/florida.dta")
tail(florida)
###Output
_____no_output_____
###Markdown
working with socio-economic variables from different countries (Source: [Quantative Data Analysis in Education](http://cw.routledge.com/textbooks/9780415372985/resources/datasets.asp)). The SPSS data is in a file called `international.sav`, which is in your working directory. You can also download it [here](http://s3.amazonaws.com/assets.datacamp.com/production/course_1478/datasets/international.sav)
###Code
international = read_sav("http://s3.amazonaws.com/assets.datacamp.com/production/course_1478/datasets/international.sav")
boxplot(international$gdp)
cor(international$gdp, international$f_illit)
###Output
_____no_output_____ |
series-dsa/basic-algorithms.ipynb | ###Markdown
Basic AlgorithmsBefore we dive into more complex data structures and problem-solving approaches, let's cover a few basic algorithms baked into Python. String Manipulation
###Code
s = "......"
# manipulation function
s.lower()
s.upper()
# tokenisation
s.split(' ')
# substring
start = 0
end = 1
print(s[start:end])
# size
print(len(s))
# strings are immutable and individual character can't be changes
# string can be converted to list of char and back to string after changing
char_arr = list(s)
i = 2
char_arr[i] = 'a'
s = ''.join(char_arr)
s
###Output
.
6
###Markdown
SortingA list, or any iterable of numbers or more complex records, supports sorting via the built-in sort routines. Sorting also works for other primitives and complex data structures with the help of a comparator.
###Code
iterable = [1,2,5,7,0]
# built-in sorted func to sort an iterable
res = sorted(iterable, key=lambda x: x)
print(res)
# sort iterable in place in ascending order
iterable.sort()
iterable
###Output
[0, 1, 2, 5, 7]
###Markdown
sort with comparator function
###Code
import functools
def compare(item1, item2):
return item1 - item2
sorted(iterable, key=functools.cmp_to_key(compare))
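# The same idea extends to more complex records (illustrative; 'people' is a made-up list):
people = [{'name': 'amy', 'age': 30}, {'name': 'bob', 'age': 25}]
sorted(people, key=lambda p: p['age'])                                            # sort dicts by a field
sorted(people, key=functools.cmp_to_key(lambda p1, p2: p1['age'] - p2['age']))    # same result via a comparator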
###Output
_____no_output_____
###Markdown
Reverse Iterable
###Code
iterable = [1,3,5,6]
# built-in function in-place
iterable.reverse()
print(iterable)
# returns a reversed list, works on strings (str) too
print(iterable[::-1])
s = "abcd"
s[::-1]
###Output
[6, 5, 3, 1]
[1, 3, 5, 6]
###Markdown
Map-Reduce
###Code
# map can be used to apply certain function on every list member
numbers = (1, 2, 3, 4)
result = map(lambda x: x + x, numbers)
# it returns an iterator of type map
print(type(result))
# result can be converted to list before iteraing
print(list(result))
# original list remains intact
print(numbers)
# reduce a list using some function
# importing functools for reduce()
import functools
lis = [ 1 , 3, 5, 6, 2, ]
# sum of list
print (functools.reduce(lambda a,b : a+b,lis))
# max element
print (functools.reduce(lambda a,b : a if a > b else b,lis))
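# reduce also accepts an optional initial value (illustrative):
functools.reduce(lambda a, b: a + b, lis, 100)  # 100 + sum of the list -> 117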
###Output
17
6
|
python/docs/source/examples/search_speech_tagger.ipynb | ###Markdown
Search - Speech Tagger This tutorial walks you through writing learning to search code using the VW python interface. Once you've completed this, you can graduate to the C++ version, which will be faster for the computer but more painful for you.The "learning to search" paradigm solves problems that look like the following. You have a sequence of decisions to make. At the end of making these decisions, the world tells you how bad your decisions were. You want to condition later decisions on earlier decisions. But thankfully, at "training time," you have access to an *oracle* that will tell you the right answer. Let's start with a simple example: sequence labeling for Part of Speech tagging. The goal is to take a sequence of words ("the monster ate a big sandwich") and label them with their parts of speech (in this case: Det Noun Verb Det Adj Noun).We will choose to solve this problem with left-to-right search. I.e., we'll label the first word, then the second then the third and so on.For any vw project in python, we have to start by importing the pyvw library:
###Code
from __future__ import print_function
from vowpalwabbit import pyvw
###Output
_____no_output_____
###Markdown
Now, let's define our data first. We'll do this first by defining the labels (one annoying thing is that labels in vw have to be integers):
###Code
DET = 1
NOUN = 2
VERB = 3
ADJ = 4
my_dataset = [ [(DET , 'the'),
(NOUN, 'monster'),
(VERB, 'ate'),
(DET , 'a'),
(ADJ , 'big'),
(NOUN, 'sandwich')],
[(DET , 'the'),
(NOUN, 'sandwich'),
(VERB, 'was'),
(ADJ , 'tasty')],
[(NOUN, 'it'),
(VERB, 'ate'),
(NOUN, 'it'),
(ADJ , 'all')] ]
print(my_dataset[2])
###Output
_____no_output_____
###Markdown
Here we have an example of a (correctly) tagged sentence.Now, we need to write the structured prediction code. To do this, we have to write a new class that derives from the `pyvw.SearchTask` class.This class *must* have two functions: `__init__` and `_run`.The initialization function takes three arguments (plus `self`): a vw object (`vw`), a search object (`sch`), and the number of actions (`num_actions`) that this object has been initialized with. Within the initialization function, we must first initialize the parent class, and then we can set whatever options we want via `sch.set_options(...)`. Of course we can also do whatever additional initialization we like.The `_run` function executes the sequence of decisions on a given input. The input will be of whatever type our data is (so, in the above example, it will be a list of (label,word) pairs).Here is a basic implementation of sequence labeling:
###Code
class SequenceLabeler(pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
# you must must must initialize the parent class
# this will automatically store self.sch <- sch, self.vw <- vw
pyvw.SearchTask.__init__(self, vw, sch, num_actions)
# set whatever options you want
sch.set_options( sch.AUTO_HAMMING_LOSS | sch.AUTO_CONDITION_FEATURES )
def _run(self, sentence): # it's called _run to remind you that you shouldn't call it directly!
output = []
for n in range(len(sentence)):
pos,word = sentence[n]
# use "with...as..." to guarantee that the example is finished properly
ex = self.vw.example({'w': [word]})
pred = self.sch.predict(examples=ex, my_tag=n+1, oracle=pos, condition=[(n,'p'), (n-1, 'q')])
self.vw.finish_example([ex]) # must pass the example in as a list because search is a MultiEx reduction
output.append(pred)
return output
###Output
_____no_output_____
###Markdown
Let's unpack this a bit.The `__init__` function is simple. It first calls the parent initializer and then sets some options. The options it sets are two things designed to make the programmer's life easier. The first is `AUTO_HAMMING_LOSS`. Remember earlier we said that when the sequence of decision is made, you have to say how bad it was? This says that we want this to be computed automatically by comparing the individual decisions to the oracle decisions, and defining the loss to be the sum of incorrect decisions.The second is `AUTO_CONDITION_FEATURES`. This is a bit subtler. Later in the `_run` function, we will say that the label of the `n`th word depends on the label of the `n-1`th word. In order to get the underlying classifier to *pay attention* to that conditioning, we need to add features. We could do that manually (we'll do this later) or we can ask vw to do it automatically for us. For simplicity, we choose the latter.The `_run` function takes a sentence (list of pos/word pairs) as input. We loop over each word position `n` in the sentence and extract the `pos,word` pair. We then construct a VW example that consists of a single feature (the `word`) in the 'w' namespace. Given that example `ex`, we make a search-based prediction by calling `self.sch.predict(...)`. This is a fairly complicated function that takes a number of arguments. Here, we are calling it with the following: - `examples=ex`: This tells the predictor what features to use. - `my_tag=n+1`: In general, we want to condition the prediction of the `n`th label on the `n-1`th label. In order to do this, we have to give each prediction a "name" so that we can refer back to it in the future. This name needs to be an integer `>= 1`. So we'll call the first word `1`, the second word `2`, and so on. It has to be `n+1` and not `n` because of the 1-based requirement. - `oracle=pos`: As mentioned before, on training data, we need to tell the system what the "true" (or "best") decision is at this point in time. Here, it is the given part of speech label. - `condition=(n,'p')`: This says that this prediction depends on the output of whichever-prediction-was-called-`n`, and that the "nature" of that condition is called 'p' (for "predecessor" in this case, though this is entirely up to you)Now, we're ready to train the model. We do this in three steps. First, we initialize a vw object, telling it that we have a `--search` task with 4 labels, second that the specific type of `--search_task` is `hook` (you will always use the `hook` task) and finally that we want it to be quiet and use a larger `ring_size` (you can ignore the `ring_size` for now).
###Code
vw = pyvw.vw("--search 4 --audit --quiet --search_task hook --ring_size 1024")
###Output
_____no_output_____
###Markdown
Next, we need to initialize the search task. We use the `vw.init_search_task` function to do this:
###Code
sequenceLabeler = vw.init_search_task(SequenceLabeler)
###Output
_____no_output_____
###Markdown
Finally, we can train on the dataset we defined earlier, using `sequenceLabeler.learn` (the `.learn` function is inherited from the `pyvw.SearchTask` class). The `.learn` function takes any iterator over data. Whatever type of data it iterates over is what it will feed to your `_run` function.
###Code
for i in range(10):
sequenceLabeler.learn(my_dataset)
###Output
_____no_output_____
###Markdown
Of course, we want to see if it's learned anything. So let's create a single test example:
###Code
test_example = [ (1,w) for w in "the sandwich ate a monster".split() ]
print(test_example)
###Output
_____no_output_____
###Markdown
We've used a constant dummy label (`1` for every word) so you can be sure that vw isn't just echoing the labels and is actually making predictions:
###Code
out = sequenceLabeler.predict(test_example)
print(out)
###Output
_____no_output_____
###Markdown
If we look back at our POS tag definitions, this is DET NOUN VERB DET NOUN, which is indeed correct! Removing the AUTO features In the above example we used both AUTO_HAMMING_LOSS and AUTO_CONDITION_FEATURES. To make more explicit what these are doing, let's rewrite our `SequenceLabeler` class without them! Here's a version that gets rid of both simultaneously. It is only modestly more complex:
###Code
class SequenceLabeler2(pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
pyvw.SearchTask.__init__(self, vw, sch, num_actions)
def _run(self, sentence):
output = []
loss = 0.
for n in range(len(sentence)):
pos,word = sentence[n]
prevPred = output[n-1] if n > 0 else '<s>'
ex = self.vw.example({'w': [word], 'p': [prevPred]})
pred = self.sch.predict(examples=ex, my_tag=n+1, oracle=pos, condition=(n,'p'))
vw.finish_example([ex])
output.append(pred)
if pred != pos:
loss += 1.
self.sch.loss(loss)
return output
sequenceLabeler2 = vw.init_search_task(SequenceLabeler2)
sequenceLabeler2.learn(my_dataset)
print(sequenceLabeler2.predict( [(1,w) for w in "the sandwich ate a monster".split()] ))
###Output
_____no_output_____
###Markdown
If executed correctly, this should have printed `[1, 2, 3, 1, 2]`.There are essentially two things that changed here. In order to get rid of AUTO_HAMMING_LOSS, we had to keep track of how many errors the predictor had made. This is done by checking whether `pred != pos` inside the inner loop, and then at the end calling `self.sch.loss(loss)` to tell the search procedure how well we did.In order to get rid of AUTO_CONDITION_FEATURES, we need to explicitly add the previous prediction as features to the example we are predicting with. Here, we've done this by extracting the previous prediction (`prevPred`) and explicitly adding it as a feature (in the 'p' namespace) during the example construction.**Important Note:** even though we're not using AUTO_CONDITION_FEATURES, we *still* must tell the search process that this prediction depends on the previous prediction. We need to do this because the learning algorithm automatically memoizes certain computations, and so it needs to know that, when memoizing, to remember that this prediction *might have been different* if a previous decision were different. Very silly Covington-esqu dependency parsing Let's also try a variant of dependency parsing to see that this doesn't work just for sequence-labeling list tasks. First we need to define some data:
###Code
# the label for each word is its parent, or -1 for root
my_dataset = [ [("the", 1), # 0
("monster", 2), # 1
("ate", -1), # 2
("a", 5), # 3
("big", 5), # 4
("sandwich", 2) ] # 5
,
[("the", 1), # 0
("sandwich", 2), # 1
("is", -1), # 2
("tasty", 2)] # 3
,
[("a", 1), # 0
("sandwich", 2), # 1
("ate", -1), # 2
("itself", 2), # 3
]
]
###Output
_____no_output_____
###Markdown
For instance, in the first sentence, the parent of "the" is "monster"; the parent of "monster" is "ate"; and "ate" is the root.The basic idea of a Covington-style dependency parser is to loop over all O(N^2) word pairs and ask if one is the parent of the other. In a real parser you would want to make sure that you don't have cycles, that you have a unique root and (perhaps) that the resulting graph is projective. I'm not doing that here. Hopefully I'll add a shift-reduce parser example later that *does* do this. Here's an implementation of this idea:
###Code
class CovingtonDepParser(pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
pyvw.SearchTask.__init__(self, vw, sch, num_actions)
sch.set_options( sch.AUTO_HAMMING_LOSS | sch.AUTO_CONDITION_FEATURES )
def _run(self, sentence):
N = len(sentence)
# initialize our output so everything is a root
output = [-1 for i in range(N)]
for n in range(N):
wordN,parN = sentence[n]
for m in range(-1,N):
if m == n: continue
                wordM = sentence[m][0] if m >= 0 else "*root*"
# ask the question: is m the parent of n?
isParent = 2 if m == parN else 1
# construct an example
dir = 'l' if m < n else 'r'
ex = self.vw.example({'a': [wordN, dir + '_' + wordN],
                                   'b': [wordM, dir + '_' + wordM],
'p': [wordN + '_' + wordM, dir + '_' + wordN + '_' + wordM],
'd': [ str(m-n <= d) + '<=' + str(d) for d in [-8, -4, -2, -1, 1, 2, 4, 8] ] +
[ str(m-n >= d) + '>=' + str(d) for d in [-8, -4, -2, -1, 1, 2, 4, 8] ] })
pred = self.sch.predict(examples = ex,
my_tag = (m+1)*N + n + 1,
oracle = isParent,
condition = [ (max(0, (m )*N + n + 1), 'p'),
(max(0, (m+1)*N + n ), 'q') ])
self.vw.finish_example([ex]) # must pass the example in as a list because search is a MultiEx reduction
if pred == 2:
output[n] = m
break
return output
###Output
_____no_output_____
###Markdown
In this, `output` stores the predicted tree and is initialized with every word being a root. We then loop over every word (`n`) and every possible parent (`m`, which can be -1, though that's really kind of unnecessary).The features are basically the words under consideration, the words paired with the direction of the edge, the pair of words, and then a bunch of (binned) distance features.We can train and run this parser with:
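(A quick aside first, added here for illustration and not part of the original notebook: the snippet below is plain Python, never touches vw, and the word indices `n` and `m` are made up. It only prints what the binned distance features in the 'd' namespace look like for one concrete pair, so you can see what the classifier is actually fed.)
###Code
# Illustrative sketch only: reproduce the 'd'-namespace feature strings for a
# hypothetical current word n and candidate parent m (values are made up).
n, m = 3, 1
bins = [-8, -4, -2, -1, 1, 2, 4, 8]
d_features = [str(m - n <= d) + '<=' + str(d) for d in bins] + \
             [str(m - n >= d) + '>=' + str(d) for d in bins]
print(d_features)
# m - n == -2 here, so this prints 'False<=-8', 'False<=-4', 'True<=-2', ...,
# 'True>=-8', ..., 'False>=-1', ... -- each string is a distinct binary feature,
# so the classifier effectively sees which distance bucket m - n falls into.
###Output
_____no_output_____
###Markdown
Now the training run itself: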
###Code
vw = pyvw.vw("--search 2 --quiet --search_task hook --ring_size 1024")
task = vw.init_search_task(CovingtonDepParser)
for p in range(10): # do ten passes over the training data
task.learn(my_dataset)
print('testing')
print(task.predict( [(w,-1) for w in "the monster ate a sandwich".split()] ))
print('should have printed [ 1 2 -1 4 2 ]')
###Output
_____no_output_____
###Markdown
One could argue that a more natural way to do this would be with LDF rather than the inner loop over `m`. We'll do that next. LDF-based Covington-style dependency parser One of the weirdnesses in the previous parser implementation is that it makes N-many binary decisions per word ("is word n my parent?") rather than a single N-way decision. The latter makes more sense.The challenge is that you cannot set this up as a vanilla multiclass classification problem because (a) the number of "classes" is a function of the input (a length N sentence will have N classes) and (b) class "1" and "2" don't mean anything consistently across examples.The way around this is label-dependent features (LDF). In LDF mode, the class ids are (essentially -- see caveat below) irrelevant. Instead, you simply provide features that depend on the label (hence "LDF"). In particular, for each possible label, you provide a *different* feature vector, and the goal of learning is to pick one of those as the "correct" one.Here's a re-implementation of Covington using LDF:
###Code
class CovingtonDepParserLDF(pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
pyvw.SearchTask.__init__(self, vw, sch, num_actions)
sch.set_options( sch.AUTO_HAMMING_LOSS | sch.IS_LDF | sch.AUTO_CONDITION_FEATURES )
def makeExample(self, sentence, n, m):
wordN = sentence[n][0]
wordM = sentence[m][0] if m >= 0 else '*ROOT*'
dir = 'l' if m < n else 'r'
ex = self.vw.example( { 'a': [wordN, dir + '_' + wordN],
'b': [wordM, dir + '_' + wordM],
'p': [wordN + '_' + wordM, dir + '_' + wordN + '_' + wordM],
'd': [ str(m-n <= d) + '<=' + str(d) for d in [-8, -4, -2, -1, 1, 2, 4, 8] ] +
[ str(m-n >= d) + '>=' + str(d) for d in [-8, -4, -2, -1, 1, 2, 4, 8] ] },
labelType=self.vw.lCostSensitive)
ex.set_label_string(str(m+2) + ":0") # project the m-indices (-1...N into 1...N+2)
return ex
def _run(self, sentence):
N = len(sentence)
# initialize our output so everything is a root
output = [-1 for i in range(N)]
for n in range(N):
# make LDF examples
examples = [ self.makeExample(sentence,n,m) for m in range(-1,N) if n != m ]
# truth
parN = sentence[n][1]
            oracle = parN+1 if parN < n else parN # +1 for the root entry at index 0; for parN > n the skipped n==m entry cancels that shift
# make a prediction
pred = self.sch.predict(examples = examples,
my_tag = n+1,
oracle = oracle,
condition = [ (n, 'p'), (n-1, 'q') ] )
            output[n] = pred-1 if pred <= n else pred # invert the index-to-parent mapping used for the oracle above
for ex in examples: ex.finish() # clean up
return output
###Output
_____no_output_____
###Markdown
There are a few things going on here. Let's focus first on the `__init__` function. The only difference here is that when we call `sch.set_options` we provide `sch.IS_LDF` to declare that this is an LDF task. Let's skip the `makeExample` function for a minute and look at the `_run` function. You should recognize most of this from the non-LDF version. We initialize the `output` (parent) of every word to `-1` (meaning that every word is connected to the root). For each word `n`, we construct `N`-many examples: one for every `m` in -1..(N-1), except for the current word `n` because you cannot have self-loops. If we were being more clever, we would only do the ones that won't result in the creation of a cycle, but we're not being clever. Now, because the "labels" are just examples, it's a bit more complicated to specify the oracle. The oracle is an *index* into the examples list. So if `oracle` is the oracle action, then `examples[oracle]` is the corresponding example. We compute the oracle as follows. `parN` is the *actual* parent, which is going to be something in the range `-1 .. (N-1)`. If `parN < n`, the correct index is `parN + 1` (the extra one accounts for the root entry at the front of the examples list). If `parN > n` (note: it cannot be equal to `n`) then, because `n == m` is left out of the examples list, the correct index is just `parN`. Phew. We then ask for a prediction. Now, instead of giving a single example, we give the list of examples. The tag works the same way, as does the conditioning. Once we get a prediction out (called `pred`) we need to figure out what parent it actually corresponds to. This is simply undoing the computation from two paragraphs ago! Finally -- and this is skippable if you trust the Python garbage collector -- we tell VW that we're done with all the examples we created. We do this just to be pedantic; it's optional. Okay, now let's go back to the `makeExample` function. This takes two word ids (`n` and `m`) and makes an example that roughly says "what would it look like if I had an edge from `n` to `m`?" We construct basically the same features as before. There are two major changes, though: 1. When we run `self.vw.example(...)` we provide `labelType=self.vw.lCostSensitive` as an argument. This is because under the hood, vw treats LDF examples as cost-sensitive classification examples. This means they need to have cost-sensitive labels, so that's how we need to create them. 2. We explicitly set the label of this example to `str(m+2)+":0"`. What is this? Well, this is _optional_ but recommended. Here's the issue. In LDF mode, recall that labels have no intrinsic meaning. This means that when vw does auto-conditioning, it's not really clear what to use as the "previous prediction." By giving explicit label names (in this case, m+2) we're recording the position of the last parent, which may be useful for predicting the next parent. We could avoid this necessity if we did our own feature engineering on the history, but for now, this seems to capture the right intuition. Given all this, we can now train and test our parser:
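(One more illustrative aside that is not part of the original notebook: the pure-Python snippet below spells out the index arithmetic described above, using a made-up sentence length `N`, word position `n` and candidate parents. It never calls vw; it just checks that `parN+1 if parN < n else parN` really indexes the example whose `m` equals the true parent.)
###Code
# Illustrative sketch only: verify the LDF oracle index arithmetic (made-up values).
N = 5   # pretend sentence length
n = 2   # pretend current word
ms = [m for m in range(-1, N) if m != n]    # the m behind each LDF example, in order
print(ms)                                   # [-1, 0, 1, 3, 4]
for parN in ms:                             # every possible true parent of word n
    oracle = parN + 1 if parN < n else parN
    assert ms[oracle] == parN               # the oracle index points at the true parent
print("index arithmetic checks out")
###Output
_____no_output_____
###Markdown
And here is the actual training and testing cell: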
###Code
vw = pyvw.vw("--search 0 --csoaa_ldf m --search_task hook --ring_size 1024 --quiet")
task = vw.init_search_task(CovingtonDepParserLDF)
# BUG: This currently does not work because oracle generation is incorrect (generates invalid oracle values)
# for p in range(2): # do two passes over the training data
# task.learn(my_dataset)
#print(task.predict( [(w,-1) for w in "the monster ate a sandwich".split()] ))
###Output
_____no_output_____
###Markdown
The correct parse of this sentence is `[1, 2, -1, 4, 2]` which is what this should have printed.There are two major things to notice in the initialization of VW. The first is that we say `--search 0`. The zero labels argument to `--search` declares that this is going to be an LDF task. We also have to tell VW that we want an LDF-enabled cost-sensitive learner, which is what `--csoaa_ldf m` does (if you're wondering, `m` means "multiline" -- just treat it as something you have to do). The rest should be familiar. A simple word-alignment model Okay, as a last example we'll do a simple word alignment model in the spirit of the IBM models. Note that this will be a *supervised* model; doing unsupervised stuff is a bit trickier.Here's some word alignment data. The dataset is triples of `E, A, F` where `A[i]` = list of words `E[i]` aligned to, or `[]` for null-aligned:
###Code
my_dataset = [
( "the blue house".split(),
([0], [2], [1]),
"la maison bleue".split() ),
( "the house".split(),
([0], [1]),
"la maison".split() ),
( "the flower".split(),
([0], [1]),
"la fleur".split() )
]
###Output
_____no_output_____
###Markdown
It's going to be useful to compute alignment mismatches at the word level between true alignments (like `[1,2]`) and predicted alignments (like `[2,3,4]`). We use intersection-over-union error:
###Code
def alignmentError(true, sys):
t = set(true)
s = set(sys)
if len(t | s) == 0: return 0.
return 1. - float(len(t & s)) / float(len(t | s))
###Output
_____no_output_____
###Markdown
And now we can define our structured prediction task. This is also an LDF problem. Basically for each word on the English side, we'll loop over all possible phrases on the Foreign side to which it could align (maximum phrase length of three). For each of these options we'll create an example to be fed into the LDF classifier. We also ensure that the same foreign word cannot be covered by multiple English words, though this might not be a good idea in general.
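(As a small added illustration that is not part of the original notebook, the plain-Python snippet below enumerates candidate foreign spans the same way the `j0`/`l` loops in the class below do, for a made-up three-word foreign sentence with one word already covered.)
###Code
# Illustrative sketch only: enumerate candidate F spans as the WordAligner's
# j0/l loops do, for made-up data.
F = "la maison bleue".split()
covered = {1: True}      # pretend F[1] ('maison') is already aligned to some E word
spans = [[]]             # the empty (null-alignment) span is always a candidate
for j0 in range(len(F)):
    for l in range(3):   # max phrase length of 3
        if j0 + l >= len(F):
            break
        if (j0 + l) in covered:
            break
        spans.append(list(range(j0, j0 + l + 1)))
print(spans)             # [[], [0], [2]] -- spans that would cross the covered word are cut off
###Output
_____no_output_____
###Markdown
Note how the inner loop stops as soon as a span would run past the end of F or hit an already-covered word. Here is the structured prediction task itself: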
###Code
class WordAligner(pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
pyvw.SearchTask.__init__(self, vw, sch, num_actions)
sch.set_options( sch.AUTO_HAMMING_LOSS | sch.IS_LDF | sch.AUTO_CONDITION_FEATURES )
def makeExample(self, E, F, i, j0, l):
f = 'Null' if j0 is None else [ F[j0+k] for k in range(l+1) ]
ex = self.vw.example( { 'e': E[i],
'f': f,
'p': '_'.join(f),
'l': str(l),
'o': [str(i-j0), str(i-j0-l)] if j0 is not None else [] },
labelType = self.vw.lCostSensitive )
lab = 'Null' if j0 is None else str(j0+l)
ex.set_label_string(lab + ':0')
return ex
def _run(self, alignedSentence):
E,A,F = alignedSentence
# for each E word, we pick a F span
covered = {} # which F words have been covered so far?
output = []
for i in range(len(E)):
examples = [] # contains vw examples
spans = [] # contains triples (alignment error, index in examples, [range])
# empty span:
examples.append( self.makeExample(E, F, i, None, None) )
spans.append( (alignmentError(A[i], []), 0, []) )
# non-empty spans
for j0 in range(len(F)):
for l in range(3): # max phrase length of 3
if j0+l >= len(F): break
                    if (j0+l) in covered: break
id = len(examples)
examples.append( self.makeExample(E, F, i, j0, l) )
spans.append( (alignmentError(A[i], range(j0,j0+l+1)), id, range(j0,j0+l+1)) )
sortedSpans = []
for s in spans: sortedSpans.append(s)
sortedSpans.sort()
oracle = []
for id in range(len(sortedSpans)):
if sortedSpans[id][0] > sortedSpans[0][0]: break
oracle.append( sortedSpans[id][1] )
pred = self.sch.predict(examples = examples,
my_tag = i+1,
oracle = oracle,
condition = [ (i, 'p'), (i-1, 'q') ] )
for ex in examples: ex.finish()
output.append( spans[pred][2] )
for j in spans[pred][2]:
covered[j] = True
return output
###Output
_____no_output_____
###Markdown
The only really complicated thing here is computing the oracle. What we do is, for each possible alignment, compute an intersection-over-union error rate. The oracle is then that alignment that achieves the smallest (local) error rate. This is not perfect, but is good enough. One interesting thing here is that now the `oracle` could be a *list*; this is completely supported by the underlying algorithms.We can train and test this model to make sure it does the right thing:
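(A final illustrative aside, not part of the original notebook: the snippet below shows, on made-up data, how an oracle *list* arises when several candidate spans tie for the lowest intersection-over-union error. It assumes the `alignmentError` cell above has been run, and it is equivalent to what the `sortedSpans` loop in the class does.)
###Code
# Illustrative sketch only: build the oracle set from tied best spans (made-up data).
true_alignment = [1]                               # pretend E[i] is truly aligned to F[1]
candidate_spans = [[], [0], [1], [0, 1], [2]]      # index k corresponds to LDF example k
errors = [alignmentError(true_alignment, s) for s in candidate_spans]
print(errors)                                      # [1.0, 1.0, 0.0, 0.5, 1.0]
best = min(errors)
oracle = [k for k, e in enumerate(errors) if e == best]
print(oracle)                                      # [2] -- every index tied for the best error is included
###Output
_____no_output_____
###Markdown
Back to the training and testing cell: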
###Code
vw = pyvw.vw("--search 0 --csoaa_ldf m --search_task hook --ring_size 1024 --quiet -q ef -q ep")
task = vw.init_search_task(WordAligner)
# BUG: This is currently broken due to incorrect oracle generation. Currently under investigation.
# for p in range(10):
# task.learn(my_dataset)
#print(task.predict( ("the blue flower".split(), ([],[],[]), "la fleur bleue".split()) ))
###Output
_____no_output_____
###Markdown
Search - Speech Tagger This tutorial walks you through writing learning to search code using the VW python interface. Once you've completed this, you can graduate to the C++ version, which will be faster for the computer but more painful for you.The "learning to search" paradigm solves problems that look like the following. You have a sequence of decisions to make. At the end of making these decisions, the world tells you how bad your decisions were. You want to condition later decisions on earlier decisions. But thankfully, at "training time," you have access to an *oracle* that will tell you the right answer. Let's start with a simple example: sequence labeling for Part of Speech tagging. The goal is to take a sequence of words ("the monster ate a big sandwich") and label them with their parts of speech (in this case: Det Noun Verb Det Adj Noun).We will choose to solve this problem with left-to-right search. I.e., we'll label the first word, then the second then the third and so on.For any vw project in python, we have to start by importing the pyvw library:
###Code
from __future__ import print_function
from vowpalwabbit import pyvw
###Output
_____no_output_____
###Markdown
Now, let's define our data first. We'll do this first by defining the labels (one annoying thing is that labels in vw have to be integers):
###Code
DET = 1
NOUN = 2
VERB = 3
ADJ = 4
my_dataset = [ [(DET , 'the'),
(NOUN, 'monster'),
(VERB, 'ate'),
(DET , 'a'),
(ADJ , 'big'),
(NOUN, 'sandwich')],
[(DET , 'the'),
(NOUN, 'sandwich'),
(VERB, 'was'),
(ADJ , 'tasty')],
[(NOUN, 'it'),
(VERB, 'ate'),
(NOUN, 'it'),
(ADJ , 'all')] ]
print(my_dataset[2])
###Output
_____no_output_____
###Markdown
Here we have an example of a (correctly) tagged sentence.Now, we need to write the structured prediction code. To do this, we have to write a new class that derives from the `pyvw.SearchTask` class.This class *must* have two functions: `__init__` and `_run`.The initialization function takes three arguments (plus `self`): a vw object (`vw`), a search object (`sch`), and the number of actions (`num_actions`) that this object has been initialized with. Within the initialization function, we must first initialize the parent class, and then we can set whatever options we want via `sch.set_options(...)`. Of course we can also do whatever additional initialization we like.The `_run` function executes the sequence of decisions on a given input. The input will be of whatever type our data is (so, in the above example, it will be a list of (label,word) pairs).Here is a basic implementation of sequence labeling:
###Code
class SequenceLabeler(pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
# you must must must initialize the parent class
# this will automatically store self.sch <- sch, self.vw <- vw
pyvw.SearchTask.__init__(self, vw, sch, num_actions)
# set whatever options you want
sch.set_options( sch.AUTO_HAMMING_LOSS | sch.AUTO_CONDITION_FEATURES )
def _run(self, sentence): # it's called _run to remind you that you shouldn't call it directly!
output = []
for n in range(len(sentence)):
pos,word = sentence[n]
# use "with...as..." to guarantee that the example is finished properly
ex = self.vw.example({'w': [word]})
pred = self.sch.predict(examples=ex, my_tag=n+1, oracle=pos, condition=[(n,'p'), (n-1, 'q')])
self.vw.finish_example([ex]) # must pass the example in as a list because search is a MultiEx reduction
output.append(pred)
return output
###Output
_____no_output_____
###Markdown
Let's unpack this a bit.The `__init__` function is simple. It first calls the parent initializer and then sets some options. The options it sets are two things designed to make the programmer's life easier. The first is `AUTO_HAMMING_LOSS`. Remember earlier we said that when the sequence of decision is made, you have to say how bad it was? This says that we want this to be computed automatically by comparing the individual decisions to the oracle decisions, and defining the loss to be the sum of incorrect decisions.The second is `AUTO_CONDITION_FEATURES`. This is a bit subtler. Later in the `_run` function, we will say that the label of the `n`th word depends on the label of the `n-1`th word. In order to get the underlying classifier to *pay attention* to that conditioning, we need to add features. We could do that manually (we'll do this later) or we can ask vw to do it automatically for us. For simplicity, we choose the latter.The `_run` function takes a sentence (list of pos/word pairs) as input. We loop over each word position `n` in the sentence and extract the `pos,word` pair. We then construct a VW example that consists of a single feature (the `word`) in the 'w' namespace. Given that example `ex`, we make a search-based prediction by calling `self.sch.predict(...)`. This is a fairly complicated function that takes a number of arguments. Here, we are calling it with the following: - `examples=ex`: This tells the predictor what features to use. - `my_tag=n+1`: In general, we want to condition the prediction of the `n`th label on the `n-1`th label. In order to do this, we have to give each prediction a "name" so that we can refer back to it in the future. This name needs to be an integer `>= 1`. So we'll call the first word `1`, the second word `2`, and so on. It has to be `n+1` and not `n` because of the 1-based requirement. - `oracle=pos`: As mentioned before, on training data, we need to tell the system what the "true" (or "best") decision is at this point in time. Here, it is the given part of speech label. - `condition=(n,'p')`: This says that this prediction depends on the output of whichever-prediction-was-called-`n`, and that the "nature" of that condition is called 'p' (for "predecessor" in this case, though this is entirely up to you)Now, we're ready to train the model. We do this in three steps. First, we initialize a vw object, telling it that we have a `--search` task with 4 labels, second that the specific type of `--search_task` is `hook` (you will always use the `hook` task) and finally that we want it to be quiet and use a larger `ring_size` (you can ignore the `ring_size` for now).
###Code
vw = pyvw.vw("--search 4 --audit --quiet --search_task hook --ring_size 1024")
###Output
_____no_output_____
###Markdown
Next, we need to initialize the search task. We use the `vw.init_search_task` function to do this:
###Code
sequenceLabeler = vw.init_search_task(SequenceLabeler)
###Output
_____no_output_____
###Markdown
Finally, we can train on the dataset we defined earlier, using `sequenceLabeler.learn` (the `.learn` function is inherited from the `pyvw.SearchTask` class). The `.learn` function takes any iterator over data. Whatever type of data it iterates over is what it will feed to your `_run` function.
###Code
for i in range(10):
sequenceLabeler.learn(my_dataset)
###Output
_____no_output_____
###Markdown
Of course, we want to see if it's learned anything. So let's create a single test example:
###Code
test_example = [ (1,w) for w in "the sandwich ate a monster".split() ]
print(test_example)
###Output
_____no_output_____
###Markdown
We've used `1` as the label for every word here (these are not the true tags), so you can be sure that vw isn't cheating and it's actually making predictions:
###Code
out = sequenceLabeler.predict(test_example)
print(out)
###Output
_____no_output_____
###Markdown
If we look back at our POS tag definitions, this is DET NOUN VERB DET NOUN, which is indeed correct! Removing the AUTO features In the above example we used both AUTO_HAMMING_LOSS and AUTO_CONDITION_FEATURES. To make more explicit what these are doing, let's rewrite our `SequenceLabeler` class without them! Here's a version that gets rid of both simultaneously. It is only modestly more complex:
###Code
class SequenceLabeler2(pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
pyvw.SearchTask.__init__(self, vw, sch, num_actions)
def _run(self, sentence):
output = []
loss = 0.
for n in range(len(sentence)):
pos,word = sentence[n]
prevPred = output[n-1] if n > 0 else '<s>'
ex = self.vw.example({'w': [word], 'p': [prevPred]})
pred = self.sch.predict(examples=ex, my_tag=n+1, oracle=pos, condition=(n,'p'))
            self.vw.finish_example([ex])
output.append(pred)
if pred != pos:
loss += 1.
self.sch.loss(loss)
return output
sequenceLabeler2 = vw.init_search_task(SequenceLabeler2)
sequenceLabeler2.learn(my_dataset)
print(sequenceLabeler2.predict( [(1,w) for w in "the sandwich ate a monster".split()] ))
###Output
_____no_output_____
###Markdown
If executed correctly, this should have printed `[1, 2, 3, 1, 2]`. There are essentially two things that changed here. In order to get rid of AUTO_HAMMING_LOSS, we had to keep track of how many errors the predictor had made. This is done by checking whether `pred != pos` inside the loop, and then at the end calling `self.sch.loss(loss)` to tell the search procedure how well we did. In order to get rid of AUTO_CONDITION_FEATURES, we need to explicitly add the previous prediction as features to the example we are predicting with. Here, we've done this by extracting the previous prediction (`prevPred`) and explicitly adding it as a feature (in the 'p' namespace) during the example construction. **Important Note:** even though we're not using AUTO_CONDITION_FEATURES, we *still* must tell the search process that this prediction depends on the previous prediction. We need to do this because the learning algorithm automatically memoizes certain computations, and so it needs to know that, when memoizing, this prediction *might have been different* if a previous decision were different. Very silly Covington-esque dependency parsing Let's also try a variant of dependency parsing to see that this doesn't work just for sequence-labeling tasks. First we need to define some data:
###Code
# the label for each word is its parent, or -1 for root
my_dataset = [ [("the", 1), # 0
("monster", 2), # 1
("ate", -1), # 2
("a", 5), # 3
("big", 5), # 4
("sandwich", 2) ] # 5
,
[("the", 1), # 0
("sandwich", 2), # 1
("is", -1), # 2
("tasty", 2)] # 3
,
[("a", 1), # 0
("sandwich", 2), # 1
("ate", -1), # 2
("itself", 2), # 3
]
]
###Output
_____no_output_____
###Markdown
For instance, in the first sentence, the parent of "the" is "monster"; the parent of "monster" is "ate"; and "ate" is the root.The basic idea of a Covington-style dependency parser is to loop over all O(N^2) word pairs and ask if one is the parent of the other. In a real parser you would want to make sure that you don't have cycles, that you have a unique root and (perhaps) that the resulting graph is projective. I'm not doing that here. Hopefully I'll add a shift-reduce parser example later that *does* do this. Here's an implementation of this idea:
###Code
class CovingtonDepParser(pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
pyvw.SearchTask.__init__(self, vw, sch, num_actions)
sch.set_options( sch.AUTO_HAMMING_LOSS | sch.AUTO_CONDITION_FEATURES )
def _run(self, sentence):
N = len(sentence)
# initialize our output so everything is a root
output = [-1 for i in range(N)]
for n in range(N):
wordN,parN = sentence[n]
for m in range(-1,N):
if m == n: continue
                wordM = sentence[m][0] if m >= 0 else "*root*"
# ask the question: is m the parent of n?
isParent = 2 if m == parN else 1
# construct an example
dir = 'l' if m < n else 'r'
ex = self.vw.example({'a': [wordN, dir + '_' + wordN],
                                   'b': [wordM, dir + '_' + wordM],
'p': [wordN + '_' + wordM, dir + '_' + wordN + '_' + wordM],
'd': [ str(m-n <= d) + '<=' + str(d) for d in [-8, -4, -2, -1, 1, 2, 4, 8] ] +
[ str(m-n >= d) + '>=' + str(d) for d in [-8, -4, -2, -1, 1, 2, 4, 8] ] })
pred = self.sch.predict(examples = ex,
my_tag = (m+1)*N + n + 1,
oracle = isParent,
condition = [ (max(0, (m )*N + n + 1), 'p'),
(max(0, (m+1)*N + n ), 'q') ])
self.vw.finish_example([ex]) # must pass the example in as a list because search is a MultiEx reduction
if pred == 2:
output[n] = m
break
return output
###Output
_____no_output_____
###Markdown
In this, `output` stores the predicted tree and is initialized with every word being a root. We then loop over every word (`n`) and every possible parent (`m`, which can be -1, though that's really kind of unnecessary).The features are basically the words under consideration, the words paired with the direction of the edge, the pair of words, and then a bunch of (binned) distance features.We can train and run this parser with:
###Code
vw = pyvw.vw("--search 2 --quiet --search_task hook --ring_size 1024")
task = vw.init_search_task(CovingtonDepParser)
for p in range(10): # do ten passes over the training data
task.learn(my_dataset)
print('testing')
print(task.predict( [(w,-1) for w in "the monster ate a sandwich".split()] ))
print('should have printed [ 1 2 -1 4 2 ]')
###Output
_____no_output_____
###Markdown
One could argue that a more natural way to do this would be with LDF rather than the inner loop over `m`. We'll do that next. LDF-based Covington-style dependency parser One of the weirdnesses in the previous parser implementation is that it makes N-many binary decisions per word ("is word n my parent?") rather than a single N-way decision. The latter makes more sense.The challenge is that you cannot set this up as a vanilla multiclass classification problem because (a) the number of "classes" is a function of the input (a length N sentence will have N classes) and (b) class "1" and "2" don't mean anything consistently across examples.The way around this is label-dependent features (LDF). In LDF mode, the class ids are (essentially -- see caveat below) irrelevant. Instead, you simply provide features that depend on the label (hence "LDF"). In particular, for each possible label, you provide a *different* feature vector, and the goal of learning is to pick one of those as the "correct" one.Here's a re-implementation of Covington using LDF:
###Code
class CovingtonDepParserLDF(pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
pyvw.SearchTask.__init__(self, vw, sch, num_actions)
sch.set_options( sch.AUTO_HAMMING_LOSS | sch.IS_LDF | sch.AUTO_CONDITION_FEATURES )
def makeExample(self, sentence, n, m):
wordN = sentence[n][0]
wordM = sentence[m][0] if m >= 0 else '*ROOT*'
dir = 'l' if m < n else 'r'
ex = self.vw.example( { 'a': [wordN, dir + '_' + wordN],
'b': [wordM, dir + '_' + wordM],
'p': [wordN + '_' + wordM, dir + '_' + wordN + '_' + wordM],
'd': [ str(m-n <= d) + '<=' + str(d) for d in [-8, -4, -2, -1, 1, 2, 4, 8] ] +
[ str(m-n >= d) + '>=' + str(d) for d in [-8, -4, -2, -1, 1, 2, 4, 8] ] },
labelType=self.vw.lCostSensitive)
        ex.set_label_string(str(m+2) + ":0") # project the m-indices (-1...N into 1...N+2)
return ex
def _run(self, sentence):
N = len(sentence)
# initialize our output so everything is a root
output = [-1 for i in range(N)]
for n in range(N):
# make LDF examples
examples = [ self.makeExample(sentence,n,m) for m in range(-1,N) if n != m ]
# truth
parN = sentence[n][1]
            oracle = parN+1 if parN < n else parN # +1 for the root entry at index 0; for parN > n the skipped n==m entry cancels that shift
# make a prediction
pred = self.sch.predict(examples = examples,
my_tag = n+1,
oracle = oracle,
condition = [ (n, 'p'), (n-1, 'q') ] )
            output[n] = pred-1 if pred <= n else pred # invert the index-to-parent mapping used for the oracle above
for ex in examples: ex.finish() # clean up
return output
###Output
_____no_output_____
###Markdown
There are a few things going on here. Let's focus first on the `__init__` function. The only difference here is that when we call `sch.set_options` we provide `sch.IS_LDF` to declare that this is an LDF task. Let's skip the `makeExample` function for a minute and look at the `_run` function. You should recognize most of this from the non-LDF version. We initialize the `output` (parent) of every word to `-1` (meaning that every word is connected to the root). For each word `n`, we construct `N`-many examples: one for every `m` in -1..(N-1), except for the current word `n` because you cannot have self-loops. If we were being more clever, we would only do the ones that won't result in the creation of a cycle, but we're not being clever. Now, because the "labels" are just examples, it's a bit more complicated to specify the oracle. The oracle is an *index* into the examples list. So if `oracle` is the oracle action, then `examples[oracle]` is the corresponding example. We compute the oracle as follows. `parN` is the *actual* parent, which is going to be something in the range `-1 .. (N-1)`. If `parN < n`, the correct index is `parN + 1` (the extra one accounts for the root entry at the front of the examples list). If `parN > n` (note: it cannot be equal to `n`) then, because `n == m` is left out of the examples list, the correct index is just `parN`. Phew. We then ask for a prediction. Now, instead of giving a single example, we give the list of examples. The tag works the same way, as does the conditioning. Once we get a prediction out (called `pred`) we need to figure out what parent it actually corresponds to. This is simply undoing the computation from two paragraphs ago! Finally -- and this is skippable if you trust the Python garbage collector -- we tell VW that we're done with all the examples we created. We do this just to be pedantic; it's optional. Okay, now let's go back to the `makeExample` function. This takes two word ids (`n` and `m`) and makes an example that roughly says "what would it look like if I had an edge from `n` to `m`?" We construct basically the same features as before. There are two major changes, though: 1. When we run `self.vw.example(...)` we provide `labelType=self.vw.lCostSensitive` as an argument. This is because under the hood, vw treats LDF examples as cost-sensitive classification examples. This means they need to have cost-sensitive labels, so that's how we need to create them. 2. We explicitly set the label of this example to `str(m+2)+":0"`. What is this? Well, this is _optional_ but recommended. Here's the issue. In LDF mode, recall that labels have no intrinsic meaning. This means that when vw does auto-conditioning, it's not really clear what to use as the "previous prediction." By giving explicit label names (in this case, m+2) we're recording the position of the last parent, which may be useful for predicting the next parent. We could avoid this necessity if we did our own feature engineering on the history, but for now, this seems to capture the right intuition. Given all this, we can now train and test our parser:
###Code
vw = pyvw.vw("--search 0 --csoaa_ldf m --search_task hook --ring_size 1024 --quiet")
task = vw.init_search_task(CovingtonDepParserLDF)
# BUG: This currently does not work because oracle generation is incorrect (generates invalid oracle values)
# for p in range(2): # do two passes over the training data
# task.learn(my_dataset)
#print(task.predict( [(w,-1) for w in "the monster ate a sandwich".split()] ))
###Output
_____no_output_____
###Markdown
The correct parse of this sentence is `[1, 2, -1, 4, 2]` which is what this should have printed.There are two major things to notice in the initialization of VW. The first is that we say `--search 0`. The zero labels argument to `--search` declares that this is going to be an LDF task. We also have to tell VW that we want an LDF-enabled cost-sensitive learner, which is what `--csoaa_ldf m` does (if you're wondering, `m` means "multiline" -- just treat it as something you have to do). The rest should be familiar. A simple word-alignment model Okay, as a last example we'll do a simple word alignment model in the spirit of the IBM models. Note that this will be a *supervised* model; doing unsupervised stuff is a bit trickier.Here's some word alignment data. The dataset is triples of `E, A, F` where `A[i]` = list of words `E[i]` aligned to, or `[]` for null-aligned:
###Code
my_dataset = [
( "the blue house".split(),
([0], [2], [1]),
"la maison bleue".split() ),
( "the house".split(),
([0], [1]),
"la maison".split() ),
( "the flower".split(),
([0], [1]),
"la fleur".split() )
]
###Output
_____no_output_____
###Markdown
It's going to be useful to compute alignment mismatches at the word level between true alignments (like `[1,2]`) and predicted alignments (like `[2,3,4]`). We use intersection-over-union error:
###Code
def alignmentError(true, sys):
t = set(true)
s = set(sys)
if len(t | s) == 0: return 0.
return 1. - float(len(t & s)) / float(len(t | s))
###Output
_____no_output_____
###Markdown
And now we can define our structured prediction task. This is also an LDF problem. Basically for each word on the English side, we'll loop over all possible phrases on the Foreign side to which it could align (maximum phrase length of three). For each of these options we'll create an example to be fed into the LDF classifier. We also ensure that the same foreign word cannot be covered by multiple English words, though this might not be a good idea in general.
###Code
class WordAligner(pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
pyvw.SearchTask.__init__(self, vw, sch, num_actions)
sch.set_options( sch.AUTO_HAMMING_LOSS | sch.IS_LDF | sch.AUTO_CONDITION_FEATURES )
def makeExample(self, E, F, i, j0, l):
f = 'Null' if j0 is None else [ F[j0+k] for k in range(l+1) ]
ex = self.vw.example( { 'e': E[i],
'f': f,
'p': '_'.join(f),
'l': str(l),
'o': [str(i-j0), str(i-j0-l)] if j0 is not None else [] },
labelType = self.vw.lCostSensitive )
lab = 'Null' if j0 is None else str(j0+l)
ex.set_label_string(lab + ':0')
return ex
def _run(self, alignedSentence):
E,A,F = alignedSentence
# for each E word, we pick a F span
covered = {} # which F words have been covered so far?
output = []
for i in range(len(E)):
examples = [] # contains vw examples
spans = [] # contains triples (alignment error, index in examples, [range])
# empty span:
examples.append( self.makeExample(E, F, i, None, None) )
spans.append( (alignmentError(A[i], []), 0, []) )
# non-empty spans
for j0 in range(len(F)):
for l in range(3): # max phrase length of 3
if j0+l >= len(F): break
                    if (j0+l) in covered: break
id = len(examples)
examples.append( self.makeExample(E, F, i, j0, l) )
spans.append( (alignmentError(A[i], range(j0,j0+l+1)), id, range(j0,j0+l+1)) )
sortedSpans = []
for s in spans: sortedSpans.append(s)
sortedSpans.sort()
oracle = []
for id in range(len(sortedSpans)):
if sortedSpans[id][0] > sortedSpans[0][0]: break
oracle.append( sortedSpans[id][1] )
pred = self.sch.predict(examples = examples,
my_tag = i+1,
oracle = oracle,
condition = [ (i, 'p'), (i-1, 'q') ] )
for ex in examples: ex.finish()
output.append( spans[pred][2] )
for j in spans[pred][2]:
covered[j] = True
return output
###Output
_____no_output_____
###Markdown
The only really complicated thing here is computing the oracle. What we do is, for each possible alignment, compute an intersection-over-union error rate. The oracle is then that alignment that achieves the smallest (local) error rate. This is not perfect, but is good enough. One interesting thing here is that now the `oracle` could be a *list*; this is completely supported by the underlying algorithms.We can train and test this model to make sure it does the right thing:
###Code
vw = pyvw.vw("--search 0 --csoaa_ldf m --search_task hook --ring_size 1024 --quiet -q ef -q ep")
task = vw.init_search_task(WordAligner)
# BUG: This is currently broken due to incorrect oracle generation. Currently under investigation.
# for p in range(10):
# task.learn(my_dataset)
#print(task.predict( ("the blue flower".split(), ([],[],[]), "la fleur bleue".split()) ))
###Output
_____no_output_____
###Markdown
Search - Speech Tagger This tutorial walks you through writing learning to search code using the VW python interface. Once you've completed this, you can graduate to the C++ version, which will be faster for the computer but more painful for you.The "learning to search" paradigm solves problems that look like the following. You have a sequence of decisions to make. At the end of making these decisions, the world tells you how bad your decisions were. You want to condition later decisions on earlier decisions. But thankfully, at "training time," you have access to an *oracle* that will tell you the right answer. Let's start with a simple example: sequence labeling for Part of Speech tagging. The goal is to take a sequence of words ("the monster ate a big sandwich") and label them with their parts of speech (in this case: Det Noun Verb Det Adj Noun).We will choose to solve this problem with left-to-right search. I.e., we'll label the first word, then the second then the third and so on.For any vw project in python, we have to start by importing the pyvw library:
###Code
import vowpalwabbit
###Output
_____no_output_____
###Markdown
Now, let's define our data first. We'll do this first by defining the labels (one annoying thing is that labels in vw have to be integers):
###Code
DET = 1
NOUN = 2
VERB = 3
ADJ = 4
my_dataset = [
[
(DET, "the"),
(NOUN, "monster"),
(VERB, "ate"),
(DET, "a"),
(ADJ, "big"),
(NOUN, "sandwich"),
],
[(DET, "the"), (NOUN, "sandwich"), (VERB, "was"), (ADJ, "tasty")],
[(NOUN, "it"), (VERB, "ate"), (NOUN, "it"), (ADJ, "all")],
]
print(my_dataset[2])
###Output
_____no_output_____
###Markdown
Here we have an example of a (correctly) tagged sentence.Now, we need to write the structured prediction code. To do this, we have to write a new class that derives from the `pyvw.SearchTask` class.This class *must* have two functions: `__init__` and `_run`.The initialization function takes three arguments (plus `self`): a vw object (`vw`), a search object (`sch`), and the number of actions (`num_actions`) that this object has been initialized with. Within the initialization function, we must first initialize the parent class, and then we can set whatever options we want via `sch.set_options(...)`. Of course we can also do whatever additional initialization we like.The `_run` function executes the sequence of decisions on a given input. The input will be of whatever type our data is (so, in the above example, it will be a list of (label,word) pairs).Here is a basic implementation of sequence labeling:
###Code
class SequenceLabeler(vowpalwabbit.pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
# you must must must initialize the parent class
# this will automatically store self.sch <- sch, self.vw <- vw
vowpalwabbit.pyvw.SearchTask.__init__(self, vw, sch, num_actions)
# set whatever options you want
sch.set_options(sch.AUTO_HAMMING_LOSS | sch.AUTO_CONDITION_FEATURES)
def _run(
self, sentence
): # it's called _run to remind you that you shouldn't call it directly!
output = []
for n in range(len(sentence)):
pos, word = sentence[n]
# use "with...as..." to guarantee that the example is finished properly
ex = self.vw.example({"w": [word]})
pred = self.sch.predict(
examples=ex,
my_tag=n + 1,
oracle=pos,
condition=[(n, "p"), (n - 1, "q")],
)
self.vw.finish_example(
[ex]
) # must pass the example in as a list because search is a MultiEx reduction
output.append(pred)
return output
###Output
_____no_output_____
###Markdown
Let's unpack this a bit.The `__init__` function is simple. It first calls the parent initializer and then sets some options. The options it sets are two things designed to make the programmer's life easier. The first is `AUTO_HAMMING_LOSS`. Remember earlier we said that when the sequence of decision is made, you have to say how bad it was? This says that we want this to be computed automatically by comparing the individual decisions to the oracle decisions, and defining the loss to be the sum of incorrect decisions.The second is `AUTO_CONDITION_FEATURES`. This is a bit subtler. Later in the `_run` function, we will say that the label of the `n`th word depends on the label of the `n-1`th word. In order to get the underlying classifier to *pay attention* to that conditioning, we need to add features. We could do that manually (we'll do this later) or we can ask vw to do it automatically for us. For simplicity, we choose the latter.The `_run` function takes a sentence (list of pos/word pairs) as input. We loop over each word position `n` in the sentence and extract the `pos,word` pair. We then construct a VW example that consists of a single feature (the `word`) in the 'w' namespace. Given that example `ex`, we make a search-based prediction by calling `self.sch.predict(...)`. This is a fairly complicated function that takes a number of arguments. Here, we are calling it with the following: - `examples=ex`: This tells the predictor what features to use. - `my_tag=n+1`: In general, we want to condition the prediction of the `n`th label on the `n-1`th label. In order to do this, we have to give each prediction a "name" so that we can refer back to it in the future. This name needs to be an integer `>= 1`. So we'll call the first word `1`, the second word `2`, and so on. It has to be `n+1` and not `n` because of the 1-based requirement. - `oracle=pos`: As mentioned before, on training data, we need to tell the system what the "true" (or "best") decision is at this point in time. Here, it is the given part of speech label. - `condition=(n,'p')`: This says that this prediction depends on the output of whichever-prediction-was-called-`n`, and that the "nature" of that condition is called 'p' (for "predecessor" in this case, though this is entirely up to you)Now, we're ready to train the model. We do this in three steps. First, we initialize a vw object, telling it that we have a `--search` task with 4 labels, second that the specific type of `--search_task` is `hook` (you will always use the `hook` task) and finally that we want it to be quiet and use a larger `ring_size` (you can ignore the `ring_size` for now).
###Code
vw = vowpalwabbit.Workspace("--search 4 --search_task hook", quiet=True)
###Output
_____no_output_____
###Markdown
Next, we need to initialize the search task. We use the `vw.init_search_task` function to do this:
###Code
sequenceLabeler = vw.init_search_task(SequenceLabeler)
###Output
_____no_output_____
###Markdown
Finally, we can train on the dataset we defined earlier, using `sequenceLabeler.learn` (the `.learn` function is inherited from the `pyvw.SearchTask` class). The `.learn` function takes any iterator over data. Whatever type of data it iterates over is what it will feed to your `_run` function.
###Code
for i in range(10):
sequenceLabeler.learn(my_dataset)
###Output
_____no_output_____
###Markdown
Of course, we want to see if it's learned anything. So let's create a single test example:
###Code
test_example = [(1, w) for w in "the sandwich ate a monster".split()]
print(test_example)
###Output
_____no_output_____
###Markdown
We've used `1` as the label for every word here (these are not the true tags), so you can be sure that vw isn't cheating and it's actually making predictions:
###Code
out = sequenceLabeler.predict(test_example)
print(out)
###Output
_____no_output_____
###Markdown
If we look back at our POS tag definitions, this is DET NOUN VERB DET NOUN, which is indeed correct! Removing the AUTO features In the above example we used both AUTO_HAMMING_LOSS and AUTO_CONDITION_FEATURES. To make more explicit what these are doing, let's rewrite our `SequenceLabeler` class without them! Here's a version that gets rid of both simultaneously. It is only modestly more complex:
###Code
class SequenceLabeler2(vowpalwabbit.pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
vowpalwabbit.pyvw.SearchTask.__init__(self, vw, sch, num_actions)
def _run(self, sentence):
output = []
loss = 0.0
for n in range(len(sentence)):
pos, word = sentence[n]
prevPred = output[n - 1] if n > 0 else "<s>"
ex = self.vw.example({"w": [word], "p": [prevPred]})
pred = self.sch.predict(
examples=ex, my_tag=n + 1, oracle=pos, condition=(n, "p")
)
            self.vw.finish_example([ex])
output.append(pred)
if pred != pos:
loss += 1.0
self.sch.loss(loss)
return output
sequenceLabeler2 = vw.init_search_task(SequenceLabeler2)
sequenceLabeler2.learn(my_dataset)
print(sequenceLabeler2.predict([(1, w) for w in "the sandwich ate a monster".split()]))
###Output
_____no_output_____
###Markdown
If executed correctly, this should have printed `[1, 2, 3, 1, 2]`. There are essentially two things that changed here. In order to get rid of AUTO_HAMMING_LOSS, we had to keep track of how many errors the predictor had made. This is done by checking whether `pred != pos` inside the loop, and then at the end calling `self.sch.loss(loss)` to tell the search procedure how well we did. In order to get rid of AUTO_CONDITION_FEATURES, we need to explicitly add the previous prediction as features to the example we are predicting with. Here, we've done this by extracting the previous prediction (`prevPred`) and explicitly adding it as a feature (in the 'p' namespace) during the example construction. **Important Note:** even though we're not using AUTO_CONDITION_FEATURES, we *still* must tell the search process that this prediction depends on the previous prediction. We need to do this because the learning algorithm automatically memoizes certain computations, and so it needs to know that, when memoizing, this prediction *might have been different* if a previous decision were different. Very silly Covington-esque dependency parsing Let's also try a variant of dependency parsing to see that this doesn't work just for sequence-labeling tasks. First we need to define some data:
###Code
# the label for each word is its parent, or -1 for root
my_dataset = [
[
("the", 1), # 0
("monster", 2), # 1
("ate", -1), # 2
("a", 5), # 3
("big", 5), # 4
("sandwich", 2),
], # 5
[("the", 1), ("sandwich", 2), ("is", -1), ("tasty", 2)], # 0 # 1 # 2 # 3
[
("a", 1), # 0
("sandwich", 2), # 1
("ate", -1), # 2
("itself", 2), # 3
],
]
###Output
_____no_output_____
###Markdown
For instance, in the first sentence, the parent of "the" is "monster"; the parent of "monster" is "ate"; and "ate" is the root.The basic idea of a Covington-style dependency parser is to loop over all O(N^2) word pairs and ask if one is the parent of the other. In a real parser you would want to make sure that you don't have cycles, that you have a unique root and (perhaps) that the resulting graph is projective. I'm not doing that here. Hopefully I'll add a shift-reduce parser example later that *does* do this. Here's an implementation of this idea:
###Code
class CovingtonDepParser(vowpalwabbit.pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
vowpalwabbit.pyvw.SearchTask.__init__(self, vw, sch, num_actions)
sch.set_options(sch.AUTO_HAMMING_LOSS | sch.AUTO_CONDITION_FEATURES)
def _run(self, sentence):
N = len(sentence)
# initialize our output so everything is a root
output = [-1 for i in range(N)]
for n in range(N):
wordN, parN = sentence[n]
for m in range(-1, N):
if m == n:
continue
                wordM = sentence[m][0] if m >= 0 else "*root*"
# ask the question: is m the parent of n?
isParent = 2 if m == parN else 1
# construct an example
dir = "l" if m < n else "r"
ex = self.vw.example(
{
"a": [wordN, dir + "_" + wordN],
"b": [wordM, dir + "_" + wordN],
"p": [wordN + "_" + wordM, dir + "_" + wordN + "_" + wordM],
"d": [
str(m - n <= d) + "<=" + str(d)
for d in [-8, -4, -2, -1, 1, 2, 4, 8]
]
+ [
str(m - n >= d) + ">=" + str(d)
for d in [-8, -4, -2, -1, 1, 2, 4, 8]
],
}
)
pred = self.sch.predict(
examples=ex,
my_tag=(m + 1) * N + n + 1,
oracle=isParent,
condition=[
(max(0, (m) * N + n + 1), "p"),
(max(0, (m + 1) * N + n), "q"),
],
)
self.vw.finish_example(
[ex]
) # must pass the example in as a list because search is a MultiEx reduction
if pred == 2:
output[n] = m
break
return output
###Output
_____no_output_____
###Markdown
In this, `output` stores the predicted tree and is initialized with every word being a root. We then loop over every word (`n`) and every possible parent (`m`, which can be -1, though that's really kind of unnecessary).The features are basically the words under consideration, the words paired with the direction of the edge, the pair of words, and then a bunch of (binned) distance features.We can train and run this parser with:
###Code
vw = vowpalwabbit.Workspace("--search 2 --search_task hook", quiet=True)
task = vw.init_search_task(CovingtonDepParser)
for p in range(10): # do ten passes over the training data
task.learn(my_dataset)
print("testing")
print(task.predict([(w, -1) for w in "the monster ate a sandwich".split()]))
print("should have printed [ 1 2 -1 4 2 ]")
###Output
_____no_output_____
###Markdown
One could argue that a more natural way to do this would be with LDF rather than the inner loop over `m`. We'll do that next. LDF-based Covington-style dependency parser One of the weirdnesses in the previous parser implementation is that it makes N-many binary decisions per word ("is word n my parent?") rather than a single N-way decision. The latter makes more sense.The challenge is that you cannot set this up as a vanilla multiclass classification problem because (a) the number of "classes" is a function of the input (a length N sentence will have N classes) and (b) class "1" and "2" don't mean anything consistently across examples.The way around this is label-dependent features (LDF). In LDF mode, the class ids are (essentially -- see caveat below) irrelevant. Instead, you simply provide features that depend on the label (hence "LDF"). In particular, for each possible label, you provide a *different* feature vector, and the goal of learning is to pick one of those as the "correct" one.Here's a re-implementation of Covington using LDF:
###Code
class CovingtonDepParserLDF(vowpalwabbit.pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
vowpalwabbit.pyvw.SearchTask.__init__(self, vw, sch, num_actions)
sch.set_options(
sch.AUTO_HAMMING_LOSS | sch.IS_LDF | sch.AUTO_CONDITION_FEATURES
)
def makeExample(self, sentence, n, m):
wordN = sentence[n][0]
wordM = sentence[m][0] if m >= 0 else "*ROOT*"
dir = "l" if m < n else "r"
ex = self.vw.example(
{
"a": [wordN, dir + "_" + wordN],
"b": [wordM, dir + "_" + wordM],
"p": [wordN + "_" + wordM, dir + "_" + wordN + "_" + wordM],
"d": [
str(m - n <= d) + "<=" + str(d)
for d in [-8, -4, -2, -1, 1, 2, 4, 8]
]
+ [
str(m - n >= d) + ">=" + str(d)
for d in [-8, -4, -2, -1, 1, 2, 4, 8]
],
},
labelType=self.vw.lCostSensitive,
)
ex.set_label_string(
str(m + 2) + ":0"
) # project the m-indices (-1...N into 1...N+2)
return ex
def _run(self, sentence):
N = len(sentence)
# initialize our output so everything is a root
output = [-1 for i in range(N)]
for n in range(N):
# make LDF examples
examples = [
self.makeExample(sentence, n, m) for m in range(-1, N) if n != m
]
# truth
parN = sentence[n][1]
oracle = (
parN + 1 if parN < n else parN
            )  # +1 for the root entry at index 0; for parN > n the skipped n==m entry cancels that shift
# make a prediction
pred = self.sch.predict(
examples=examples,
my_tag=n + 1,
oracle=oracle,
condition=[(n, "p"), (n - 1, "q")],
)
output[n] = (
                pred - 1 if pred <= n else pred
            )  # invert the index-to-parent mapping used for the oracle above
for ex in examples:
ex.finish() # clean up
return output
###Output
_____no_output_____
###Markdown
There are a few things going on here. Let's focus first on the `__init__` function. The only difference here is that when we call `sch.set_options` we provide `sch.IS_LDF` to declare that this is an LDF task. Let's skip the `makeExample` function for a minute and look at the `_run` function. You should recognize most of this from the non-LDF version. We initialize the `output` (parent) of every word to `-1` (meaning that every word is connected to the root). For each word `n`, we construct `N`-many examples: one for every `m` in -1..(N-1), except for the current word `n` because you cannot have self-loops. If we were being more clever, we would only do the ones that won't result in the creation of a cycle, but we're not being clever. Now, because the "labels" are just examples, it's a bit more complicated to specify the oracle. The oracle is an *index* into the examples list. So if `oracle` is the oracle action, then `examples[oracle]` is the corresponding example. We compute the oracle as follows. `parN` is the *actual* parent, which is going to be something in the range `-1 .. (N-1)`. If `parN < n`, the correct index is `parN + 1` (the extra one accounts for the root entry at the front of the examples list). If `parN > n` (note: it cannot be equal to `n`) then, because `n == m` is left out of the examples list, the correct index is just `parN`. Phew. We then ask for a prediction. Now, instead of giving a single example, we give the list of examples. The tag works the same way, as does the conditioning. Once we get a prediction out (called `pred`) we need to figure out what parent it actually corresponds to. This is simply undoing the computation from two paragraphs ago! Finally -- and this is skippable if you trust the Python garbage collector -- we tell VW that we're done with all the examples we created. We do this just to be pedantic; it's optional. Okay, now let's go back to the `makeExample` function. This takes two word ids (`n` and `m`) and makes an example that roughly says "what would it look like if I had an edge from `n` to `m`?" We construct basically the same features as before. There are two major changes, though: 1. When we run `self.vw.example(...)` we provide `labelType=self.vw.lCostSensitive` as an argument. This is because under the hood, vw treats LDF examples as cost-sensitive classification examples. This means they need to have cost-sensitive labels, so that's how we need to create them. 2. We explicitly set the label of this example to `str(m+2)+":0"`. What is this? Well, this is _optional_ but recommended. Here's the issue. In LDF mode, recall that labels have no intrinsic meaning. This means that when vw does auto-conditioning, it's not really clear what to use as the "previous prediction." By giving explicit label names (in this case, m+2) we're recording the position of the last parent, which may be useful for predicting the next parent. We could avoid this necessity if we did our own feature engineering on the history, but for now, this seems to capture the right intuition. Given all this, we can now train and test our parser:
###Code
vw = vowpalwabbit.Workspace("--search 0 --csoaa_ldf m --search_task hook", quiet=True)
task = vw.init_search_task(CovingtonDepParserLDF)
# BUG: This currently does not work because oracle generation is incorrect (generates invalid oracle values)
# for p in range(2): # do two passes over the training data
# task.learn(my_dataset)
# print(task.predict( [(w,-1) for w in "the monster ate a sandwich".split()] ))
###Output
_____no_output_____
###Markdown
The correct parse of this sentence is `[1, 2, -1, 4, 2]` which is what this should have printed.There are two major things to notice in the initialization of VW. The first is that we say `--search 0`. The zero labels argument to `--search` declares that this is going to be an LDF task. We also have to tell VW that we want an LDF-enabled cost-sensitive learner, which is what `--csoaa_ldf m` does (if you're wondering, `m` means "multiline" -- just treat it as something you have to do). The rest should be familiar. A simple word-alignment model Okay, as a last example we'll do a simple word alignment model in the spirit of the IBM models. Note that this will be a *supervised* model; doing unsupervised stuff is a bit trickier.Here's some word alignment data. The dataset is triples of `E, A, F` where `A[i]` = list of words `E[i]` aligned to, or `[]` for null-aligned:
###Code
my_dataset = [
("the blue house".split(), ([0], [2], [1]), "la maison bleue".split()),
("the house".split(), ([0], [1]), "la maison".split()),
("the flower".split(), ([0], [1]), "la fleur".split()),
]
###Output
_____no_output_____
###Markdown
It's going to be useful to compute alignment mismatches at the word level between true alignments (like `[1,2]`) and predicted alignments (like `[2,3,4]`). We use intersection-over-union error:
###Code
def alignmentError(true, sys):
t = set(true)
s = set(sys)
if len(t | s) == 0:
return 0.0
return 1.0 - float(len(t & s)) / float(len(t | s))
###Output
_____no_output_____
###Markdown
And now we can define our structured prediction task. This is also an LDF problem. Basically for each word on the English side, we'll loop over all possible phrases on the Foreign side to which it could align (maximum phrase length of three). For each of these options we'll create an example to be fed into the LDF classifier. We also ensure that the same foreign word cannot be covered by multiple English words, though this might not be a good idea in general.
###Code
class WordAligner(vowpalwabbit.pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
vowpalwabbit.pyvw.SearchTask.__init__(self, vw, sch, num_actions)
sch.set_options(
sch.AUTO_HAMMING_LOSS | sch.IS_LDF | sch.AUTO_CONDITION_FEATURES
)
def makeExample(self, E, F, i, j0, l):
f = "Null" if j0 is None else [F[j0 + k] for k in range(l + 1)]
ex = self.vw.example(
{
"e": E[i],
"f": f,
"p": "_".join(f),
"l": str(l),
"o": [str(i - j0), str(i - j0 - l)] if j0 is not None else [],
},
labelType=self.vw.lCostSensitive,
)
lab = "Null" if j0 is None else str(j0 + l)
ex.set_label_string(lab + ":0")
return ex
def _run(self, alignedSentence):
E, A, F = alignedSentence
# for each E word, we pick a F span
covered = {} # which F words have been covered so far?
output = []
for i in range(len(E)):
examples = [] # contains vw examples
spans = [] # contains triples (alignment error, index in examples, [range])
# empty span:
examples.append(self.makeExample(E, F, i, None, None))
spans.append((alignmentError(A[i], []), 0, []))
# non-empty spans
for j0 in range(len(F)):
for l in range(3): # max phrase length of 3
if j0 + l >= len(F):
break
                    if (j0 + l) in covered:
break
id = len(examples)
examples.append(self.makeExample(E, F, i, j0, l))
spans.append(
(
alignmentError(A[i], range(j0, j0 + l + 1)),
id,
range(j0, j0 + l + 1),
)
)
sortedSpans = []
for s in spans:
sortedSpans.append(s)
sortedSpans.sort()
oracle = []
for id in range(len(sortedSpans)):
if sortedSpans[id][0] > sortedSpans[0][0]:
break
oracle.append(sortedSpans[id][1])
pred = self.sch.predict(
examples=examples,
my_tag=i + 1,
oracle=oracle,
condition=[(i, "p"), (i - 1, "q")],
)
for ex in examples:
ex.finish()
output.append(spans[pred][2])
for j in spans[pred][2]:
covered[j] = True
return output
###Output
_____no_output_____
###Markdown
The only really complicated thing here is computing the oracle. What we do is, for each possible alignment, compute an intersection-over-union error rate. The oracle is then that alignment that achieves the smallest (local) error rate. This is not perfect, but is good enough. One interesting thing here is that now the `oracle` could be a *list*; this is completely supported by the underlying algorithms.We can train and test this model to make sure it does the right thing:
###Code
vw = vowpalwabbit.Workspace(
"--search 0 --csoaa_ldf m --search_task hook -q ef -q ep", quiet=True
)
task = vw.init_search_task(WordAligner)
# BUG: This is currently broken due to incorrect oracle generation. Currently under investigation.
# for p in range(10):
# task.learn(my_dataset)
# print(task.predict( ("the blue flower".split(), ([],[],[]), "la fleur bleue".split()) ))
###Output
_____no_output_____
###Markdown
Search - Speech Tagger This tutorial walks you through writing learning to search code using the VW python interface. Once you've completed this, you can graduate to the C++ version, which will be faster for the computer but more painful for you.The "learning to search" paradigm solves problems that look like the following. You have a sequence of decisions to make. At the end of making these decisions, the world tells you how bad your decisions were. You want to condition later decisions on earlier decisions. But thankfully, at "training time," you have access to an *oracle* that will tell you the right answer. Let's start with a simple example: sequence labeling for Part of Speech tagging. The goal is to take a sequence of words ("the monster ate a big sandwich") and label them with their parts of speech (in this case: Det Noun Verb Det Adj Noun).We will choose to solve this problem with left-to-right search. I.e., we'll label the first word, then the second then the third and so on.For any vw project in python, we have to start by importing the pyvw library:
###Code
from __future__ import print_function
from vowpalwabbit import pyvw
###Output
_____no_output_____
###Markdown
Now, let's define our data first. We'll do this first by defining the labels (one annoying thing is that labels in vw have to be integers):
###Code
DET = 1
NOUN = 2
VERB = 3
ADJ = 4
my_dataset = [ [(DET , 'the'),
(NOUN, 'monster'),
(VERB, 'ate'),
(DET , 'a'),
(ADJ , 'big'),
(NOUN, 'sandwich')],
[(DET , 'the'),
(NOUN, 'sandwich'),
(VERB, 'was'),
(ADJ , 'tasty')],
[(NOUN, 'it'),
(VERB, 'ate'),
(NOUN, 'it'),
(ADJ , 'all')] ]
print(my_dataset[2])
###Output
_____no_output_____
###Markdown
Here we have an example of a (correctly) tagged sentence.Now, we need to write the structured prediction code. To do this, we have to write a new class that derives from the `pyvw.SearchTask` class.This class *must* have two functions: `__init__` and `_run`.The initialization function takes three arguments (plus `self`): a vw object (`vw`), a search object (`sch`), and the number of actions (`num_actions`) that this object has been initialized with. Within the initialization function, we must first initialize the parent class, and then we can set whatever options we want via `sch.set_options(...)`. Of course we can also do whatever additional initialization we like.The `_run` function executes the sequence of decisions on a given input. The input will be of whatever type our data is (so, in the above example, it will be a list of (label,word) pairs).Here is a basic implementation of sequence labeling:
###Code
class SequenceLabeler(pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
# you must must must initialize the parent class
# this will automatically store self.sch <- sch, self.vw <- vw
pyvw.SearchTask.__init__(self, vw, sch, num_actions)
# set whatever options you want
sch.set_options( sch.AUTO_HAMMING_LOSS | sch.AUTO_CONDITION_FEATURES )
def _run(self, sentence): # it's called _run to remind you that you shouldn't call it directly!
output = []
for n in range(len(sentence)):
pos,word = sentence[n]
# use "with...as..." to guarantee that the example is finished properly
ex = self.vw.example({'w': [word]})
pred = self.sch.predict(examples=ex, my_tag=n+1, oracle=pos, condition=[(n,'p'), (n-1, 'q')])
self.vw.finish_example([ex]) # must pass the example in as a list because search is a MultiEx reduction
output.append(pred)
return output
###Output
_____no_output_____
###Markdown
Let's unpack this a bit.The `__init__` function is simple. It first calls the parent initializer and then sets some options. The options it sets are two things designed to make the programmer's life easier. The first is `AUTO_HAMMING_LOSS`. Remember earlier we said that when the sequence of decision is made, you have to say how bad it was? This says that we want this to be computed automatically by comparing the individual decisions to the oracle decisions, and defining the loss to be the sum of incorrect decisions.The second is `AUTO_CONDITION_FEATURES`. This is a bit subtler. Later in the `_run` function, we will say that the label of the `n`th word depends on the label of the `n-1`th word. In order to get the underlying classifier to *pay attention* to that conditioning, we need to add features. We could do that manually (we'll do this later) or we can ask vw to do it automatically for us. For simplicity, we choose the latter.The `_run` function takes a sentence (list of pos/word pairs) as input. We loop over each word position `n` in the sentence and extract the `pos,word` pair. We then construct a VW example that consists of a single feature (the `word`) in the 'w' namespace. Given that example `ex`, we make a search-based prediction by calling `self.sch.predict(...)`. This is a fairly complicated function that takes a number of arguments. Here, we are calling it with the following: - `examples=ex`: This tells the predictor what features to use. - `my_tag=n+1`: In general, we want to condition the prediction of the `n`th label on the `n-1`th label. In order to do this, we have to give each prediction a "name" so that we can refer back to it in the future. This name needs to be an integer `>= 1`. So we'll call the first word `1`, the second word `2`, and so on. It has to be `n+1` and not `n` because of the 1-based requirement. - `oracle=pos`: As mentioned before, on training data, we need to tell the system what the "true" (or "best") decision is at this point in time. Here, it is the given part of speech label. - `condition=(n,'p')`: This says that this prediction depends on the output of whichever-prediction-was-called-`n`, and that the "nature" of that condition is called 'p' (for "predecessor" in this case, though this is entirely up to you)Now, we're ready to train the model. We do this in three steps. First, we initialize a vw object, telling it that we have a `--search` task with 4 labels, second that the specific type of `--search_task` is `hook` (you will always use the `hook` task) and finally that we want it to be quiet and use a larger `ring_size` (you can ignore the `ring_size` for now).
###Code
vw = pyvw.Workspace("--search 4 --audit --quiet --search_task hook --ring_size 1024")
###Output
_____no_output_____
###Markdown
Next, we need to initialize the search task. We use the `vw.init_search_task` function to do this:
###Code
sequenceLabeler = vw.init_search_task(SequenceLabeler)
###Output
_____no_output_____
###Markdown
Finally, we can train on the dataset we defined earlier, using `sequenceLabeler.learn` (the `.learn` function is inherited from the `pyvw.SearchTask` class). The `.learn` function takes any iterator over data. Whatever type of data it iterates over is what it will feed to your `_run` function.
###Code
for i in range(10):
sequenceLabeler.learn(my_dataset)
###Output
_____no_output_____
###Markdown
Of course, we want to see if it's learned anything. So let's create a single test example:
###Code
test_example = [ (1,w) for w in "the sandwich ate a monster".split() ]
print(test_example)
###Output
_____no_output_____
###Markdown
We've used a constant label (`1`) for every word so you can be sure that vw isn't cheating and it's actually making predictions:
###Code
out = sequenceLabeler.predict(test_example)
print(out)
###Output
_____no_output_____
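###Markdown
As a small convenience (not part of the original tutorial), we can map the numeric predictions back to the tag names defined earlier:
###Code
# assumes DET/NOUN/VERB/ADJ and `out` from the cells above
tag_names = {DET: 'DET', NOUN: 'NOUN', VERB: 'VERB', ADJ: 'ADJ'}
print([tag_names[p] for p in out])
###Output
_____no_output_____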
###Markdown
If we look back at our POS tag definitions, this is DET NOUN VERB DET NOUN, which is indeed correct! Removing the AUTO features In the above example we used both AUTO_HAMMING_LOSS and AUTO_CONDITION_FEATURES. To make more explicit what these are doing, let's rewrite our `SequenceLabeler` class without them! Here's a version that gets rid of both simultaneously. It is only modestly more complex:
###Code
class SequenceLabeler2(pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
pyvw.SearchTask.__init__(self, vw, sch, num_actions)
def _run(self, sentence):
output = []
loss = 0.
for n in range(len(sentence)):
pos,word = sentence[n]
prevPred = output[n-1] if n > 0 else '<s>'
ex = self.vw.example({'w': [word], 'p': [prevPred]})
pred = self.sch.predict(examples=ex, my_tag=n+1, oracle=pos, condition=(n,'p'))
            self.vw.finish_example([ex])
output.append(pred)
if pred != pos:
loss += 1.
self.sch.loss(loss)
return output
sequenceLabeler2 = vw.init_search_task(SequenceLabeler2)
sequenceLabeler2.learn(my_dataset)
print(sequenceLabeler2.predict( [(1,w) for w in "the sandwich ate a monster".split()] ))
###Output
_____no_output_____
###Markdown
If executed correctly, this should have printed `[1, 2, 3, 1, 2]`. There are essentially two things that changed here. In order to get rid of AUTO_HAMMING_LOSS, we had to keep track of how many errors the predictor had made. This is done by checking whether `pred != pos` inside the loop, and then at the end calling `self.sch.loss(loss)` to tell the search procedure how well we did. In order to get rid of AUTO_CONDITION_FEATURES, we need to explicitly add the previous prediction as features to the example we are predicting with. Here, we've done this by extracting the previous prediction (`prevPred`) and explicitly adding it as a feature (in the 'p' namespace) during the example construction. **Important Note:** even though we're not using AUTO_CONDITION_FEATURES, we *still* must tell the search process that this prediction depends on the previous prediction. We need to do this because the learning algorithm automatically memoizes certain computations, and so it needs to know, when memoizing, that this prediction *might have been different* if a previous decision had been different. Very silly Covington-esque dependency parsing Let's also try a variant of dependency parsing to see that this doesn't only work for sequence-labeling tasks. First we need to define some data:
###Code
# the label for each word is its parent, or -1 for root
my_dataset = [ [("the", 1), # 0
("monster", 2), # 1
("ate", -1), # 2
("a", 5), # 3
("big", 5), # 4
("sandwich", 2) ] # 5
,
[("the", 1), # 0
("sandwich", 2), # 1
("is", -1), # 2
("tasty", 2)] # 3
,
[("a", 1), # 0
("sandwich", 2), # 1
("ate", -1), # 2
("itself", 2), # 3
]
]
###Output
_____no_output_____
###Markdown
For instance, in the first sentence, the parent of "the" is "monster"; the parent of "monster" is "ate"; and "ate" is the root.The basic idea of a Covington-style dependency parser is to loop over all O(N^2) word pairs and ask if one is the parent of the other. In a real parser you would want to make sure that you don't have cycles, that you have a unique root and (perhaps) that the resulting graph is projective. I'm not doing that here. Hopefully I'll add a shift-reduce parser example later that *does* do this. Here's an implementation of this idea:
###Code
class CovingtonDepParser(pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
pyvw.SearchTask.__init__(self, vw, sch, num_actions)
sch.set_options( sch.AUTO_HAMMING_LOSS | sch.AUTO_CONDITION_FEATURES )
def _run(self, sentence):
N = len(sentence)
# initialize our output so everything is a root
output = [-1 for i in range(N)]
for n in range(N):
wordN,parN = sentence[n]
for m in range(-1,N):
if m == n: continue
                wordM = sentence[m][0] if m >= 0 else "*root*"  # m == -1 denotes the root
# ask the question: is m the parent of n?
isParent = 2 if m == parN else 1
# construct an example
dir = 'l' if m < n else 'r'
ex = self.vw.example({'a': [wordN, dir + '_' + wordN],
'b': [wordM, dir + '_' + wordN],
'p': [wordN + '_' + wordM, dir + '_' + wordN + '_' + wordM],
'd': [ str(m-n <= d) + '<=' + str(d) for d in [-8, -4, -2, -1, 1, 2, 4, 8] ] +
[ str(m-n >= d) + '>=' + str(d) for d in [-8, -4, -2, -1, 1, 2, 4, 8] ] })
pred = self.sch.predict(examples = ex,
my_tag = (m+1)*N + n + 1,
oracle = isParent,
condition = [ (max(0, (m )*N + n + 1), 'p'),
(max(0, (m+1)*N + n ), 'q') ])
self.vw.finish_example([ex]) # must pass the example in as a list because search is a MultiEx reduction
if pred == 2:
output[n] = m
break
return output
###Output
_____no_output_____
###Markdown
In this, `output` stores the predicted tree and is initialized with every word being a root. We then loop over every word (`n`) and every possible parent (`m`, which can be -1, though that's really kind of unnecessary). The features are basically the words under consideration, the words paired with the direction of the edge, the pair of words, and then a bunch of (binned) distance features (a small illustration of what those distance features look like as strings appears right after the training cell below). We can train and run this parser with:
###Code
vw = pyvw.Workspace("--search 2 --quiet --search_task hook --ring_size 1024")
task = vw.init_search_task(CovingtonDepParser)
for p in range(10): # do ten passes over the training data
task.learn(my_dataset)
print('testing')
print(task.predict( [(w,-1) for w in "the monster ate a sandwich".split()] ))
print('should have printed [ 1 2 -1 4 2 ]')
###Output
_____no_output_____
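###Markdown
For the curious, here is what the binned distance features mentioned above actually look like as feature strings. This is just an illustration with a made-up offset; it does not affect the parser.
###Code
# hypothetical peek at the 'd' namespace features for one word pair (here m - n == -2)
m_minus_n = -2
bins = [-8, -4, -2, -1, 1, 2, 4, 8]
d_features = [str(m_minus_n <= d) + '<=' + str(d) for d in bins] + \
             [str(m_minus_n >= d) + '>=' + str(d) for d in bins]
print(d_features)
###Output
_____no_output_____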
###Markdown
One could argue that a more natural way to do this would be with LDF rather than the inner loop over `m`. We'll do that next. LDF-based Covington-style dependency parser One of the weirdnesses in the previous parser implementation is that it makes N-many binary decisions per word ("is word n my parent?") rather than a single N-way decision. The latter makes more sense.The challenge is that you cannot set this up as a vanilla multiclass classification problem because (a) the number of "classes" is a function of the input (a length N sentence will have N classes) and (b) class "1" and "2" don't mean anything consistently across examples.The way around this is label-dependent features (LDF). In LDF mode, the class ids are (essentially -- see caveat below) irrelevant. Instead, you simply provide features that depend on the label (hence "LDF"). In particular, for each possible label, you provide a *different* feature vector, and the goal of learning is to pick one of those as the "correct" one.Here's a re-implementation of Covington using LDF:
###Code
class CovingtonDepParserLDF(pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
pyvw.SearchTask.__init__(self, vw, sch, num_actions)
sch.set_options( sch.AUTO_HAMMING_LOSS | sch.IS_LDF | sch.AUTO_CONDITION_FEATURES )
def makeExample(self, sentence, n, m):
wordN = sentence[n][0]
wordM = sentence[m][0] if m >= 0 else '*ROOT*'
dir = 'l' if m < n else 'r'
ex = self.vw.example( { 'a': [wordN, dir + '_' + wordN],
'b': [wordM, dir + '_' + wordM],
'p': [wordN + '_' + wordM, dir + '_' + wordN + '_' + wordM],
'd': [ str(m-n <= d) + '<=' + str(d) for d in [-8, -4, -2, -1, 1, 2, 4, 8] ] +
[ str(m-n >= d) + '>=' + str(d) for d in [-8, -4, -2, -1, 1, 2, 4, 8] ] },
labelType=self.vw.lCostSensitive)
ex.set_label_string(str(m+2) + ":0") # project the m-indices (-1...N into 1...N+2)
return ex
def _run(self, sentence):
N = len(sentence)
# initialize our output so everything is a root
output = [-1 for i in range(N)]
for n in range(N):
# make LDF examples
examples = [ self.makeExample(sentence,n,m) for m in range(-1,N) if n != m ]
# truth
parN = sentence[n][1]
oracle = parN+1 if parN < n else parN # have to -1 because we excluded n==m from list
# make a prediction
pred = self.sch.predict(examples = examples,
my_tag = n+1,
oracle = oracle,
condition = [ (n, 'p'), (n-1, 'q') ] )
output[n] = pred-1 if pred < n else pred # have to +1 because n==m excluded
for ex in examples: ex.finish() # clean up
return output
###Output
_____no_output_____
###Markdown
There are a few things going on here. Let's focus first on the `__init__` function. The only difference here is that when we call `sch.set_options` we provide `sch.IS_LDF` to declare that this is an LDF task. Let's skip the `makeExample` function for a minute and look at the `_run` function. You should recognize most of this from the non-LDF version. We initialize the `output` (parent) of every word to `-1` (meaning that every word is connected to the root). For each word `n`, we construct `N`-many examples: one for every `m` in -1..(N-1), except for the current word `n` because you cannot have self-loops. If we were being more clever, we would only do the ones that won't result in the creation of a cycle, but we're not being clever. Now, because the "labels" are just examples, it's a bit more complicated to specify the oracle. The oracle is an *index* into the examples list. So if `oracle` is the oracle action, then `examples[oracle]` is the corresponding example. We compute the oracle as follows. `parN` is the *actual* parent, which is going to be something in the range `-1 .. (N-1)`. If `parN < n`, the correct index is `parN + 1`, because the root option (`m == -1`) sits at index 0 and shifts everything up by one. If `parN > n` (note: it cannot be equal to `n`), then, because `n == m` is left out of the examples list, that shift is cancelled and the correct index is just `parN`. Phew. We then ask for a prediction. Now, instead of giving a single example, we give the list of examples. The tag works the same way, as does the conditioning. Once we get a prediction out (called `pred`) we need to figure out what parent it actually corresponds to. This is simply un-doing the computation two paragraphs ago! Finally -- and this is skippable if you trust the Python garbage collector -- we tell VW that we're done with all the examples we created. We do this just to be pedantic; it's optional. Okay, now let's go back to the `makeExample` function. This takes two word ids (`n` and `m`) and makes an example that roughly says "what would it look like if I had an edge from `n` to `m`?" We construct basically the same features as before. There are two major changes, though: 1. When we run `self.vw.example(...)` we provide `labelType=self.vw.lCostSensitive` as an argument. This is because under the hood, vw treats LDF examples as cost-sensitive classification examples. This means they need to have cost-sensitive labels, so that's how we need to create them. 2. We explicitly set the label of this example to `str(m+2)+":0"`. What is this? Well, this is _optional_ but recommended. Here's the issue. In LDF mode, recall that labels have no intrinsic meaning. This means that when vw does auto-conditioning, it's not really clear what to use as the "previous prediction." By giving explicit label names (in this case, m+2) we're recording the position of the last parent, which may be useful for predicting the next parent. We could avoid this necessity if we did our own feature engineering on the history, but for now, this seems to capture the right intuition. Given all this, we can now train and test our parser:
###Code
vw = pyvw.Workspace("--search 0 --csoaa_ldf m --search_task hook --ring_size 1024 --quiet")
task = vw.init_search_task(CovingtonDepParserLDF)
#BUG: This currently does not work because oracle generation is incorrect (generates invalid oracle values)
#for p in range(2): # do two passes over the training data
# task.learn(my_dataset)
#print(task.predict( [(w,-1) for w in "the monster ate a sandwich".split()] ))
###Output
_____no_output_____
###Markdown
The correct parse of this sentence is `[1, 2, -1, 4, 2]` which is what this should have printed.There are two major things to notice in the initialization of VW. The first is that we say `--search 0`. The zero labels argument to `--search` declares that this is going to be an LDF task. We also have to tell VW that we want an LDF-enabled cost-sensitive learner, which is what `--csoaa_ldf m` does (if you're wondering, `m` means "multiline" -- just treat it as something you have to do). The rest should be familiar. A simple word-alignment model Okay, as a last example we'll do a simple word alignment model in the spirit of the IBM models. Note that this will be a *supervised* model; doing unsupervised stuff is a bit trickier.Here's some word alignment data. The dataset is triples of `E, A, F` where `A[i]` = list of words `E[i]` aligned to, or `[]` for null-aligned:
###Code
my_dataset = [
( "the blue house".split(),
([0], [2], [1]),
"la maison bleue".split() ),
( "the house".split(),
([0], [1]),
"la maison".split() ),
( "the flower".split(),
([0], [1]),
"la fleur".split() )
]
###Output
_____no_output_____
###Markdown
It's going to be useful to compute alignment mismatches at the word level between true alignments (like `[1,2]`) and predicted alignments (like `[2,3,4]`). We use intersection-over-union error:
###Code
def alignmentError(true, sys):
t = set(true)
s = set(sys)
if len(t | s) == 0: return 0.
return 1. - float(len(t & s)) / float(len(t | s))
###Output
_____no_output_____
###Markdown
And now we can define our structured prediction task. This is also an LDF problem. Basically for each word on the English side, we'll loop over all possible phrases on the Foreign side to which it could align (maximum phrase length of three). For each of these options we'll create an example to be fed into the LDF classifier. We also ensure that the same foreign word cannot be covered by multiple English words, though this might not be a good idea in general.
###Code
class WordAligner(pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
pyvw.SearchTask.__init__(self, vw, sch, num_actions)
sch.set_options( sch.AUTO_HAMMING_LOSS | sch.IS_LDF | sch.AUTO_CONDITION_FEATURES )
def makeExample(self, E, F, i, j0, l):
f = 'Null' if j0 is None else [ F[j0+k] for k in range(l+1) ]
ex = self.vw.example( { 'e': E[i],
'f': f,
'p': '_'.join(f),
'l': str(l),
'o': [str(i-j0), str(i-j0-l)] if j0 is not None else [] },
labelType = self.vw.lCostSensitive )
lab = 'Null' if j0 is None else str(j0+l)
ex.set_label_string(lab + ':0')
return ex
def _run(self, alignedSentence):
E,A,F = alignedSentence
# for each E word, we pick a F span
covered = {} # which F words have been covered so far?
output = []
for i in range(len(E)):
examples = [] # contains vw examples
spans = [] # contains triples (alignment error, index in examples, [range])
# empty span:
examples.append( self.makeExample(E, F, i, None, None) )
spans.append( (alignmentError(A[i], []), 0, []) )
# non-empty spans
for j0 in range(len(F)):
for l in range(3): # max phrase length of 3
if j0+l >= len(F): break
                    if (j0+l) in covered: break
id = len(examples)
examples.append( self.makeExample(E, F, i, j0, l) )
spans.append( (alignmentError(A[i], range(j0,j0+l+1)), id, range(j0,j0+l+1)) )
sortedSpans = []
for s in spans: sortedSpans.append(s)
sortedSpans.sort()
oracle = []
for id in range(len(sortedSpans)):
if sortedSpans[id][0] > sortedSpans[0][0]: break
oracle.append( sortedSpans[id][1] )
pred = self.sch.predict(examples = examples,
my_tag = i+1,
oracle = oracle,
condition = [ (i, 'p'), (i-1, 'q') ] )
for ex in examples: ex.finish()
output.append( spans[pred][2] )
for j in spans[pred][2]:
covered[j] = True
return output
###Output
_____no_output_____
###Markdown
The only really complicated thing here is computing the oracle. What we do is, for each possible alignment, compute an intersection-over-union error rate. The oracle is then that alignment that achieves the smallest (local) error rate. This is not perfect, but is good enough. One interesting thing here is that now the `oracle` could be a *list*; this is completely supported by the underlying algorithms.We can train and test this model to make sure it does the right thing:
###Code
vw = pyvw.Workspace("--search 0 --csoaa_ldf m --search_task hook --ring_size 1024 --quiet -q ef -q ep")
task = vw.init_search_task(WordAligner)
# BUG: This is currently broken due to incorrect oracle generation. Currently under investigation.
#for p in range(10):
# task.learn(my_dataset)
#print(task.predict( ("the blue flower".split(), ([],[],[]), "la fleur bleue".split()) ))
###Output
_____no_output_____
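###Markdown
To illustrate the oracle computation described above, here is a tiny standalone sketch for a single English word. The alignment and the Foreign sentence length are made up; it reuses the `alignmentError` helper defined earlier and mirrors the span enumeration in `_run`.
###Code
# hypothetical oracle computation for one English word
true_alignment = [1]  # A[i] for the word we are aligning (assumed)
F_len = 3             # assumed length of the Foreign sentence
spans = [(alignmentError(true_alignment, []), 0, [])]  # the empty span sits at index 0
idx = 1
for j0 in range(F_len):
    for l in range(3):  # max phrase length of 3
        if j0 + l >= F_len: break
        cand = list(range(j0, j0 + l + 1))
        spans.append((alignmentError(true_alignment, cand), idx, cand))
        idx += 1
best = min(err for err, _, _ in spans)
oracle = [i for err, i, _ in spans if err == best]
print(oracle)  # every span index that ties for the lowest error
###Output
_____no_output_____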
###Markdown
Search - Speech Tagger This tutorial walks you through writing learning to search code using the VW python interface. Once you've completed this, you can graduate to the C++ version, which will be faster for the computer but more painful for you.The "learning to search" paradigm solves problems that look like the following. You have a sequence of decisions to make. At the end of making these decisions, the world tells you how bad your decisions were. You want to condition later decisions on earlier decisions. But thankfully, at "training time," you have access to an *oracle* that will tell you the right answer. Let's start with a simple example: sequence labeling for Part of Speech tagging. The goal is to take a sequence of words ("the monster ate a big sandwich") and label them with their parts of speech (in this case: Det Noun Verb Det Adj Noun).We will choose to solve this problem with left-to-right search. I.e., we'll label the first word, then the second then the third and so on.For any vw project in python, we have to start by importing the pyvw library:
###Code
import vowpalwabbit
###Output
_____no_output_____
###Markdown
Now, let's define our data first. We'll do this first by defining the labels (one annoying thing is that labels in vw have to be integers):
###Code
DET = 1
NOUN = 2
VERB = 3
ADJ = 4
my_dataset = [ [(DET , 'the'),
(NOUN, 'monster'),
(VERB, 'ate'),
(DET , 'a'),
(ADJ , 'big'),
(NOUN, 'sandwich')],
[(DET , 'the'),
(NOUN, 'sandwich'),
(VERB, 'was'),
(ADJ , 'tasty')],
[(NOUN, 'it'),
(VERB, 'ate'),
(NOUN, 'it'),
(ADJ , 'all')] ]
print(my_dataset[2])
###Output
_____no_output_____
###Markdown
Here we have an example of a (correctly) tagged sentence.Now, we need to write the structured prediction code. To do this, we have to write a new class that derives from the `pyvw.SearchTask` class.This class *must* have two functions: `__init__` and `_run`.The initialization function takes three arguments (plus `self`): a vw object (`vw`), a search object (`sch`), and the number of actions (`num_actions`) that this object has been initialized with. Within the initialization function, we must first initialize the parent class, and then we can set whatever options we want via `sch.set_options(...)`. Of course we can also do whatever additional initialization we like.The `_run` function executes the sequence of decisions on a given input. The input will be of whatever type our data is (so, in the above example, it will be a list of (label,word) pairs).Here is a basic implementation of sequence labeling:
###Code
class SequenceLabeler(vowpalwabbit.pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
# you must must must initialize the parent class
# this will automatically store self.sch <- sch, self.vw <- vw
vowpalwabbit.pyvw.SearchTask.__init__(self, vw, sch, num_actions)
# set whatever options you want
sch.set_options( sch.AUTO_HAMMING_LOSS | sch.AUTO_CONDITION_FEATURES )
def _run(self, sentence): # it's called _run to remind you that you shouldn't call it directly!
output = []
for n in range(len(sentence)):
pos,word = sentence[n]
# use "with...as..." to guarantee that the example is finished properly
ex = self.vw.example({'w': [word]})
pred = self.sch.predict(examples=ex, my_tag=n+1, oracle=pos, condition=[(n,'p'), (n-1, 'q')])
self.vw.finish_example([ex]) # must pass the example in as a list because search is a MultiEx reduction
output.append(pred)
return output
###Output
_____no_output_____
###Markdown
Let's unpack this a bit.The `__init__` function is simple. It first calls the parent initializer and then sets some options. The options it sets are two things designed to make the programmer's life easier. The first is `AUTO_HAMMING_LOSS`. Remember earlier we said that when the sequence of decision is made, you have to say how bad it was? This says that we want this to be computed automatically by comparing the individual decisions to the oracle decisions, and defining the loss to be the sum of incorrect decisions.The second is `AUTO_CONDITION_FEATURES`. This is a bit subtler. Later in the `_run` function, we will say that the label of the `n`th word depends on the label of the `n-1`th word. In order to get the underlying classifier to *pay attention* to that conditioning, we need to add features. We could do that manually (we'll do this later) or we can ask vw to do it automatically for us. For simplicity, we choose the latter.The `_run` function takes a sentence (list of pos/word pairs) as input. We loop over each word position `n` in the sentence and extract the `pos,word` pair. We then construct a VW example that consists of a single feature (the `word`) in the 'w' namespace. Given that example `ex`, we make a search-based prediction by calling `self.sch.predict(...)`. This is a fairly complicated function that takes a number of arguments. Here, we are calling it with the following: - `examples=ex`: This tells the predictor what features to use. - `my_tag=n+1`: In general, we want to condition the prediction of the `n`th label on the `n-1`th label. In order to do this, we have to give each prediction a "name" so that we can refer back to it in the future. This name needs to be an integer `>= 1`. So we'll call the first word `1`, the second word `2`, and so on. It has to be `n+1` and not `n` because of the 1-based requirement. - `oracle=pos`: As mentioned before, on training data, we need to tell the system what the "true" (or "best") decision is at this point in time. Here, it is the given part of speech label. - `condition=(n,'p')`: This says that this prediction depends on the output of whichever-prediction-was-called-`n`, and that the "nature" of that condition is called 'p' (for "predecessor" in this case, though this is entirely up to you)Now, we're ready to train the model. We do this in three steps. First, we initialize a vw object, telling it that we have a `--search` task with 4 labels, second that the specific type of `--search_task` is `hook` (you will always use the `hook` task) and finally that we want it to be quiet and use a larger `ring_size` (you can ignore the `ring_size` for now).
###Code
vw = vowpalwabbit.Workspace("--search 4 --search_task hook", quiet=True)
###Output
_____no_output_____
###Markdown
Next, we need to initialize the search task. We use the `vw.init_search_task` function to do this:
###Code
sequenceLabeler = vw.init_search_task(SequenceLabeler)
###Output
_____no_output_____
###Markdown
Finally, we can train on the dataset we defined earlier, using `sequenceLabeler.learn` (the `.learn` function is inherited from the `pyvw.SearchTask` class). The `.learn` function takes any iterator over data. Whatever type of data it iterates over is what it will feed to your `_run` function.
###Code
for i in range(10):
sequenceLabeler.learn(my_dataset)
###Output
_____no_output_____
###Markdown
Of course, we want to see if it's learned anything. So let's create a single test example:
###Code
test_example = [ (1,w) for w in "the sandwich ate a monster".split() ]
print(test_example)
###Output
_____no_output_____
###Markdown
We've used a constant label (`1`) for every word so you can be sure that vw isn't cheating and it's actually making predictions:
###Code
out = sequenceLabeler.predict(test_example)
print(out)
###Output
_____no_output_____
###Markdown
If we look back at our POS tag definitions, this is DET NOUN VERB DET NOUN, which is indeed correct! Removing the AUTO features In the above example we used both AUTO_HAMMING_LOSS and AUTO_CONDITION_FEATURES. To make more explicit what these are doing, let's rewrite our `SequenceLabeler` class without them! Here's a version that gets rid of both simultaneously. It is only modestly more complex:
###Code
class SequenceLabeler2(vowpalwabbit.pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
vowpalwabbit.pyvw.SearchTask.__init__(self, vw, sch, num_actions)
def _run(self, sentence):
output = []
loss = 0.
for n in range(len(sentence)):
pos,word = sentence[n]
prevPred = output[n-1] if n > 0 else '<s>'
ex = self.vw.example({'w': [word], 'p': [prevPred]})
pred = self.sch.predict(examples=ex, my_tag=n+1, oracle=pos, condition=(n,'p'))
            self.vw.finish_example([ex])
output.append(pred)
if pred != pos:
loss += 1.
self.sch.loss(loss)
return output
sequenceLabeler2 = vw.init_search_task(SequenceLabeler2)
sequenceLabeler2.learn(my_dataset)
print(sequenceLabeler2.predict( [(1,w) for w in "the sandwich ate a monster".split()] ))
###Output
_____no_output_____
###Markdown
If executed correctly, this should have printed `[1, 2, 3, 1, 2]`. There are essentially two things that changed here. In order to get rid of AUTO_HAMMING_LOSS, we had to keep track of how many errors the predictor had made. This is done by checking whether `pred != pos` inside the loop, and then at the end calling `self.sch.loss(loss)` to tell the search procedure how well we did. In order to get rid of AUTO_CONDITION_FEATURES, we need to explicitly add the previous prediction as features to the example we are predicting with. Here, we've done this by extracting the previous prediction (`prevPred`) and explicitly adding it as a feature (in the 'p' namespace) during the example construction. **Important Note:** even though we're not using AUTO_CONDITION_FEATURES, we *still* must tell the search process that this prediction depends on the previous prediction. We need to do this because the learning algorithm automatically memoizes certain computations, and so it needs to know, when memoizing, that this prediction *might have been different* if a previous decision had been different. Very silly Covington-esque dependency parsing Let's also try a variant of dependency parsing to see that this doesn't only work for sequence-labeling tasks. First we need to define some data:
###Code
# the label for each word is its parent, or -1 for root
my_dataset = [ [("the", 1), # 0
("monster", 2), # 1
("ate", -1), # 2
("a", 5), # 3
("big", 5), # 4
("sandwich", 2) ] # 5
,
[("the", 1), # 0
("sandwich", 2), # 1
("is", -1), # 2
("tasty", 2)] # 3
,
[("a", 1), # 0
("sandwich", 2), # 1
("ate", -1), # 2
("itself", 2), # 3
]
]
###Output
_____no_output_____
###Markdown
For instance, in the first sentence, the parent of "the" is "monster"; the parent of "monster" is "ate"; and "ate" is the root.The basic idea of a Covington-style dependency parser is to loop over all O(N^2) word pairs and ask if one is the parent of the other. In a real parser you would want to make sure that you don't have cycles, that you have a unique root and (perhaps) that the resulting graph is projective. I'm not doing that here. Hopefully I'll add a shift-reduce parser example later that *does* do this. Here's an implementation of this idea:
###Code
class CovingtonDepParser(vowpalwabbit.pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
vowpalwabbit.pyvw.SearchTask.__init__(self, vw, sch, num_actions)
sch.set_options( sch.AUTO_HAMMING_LOSS | sch.AUTO_CONDITION_FEATURES )
def _run(self, sentence):
N = len(sentence)
# initialize our output so everything is a root
output = [-1 for i in range(N)]
for n in range(N):
wordN,parN = sentence[n]
for m in range(-1,N):
if m == n: continue
                wordM = sentence[m][0] if m >= 0 else "*root*"  # m == -1 denotes the root
# ask the question: is m the parent of n?
isParent = 2 if m == parN else 1
# construct an example
dir = 'l' if m < n else 'r'
ex = self.vw.example({'a': [wordN, dir + '_' + wordN],
'b': [wordM, dir + '_' + wordN],
'p': [wordN + '_' + wordM, dir + '_' + wordN + '_' + wordM],
'd': [ str(m-n <= d) + '<=' + str(d) for d in [-8, -4, -2, -1, 1, 2, 4, 8] ] +
[ str(m-n >= d) + '>=' + str(d) for d in [-8, -4, -2, -1, 1, 2, 4, 8] ] })
pred = self.sch.predict(examples = ex,
my_tag = (m+1)*N + n + 1,
oracle = isParent,
condition = [ (max(0, (m )*N + n + 1), 'p'),
(max(0, (m+1)*N + n ), 'q') ])
self.vw.finish_example([ex]) # must pass the example in as a list because search is a MultiEx reduction
if pred == 2:
output[n] = m
break
return output
###Output
_____no_output_____
###Markdown
In this, `output` stores the predicted tree and is initialized with every word being a root. We then loop over every word (`n`) and every possible parent (`m`, which can be -1, though that's really kind of unnecessary). The features are basically the words under consideration, the words paired with the direction of the edge, the pair of words, and then a bunch of (binned) distance features. We can train and run this parser with:
###Code
vw = vowpalwabbit.Workspace("--search 2 --search_task hook", quiet=True)
task = vw.init_search_task(CovingtonDepParser)
for p in range(10): # do ten passes over the training data
task.learn(my_dataset)
print('testing')
print(task.predict( [(w,-1) for w in "the monster ate a sandwich".split()] ))
print('should have printed [ 1 2 -1 4 2 ]')
###Output
_____no_output_____
###Markdown
One could argue that a more natural way to do this would be with LDF rather than the inner loop over `m`. We'll do that next. LDF-based Covington-style dependency parser One of the weirdnesses in the previous parser implementation is that it makes N-many binary decisions per word ("is word n my parent?") rather than a single N-way decision. The latter makes more sense.The challenge is that you cannot set this up as a vanilla multiclass classification problem because (a) the number of "classes" is a function of the input (a length N sentence will have N classes) and (b) class "1" and "2" don't mean anything consistently across examples.The way around this is label-dependent features (LDF). In LDF mode, the class ids are (essentially -- see caveat below) irrelevant. Instead, you simply provide features that depend on the label (hence "LDF"). In particular, for each possible label, you provide a *different* feature vector, and the goal of learning is to pick one of those as the "correct" one.Here's a re-implementation of Covington using LDF:
###Code
class CovingtonDepParserLDF(vowpalwabbit.pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
vowpalwabbit.pyvw.SearchTask.__init__(self, vw, sch, num_actions)
sch.set_options( sch.AUTO_HAMMING_LOSS | sch.IS_LDF | sch.AUTO_CONDITION_FEATURES )
def makeExample(self, sentence, n, m):
wordN = sentence[n][0]
wordM = sentence[m][0] if m >= 0 else '*ROOT*'
dir = 'l' if m < n else 'r'
ex = self.vw.example( { 'a': [wordN, dir + '_' + wordN],
'b': [wordM, dir + '_' + wordM],
'p': [wordN + '_' + wordM, dir + '_' + wordN + '_' + wordM],
'd': [ str(m-n <= d) + '<=' + str(d) for d in [-8, -4, -2, -1, 1, 2, 4, 8] ] +
[ str(m-n >= d) + '>=' + str(d) for d in [-8, -4, -2, -1, 1, 2, 4, 8] ] },
labelType=self.vw.lCostSensitive)
ex.set_label_string(str(m+2) + ":0") # project the m-indices (-1...N into 1...N+2)
return ex
def _run(self, sentence):
N = len(sentence)
# initialize our output so everything is a root
output = [-1 for i in range(N)]
for n in range(N):
# make LDF examples
examples = [ self.makeExample(sentence,n,m) for m in range(-1,N) if n != m ]
# truth
parN = sentence[n][1]
oracle = parN+1 if parN < n else parN # have to -1 because we excluded n==m from list
# make a prediction
pred = self.sch.predict(examples = examples,
my_tag = n+1,
oracle = oracle,
condition = [ (n, 'p'), (n-1, 'q') ] )
output[n] = pred-1 if pred < n else pred # have to +1 because n==m excluded
for ex in examples: ex.finish() # clean up
return output
###Output
_____no_output_____
###Markdown
There are a few things going on here. Let's focus first on the `__init__` function. The only difference here is that when we call `sch.set_options` we provide `sch.IS_LDF` to declare that this is an LDF task. Let's skip the `makeExample` function for a minute and look at the `_run` function. You should recognize most of this from the non-LDF version. We initialize the `output` (parent) of every word to `-1` (meaning that every word is connected to the root). For each word `n`, we construct `N`-many examples: one for every `m` in -1..(N-1), except for the current word `n` because you cannot have self-loops. If we were being more clever, we would only do the ones that won't result in the creation of a cycle, but we're not being clever. Now, because the "labels" are just examples, it's a bit more complicated to specify the oracle. The oracle is an *index* into the examples list. So if `oracle` is the oracle action, then `examples[oracle]` is the corresponding example. We compute the oracle as follows. `parN` is the *actual* parent, which is going to be something in the range `-1 .. (N-1)`. If `parN < n`, the correct index is `parN + 1`, because the root option (`m == -1`) sits at index 0 and shifts everything up by one. If `parN > n` (note: it cannot be equal to `n`), then, because `n == m` is left out of the examples list, that shift is cancelled and the correct index is just `parN`. Phew. We then ask for a prediction. Now, instead of giving a single example, we give the list of examples. The tag works the same way, as does the conditioning. Once we get a prediction out (called `pred`) we need to figure out what parent it actually corresponds to. This is simply un-doing the computation two paragraphs ago! Finally -- and this is skippable if you trust the Python garbage collector -- we tell VW that we're done with all the examples we created. We do this just to be pedantic; it's optional. Okay, now let's go back to the `makeExample` function. This takes two word ids (`n` and `m`) and makes an example that roughly says "what would it look like if I had an edge from `n` to `m`?" We construct basically the same features as before. There are two major changes, though: 1. When we run `self.vw.example(...)` we provide `labelType=self.vw.lCostSensitive` as an argument. This is because under the hood, vw treats LDF examples as cost-sensitive classification examples. This means they need to have cost-sensitive labels, so that's how we need to create them. 2. We explicitly set the label of this example to `str(m+2)+":0"`. What is this? Well, this is _optional_ but recommended. Here's the issue. In LDF mode, recall that labels have no intrinsic meaning. This means that when vw does auto-conditioning, it's not really clear what to use as the "previous prediction." By giving explicit label names (in this case, m+2) we're recording the position of the last parent, which may be useful for predicting the next parent. We could avoid this necessity if we did our own feature engineering on the history, but for now, this seems to capture the right intuition. Given all this, we can now train and test our parser:
###Code
vw = vowpalwabbit.Workspace("--search 0 --csoaa_ldf m --search_task hook", quiet=True)
task = vw.init_search_task(CovingtonDepParserLDF)
#BUG: This currently does not work because oracle generation is incorrect (generates invalid oracle values)
#for p in range(2): # do two passes over the training data
# task.learn(my_dataset)
#print(task.predict( [(w,-1) for w in "the monster ate a sandwich".split()] ))
###Output
_____no_output_____
###Markdown
The correct parse of this sentence is `[1, 2, -1, 4, 2]` which is what this should have printed.There are two major things to notice in the initialization of VW. The first is that we say `--search 0`. The zero labels argument to `--search` declares that this is going to be an LDF task. We also have to tell VW that we want an LDF-enabled cost-sensitive learner, which is what `--csoaa_ldf m` does (if you're wondering, `m` means "multiline" -- just treat it as something you have to do). The rest should be familiar. A simple word-alignment model Okay, as a last example we'll do a simple word alignment model in the spirit of the IBM models. Note that this will be a *supervised* model; doing unsupervised stuff is a bit trickier.Here's some word alignment data. The dataset is triples of `E, A, F` where `A[i]` = list of words `E[i]` aligned to, or `[]` for null-aligned:
###Code
my_dataset = [
( "the blue house".split(),
([0], [2], [1]),
"la maison bleue".split() ),
( "the house".split(),
([0], [1]),
"la maison".split() ),
( "the flower".split(),
([0], [1]),
"la fleur".split() )
]
###Output
_____no_output_____
###Markdown
It's going to be useful to compute alignment mismatches at the word level between true alignments (like `[1,2]`) and predicted alignments (like `[2,3,4]`). We use intersection-over-union error:
###Code
def alignmentError(true, sys):
t = set(true)
s = set(sys)
if len(t | s) == 0: return 0.
return 1. - float(len(t & s)) / float(len(t | s))
###Output
_____no_output_____
###Markdown
And now we can define our structured prediction task. This is also an LDF problem. Basically for each word on the English side, we'll loop over all possible phrases on the Foreign side to which it could align (maximum phrase length of three). For each of these options we'll create an example to be fed into the LDF classifier. We also ensure that the same foreign word cannot be covered by multiple English words, though this might not be a good idea in general.
###Code
class WordAligner(vowpalwabbit.pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
vowpalwabbit.pyvw.SearchTask.__init__(self, vw, sch, num_actions)
sch.set_options( sch.AUTO_HAMMING_LOSS | sch.IS_LDF | sch.AUTO_CONDITION_FEATURES )
def makeExample(self, E, F, i, j0, l):
f = 'Null' if j0 is None else [ F[j0+k] for k in range(l+1) ]
ex = self.vw.example( { 'e': E[i],
'f': f,
'p': '_'.join(f),
'l': str(l),
'o': [str(i-j0), str(i-j0-l)] if j0 is not None else [] },
labelType = self.vw.lCostSensitive )
lab = 'Null' if j0 is None else str(j0+l)
ex.set_label_string(lab + ':0')
return ex
def _run(self, alignedSentence):
E,A,F = alignedSentence
# for each E word, we pick a F span
covered = {} # which F words have been covered so far?
output = []
for i in range(len(E)):
examples = [] # contains vw examples
spans = [] # contains triples (alignment error, index in examples, [range])
# empty span:
examples.append( self.makeExample(E, F, i, None, None) )
spans.append( (alignmentError(A[i], []), 0, []) )
# non-empty spans
for j0 in range(len(F)):
for l in range(3): # max phrase length of 3
if j0+l >= len(F): break
                    if (j0+l) in covered: break
id = len(examples)
examples.append( self.makeExample(E, F, i, j0, l) )
spans.append( (alignmentError(A[i], range(j0,j0+l+1)), id, range(j0,j0+l+1)) )
sortedSpans = []
for s in spans: sortedSpans.append(s)
sortedSpans.sort()
oracle = []
for id in range(len(sortedSpans)):
if sortedSpans[id][0] > sortedSpans[0][0]: break
oracle.append( sortedSpans[id][1] )
pred = self.sch.predict(examples = examples,
my_tag = i+1,
oracle = oracle,
condition = [ (i, 'p'), (i-1, 'q') ] )
for ex in examples: ex.finish()
output.append( spans[pred][2] )
for j in spans[pred][2]:
covered[j] = True
return output
###Output
_____no_output_____
###Markdown
The only really complicated thing here is computing the oracle. What we do is, for each possible alignment, compute an intersection-over-union error rate. The oracle is then that alignment that achieves the smallest (local) error rate. This is not perfect, but is good enough. One interesting thing here is that now the `oracle` could be a *list*; this is completely supported by the underlying algorithms.We can train and test this model to make sure it does the right thing:
###Code
vw = vowpalwabbit.Workspace("--search 0 --csoaa_ldf m --search_task hook -q ef -q ep", quiet=True)
task = vw.init_search_task(WordAligner)
# BUG: This is currently broken due to incorrect oracle generation. Currently under investigation.
#for p in range(10):
# task.learn(my_dataset)
#print(task.predict( ("the blue flower".split(), ([],[],[]), "la fleur bleue".split()) ))
###Output
_____no_output_____ |
python/text-analytics-scoring/sentiment/text-analytics-sentiment.ipynb | ###Markdown
Text Analytics - Sentiment Authentication -- optional step. If you have registered a client (see the authentication use case), uncomment the code below to generate an access token.
###Code
# import requests, json, base64
# sasserver = ""
# username = ""
# password = ""
# client_id = ""
# client_secret = ""
# url = sasserver + "/SASLogon/oauth/token"
# data = {
# 'grant_type': 'password',
# 'username': username,
# 'password': password
# }
# headers = {'Accept': 'application/json'}
# response = requests.post(url, headers=headers, data=data, auth=(client_id, client_secret), verify=False).json()
# access_token = response["access_token"]
# print("The access token is: " + access_token)
###Output
_____no_output_____
###Markdown
Import modules, variable assignment. The first step of the process is to import the required packages and assign variable values.
###Code
import requests
import json
## Variables to assign
sasserver = ""
text = " Today it rained; we didn't go to school\r\n456, Our team won the game\r\n789, The funeral was a sad event\r\n012, The quick brown fox jumped over the lazy dog\r\n345, What a long strange trip it's been\r\n678, The telephone was rang and I handed it to Liz. She said \"This isn't who it would be If it wasn't who it is\"\r\n901, She was having a no good terrible very bad day\r\n234, If I could be the sun I'd radiate like Africa and Smile upon the world Intergalactic love laughter"
oaccess_token = "" # Get from the authentication project
###Output
_____no_output_____
###Markdown
Create functions Create session to start making calls
###Code
def getSession(access_token, sasserver):
headers_sesh = {
"Content-Type": "application/json",
"Authorization": "Bearer " + access_token }
url = sasserver + '/cas-shared-default-http/cas/sessions'
try:
response = requests.post(url, headers=headers_sesh, verify=False).json()
return(response)
except requests.exceptions.RequestException as e:
return e
###Output
_____no_output_____
###Markdown
Load data from the form to be scored
###Code
def csv_load(sessionId, access_token, sasserver, data):
url = sasserver + "/cas-shared-default-http/cas/sessions/"+sessionId+"/actions/upload"
payload = "UID,Text\r\n"+data
headers = {
'Accept': 'application/json',
'Content-Type': 'binary/octet-stream',
'JSON-Parameters': '{"casout":{"caslib":"casuser","name":"tableToScore","replace":true},"importOptions":{"fileType":"csv"}}',
'Authorization': 'Bearer '+ access_token
}
try:
response = requests.request("PUT", url, headers=headers, data = payload, verify=False)
return str(response)
except requests.exceptions.RequestException as e:
return e
###Output
_____no_output_____
###Markdown
Run Sentiment Model
###Code
def callSentimentModel(sessionId, access_token, sasserver):
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": "Bearer " + access_token }
url = sasserver + '/cas-shared-default-http/cas/sessions/'+sessionId+'/actions/sentimentAnalysis.applySent'
payload = "{\"table\":{\"caslib\":\"casuser\", \"name\":\"tableToScore\"},\r\n\"docId\":\"uid\",\r\n\"text\":\"text\",\r\n\"language\":\"ENGLISH\",\r\n\"casOut\":{\"caslib\":\"casuser\", \"name\":\"sentimentAnalysis\", \"promote\":true}}"
try:
response = requests.request("POST", url, headers=headers, data=payload, verify=False).json()
return(response)
except requests.exceptions.RequestException as e:
return e
###Output
_____no_output_____
###Markdown
Get the data using the Fetch action
###Code
def getData(sasserver, sessionId, access_token, tblName):
url = sasserver + "/cas-shared-default-http/cas/sessions/" + sessionId + "/actions/table.fetch"
payload = "{ \"table\": {\"caslib\":\"casuser\", \"name\":\"" + tblName + "\"} }"
headers = {
'Authorization': "Bearer " + access_token,
'Content-Type': 'application/json'
}
try:
response = requests.request("POST", url, headers=headers, data = payload, verify=False).json()
return(response)
except requests.exceptions.RequestException as e:
return e
###Output
_____no_output_____
###Markdown
Drop table at end of use case
###Code
def dropTable(sasserver, sessionId, oaccess_token):
url = sasserver + "/cas-shared-default-http/cas/sessions/" + sessionId + "/actions/table.dropTable"
payload = "{\"caslib\": \"casuser\", \"name\": \"sentimentanalysis\"}"
headers = {
'Authorization': "Bearer " + oaccess_token,
'Content-Type': 'application/json'
}
try:
response = requests.request("POST", url, headers=headers, data=payload, verify=False).json
return(response)
except requests.exceptions.RequestException as e:
return e
###Output
_____no_output_____
###Markdown
End session
###Code
def endSession(sasserver, sessionId, oaccess_token):
url = sasserver + "/cas-shared-default-http/cas/sessions/" + sessionId
headers = {
'Authorization': "Bearer " + oaccess_token,
'Content-Type': 'application/json'
}
try:
response = requests.request("DELETE", url, headers=headers, verify=False).json
return(response)
except requests.exceptions.RequestException as e:
return e
###Output
_____no_output_____
###Markdown
Run functions
###Code
# Get Session
sessionId = getSession(oaccess_token, sasserver)
print(sessionId)
# Upload Data
unique_id = 123
parsedData = str(unique_id)+","+text
print(parsedData)
upload_response = csv_load(sessionId["session"], oaccess_token, sasserver, parsedData)  # upload the table that the sentiment action scores
print(upload_response)
# Run Sentiment Model
sentiment_output = callSentimentModel(sessionId["session"], oaccess_token, sasserver)
sentiment_output_format = json.dumps(sentiment_output, indent=2)
print(sentiment_output_format) # optional print statement to view output
# Get Sentiment data
sentiment_data = getData(sasserver, sessionId["session"], oaccess_token, "sentimentAnalysis")["results"]
sentiment_format = json.dumps(sentiment_data, indent=2)
print(sentiment_format)
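# Optional: flatten the fetched sentiment rows into a DataFrame for easier inspection.
# NOTE: the key names used here ("Fetch", "schema", "rows") are assumptions about the CAS REST
# payload shape -- check the JSON printed above and adjust if your response differs.
import pandas as pd  # only needed for this optional step
fetch_tbl = sentiment_data.get("Fetch", {}) if isinstance(sentiment_data, dict) else {}
sentiment_columns = [col.get("name") for col in fetch_tbl.get("schema", [])]
sentiment_rows = fetch_tbl.get("rows", [])
sentiment_df = pd.DataFrame(sentiment_rows, columns=sentiment_columns or None)
print(sentiment_df.head())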
# Drop table
drop_table = dropTable(sasserver, sessionId["session"], oaccess_token)
# End session
end_session_response = endSession(sasserver, sessionId["session"], oaccess_token)
print(end_session_response)
###Output
_____no_output_____ |
Analytical-Marketing/HW/HW11.ipynb | ###Markdown
Assignment 11. A project on building a machine learning model for regression. 1. Load the libraries
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import LabelEncoder
###Output
_____no_output_____
###Markdown
2. Load the dataset
###Code
insurance = pd.read_csv('insurance.csv')
insurance.head()
###Output
_____no_output_____
###Markdown
Column descriptions:
- **age**: age in years
- **sex**: male/female
- **bmi**: body mass index
- **children**: number of children covered by the insurance
- **smoker**: smoker / non-smoker
- **region**: the region where the insurance company operates
- **charges**: medical costs billed to the insurance company
3. Describe the dataset
###Code
insurance.shape
insurance.describe()
insurance[['sex', 'smoker', 'region']].describe()
###Output
_____no_output_____
###Markdown
4. Encoding categorical data First we encode the columns that have only two categories
###Code
label_encoder = LabelEncoder()
columns = ['sex', 'smoker']
for column in columns:
insurance[column] = label_encoder.fit_transform(insurance[column])
insurance.head()
###Output
_____no_output_____
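###Markdown
A small sketch to inspect the mapping produced by the encoder. Note that the single label_encoder instance only keeps the classes of the last column it was fitted on ('smoker' here); LabelEncoder assigns integers to the classes in sorted order.
###Code
# show the learned class-to-integer mapping of the last fitted column ('smoker')
dict(zip(label_encoder.classes_, label_encoder.transform(label_encoder.classes_)))
###Output
_____no_output_____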
###Markdown
Now encode the column that has four categories, using one-hot encoding
###Code
ohe_region = pd.get_dummies(insurance['region'], prefix='ohe_reg')
insurance = pd.concat([insurance, ohe_region], axis=1)
insurance = insurance.drop(columns=['region'])
insurance.head()
# drop one dummy column to avoid perfect multicollinearity (the dummy-variable trap)
insurance = insurance.drop(columns=['ohe_reg_southwest'])
insurance
###Output
_____no_output_____
###Markdown
5. Data visualization
###Code
insurance[['age', 'bmi', 'children', 'charges']].plot(kind='box',
subplots=True,
layout=(1, 4),
sharex=False,
sharey=False,
figsize=(15, 5))
plt.show()
insurance.hist(figsize = (10, 10))
plt.show()
pd.plotting.scatter_matrix(insurance[['age', 'bmi', 'children', 'charges']], figsize=(10,10))
plt.show()
import seaborn as sns
fig = plt.figure(figsize = (15, 10))
sns.heatmap(insurance.corr(), annot = True, cmap="YlGnBu")
###Output
_____no_output_____
###Markdown
6. Split the data into training and test sets
###Code
insurance_Y = insurance['charges'].values
insurance_X = insurance.loc[:, insurance.columns != 'charges'].values
insurance_X
X_train, X_test, Y_train, Y_test = train_test_split(insurance_X, insurance_Y, test_size = 0.33, random_state = 42)
###Output
_____no_output_____
###Markdown
7. Compare different algorithms
###Code
models = [('LR', LinearRegression()),
('LASSO', Lasso()),
('EN', ElasticNet()),
('KNN', KNeighborsRegressor()),
('CART', DecisionTreeRegressor()),
('SVR', SVR())]
kf = KFold(n_splits = 5, random_state = 42, shuffle = True)
results = []
names = []
for name, model in models:
cv_results = cross_val_score(model, X_train, Y_train, cv = kf, scoring = 'neg_mean_absolute_error')
results.append(cv_results)
names.append(name)
print("%s: %f (%f)" % (name, cv_results.mean(), cv_results.std()))
fig = plt.figure(figsize=(8, 8))
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
###Output
_____no_output_____
###Markdown
8. Standardize the dataset
###Code
pipelines = [
('ScaledLR', Pipeline([('Scaler', StandardScaler()),('LR', LinearRegression())])),
('ScaledLASSO', Pipeline([('Scaler', StandardScaler()),('LASSO', Lasso())])),
('ScaledEN', Pipeline([('Scaler', StandardScaler()),('EN', ElasticNet())])),
('ScaledKNN', Pipeline([('Scaler', StandardScaler()),('KNN', KNeighborsRegressor())])),
('ScaledCART', Pipeline([('Scaler', StandardScaler()),('CART', DecisionTreeRegressor())])),
('ScaledSVR', Pipeline([('Scaler', StandardScaler()),('SVR', SVR())])),
]
results = []
names = []
for name, pipeline in pipelines:
cv_results = cross_val_score(pipeline, X_train, Y_train, cv = kf, scoring = 'neg_mean_absolute_error')
results.append(cv_results)
names.append(name)
print("%s: %f (%f)" % (name, cv_results.mean(), cv_results.std()))
fig = plt.figure(figsize=(8, 8))
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
###Output
_____no_output_____
###Markdown
9. Tune the algorithm
###Code
scaler = StandardScaler().fit(X_train)
rescaledX = scaler.transform(X_train)
parameters = {
"splitter": ["best", "random"],
"max_depth": [None, 1, 3, 5, 7, 9, 11, 12],
"min_samples_leaf": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"min_weight_fraction_leaf": [0.0, 0.1, 0.2, 0.3],
"max_leaf_nodes": [None, 10, 20]
}
model = DecisionTreeRegressor()
grid = GridSearchCV(estimator = model, param_grid = parameters, scoring = 'neg_mean_absolute_error', cv = kf)
grid_result = grid.fit(rescaledX, Y_train)
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
###Output
Best: -2704.243557 using {'max_depth': 5, 'max_leaf_nodes': None, 'min_samples_leaf': 9, 'min_weight_fraction_leaf': 0.0, 'splitter': 'best'}
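###Markdown
As an optional sketch, the tuned decision tree can also be checked against the held-out test set via the best estimator kept by the grid search; the test features are scaled with the scaler fitted on the training data above.
###Code
# evaluate the best decision tree found by the grid search on the test set
rescaledTestX = scaler.transform(X_test)
tree_predictions = grid_result.best_estimator_.predict(rescaledTestX)
print(mean_absolute_error(Y_test, tree_predictions))
###Output
_____no_output_____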
###Markdown
10. Ensemble methods
###Code
ensembles = [
('ScaledAB', Pipeline([('Scaler', StandardScaler()),('AB', AdaBoostRegressor())])),
('ScaledGBM', Pipeline([('Scaler', StandardScaler()),('GBM', GradientBoostingRegressor())])),
('ScaledRF', Pipeline([('Scaler', StandardScaler()),('RF', RandomForestRegressor(n_estimators=10))])),
('ScaledET', Pipeline([('Scaler', StandardScaler()),('ET', ExtraTreesRegressor(n_estimators=10))]))
]
results = []
names = []
for name, ensemble in ensembles:
cv_results = cross_val_score(ensemble, X_train, Y_train, cv = kf, scoring = 'neg_mean_absolute_error')
results.append(cv_results)
names.append(name)
print("%s: %f (%f)" % (name, cv_results.mean(), cv_results.std()))
fig = plt.figure(figsize=(8, 8))
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
###Output
_____no_output_____
###Markdown
11. Tune the algorithm
###Code
scaler = StandardScaler().fit(X_train)
rescaledX = scaler.transform(X_train)
param_grid = {'n_estimators': [50, 100, 150, 200, 250, 300, 350, 400]}
model = GradientBoostingRegressor(random_state = 42)
grid = GridSearchCV(estimator = model, param_grid = param_grid, scoring = 'neg_mean_absolute_error', cv = kf)
grid_result = grid.fit(rescaledX, Y_train)
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
###Output
Best: -2601.684907 using {'n_estimators': 50}
###Markdown
12. Final model
###Code
scaler = StandardScaler().fit(X_train)
rescaledX = scaler.transform(X_train)
model = GradientBoostingRegressor(random_state = 42, n_estimators=50)
model.fit(rescaledX, Y_train)
rescaledTestX = scaler.transform(X_test)
predictions = model.predict(rescaledTestX)
print(mean_absolute_error(Y_test, predictions))
###Output
2524.29113109176
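###Markdown
A minimal sketch of how the final model could be applied to a new customer. The feature order (age, sex, bmi, children, smoker, then the three remaining one-hot region columns ohe_reg_northeast, ohe_reg_northwest, ohe_reg_southeast) is assumed from the preprocessing steps above, and the example row itself is hypothetical.
###Code
# hypothetical new customer: 45-year-old male smoker, BMI 31.5, two children, southeast region
new_customer = np.array([[45, 1, 31.5, 2, 1, 0, 0, 1]])
# reuse the scaler fitted on the training data, then predict the charges
new_customer_scaled = scaler.transform(new_customer)
print(model.predict(new_customer_scaled))
###Output
_____no_output_____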
|
project_jupyter_notebooks/RINGO_T1.3/modules/flasksampling_modules.ipynb | ###Markdown
Notebook with Python Functions for the Flask Sampling Notebook This notebook includes a collection of functions specific for developing and testing the flask sampling strategy in RINGO Task 1.3- [Python Modules and settings](modules)- [Run sampling routines with STILT results](runall) - [Run temporal sampling and footprint aggregation functions](selectionSTILT) - [Dictionary with all stations available in STILT](STILTdictionary) - [Temporal sampling of STILT time series](samplingSTILT) - [STILT footprints for a selected station and time range](footprints) - [Coordinates of big cities in Europe](bigCities) - [Comparison of STILT results with ICOS measurements](comparison)- [Run sampling routines with ICOS measurements](runobs) - [Temporal sampling of ICOS measurements](samplingICOS) - [Test sensitivity of selection strategy on monthly mean](sensitivity)- [Create widgets for selection (STILT results + ICOS data)](widgetsSTILT)- [Create widgets for selection (ICOS data only)](widgetsICOS) Import Python modules
###Code
# import required libraries
import sys
import os
import netCDF4 as cdf
import numpy as np
import datetime as dt
import pandas as pd
from pandas.tseries.frequencies import to_offset
import matplotlib.pyplot as p
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
from cartopy import config
import cartopy.crs as ccrs
from cartopy.feature import NaturalEarthFeature, LAND, COASTLINE, LAKES
import cartopy.feature as cfeature
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')
from dateutil.rrule import rrule, MONTHLY
from IPython.core.display import display, HTML, Markdown
from ipywidgets import interact, interact_manual, Dropdown, SelectMultiple, HBox, VBox, Button, Output, FloatText, IntText, IntRangeSlider
from IPython.display import clear_output
#display(HTML("<style>.container { width:100% !important; }</style>"))
#Import ICOS tools:
from icoscp.cpb.dobj import Dobj
from icoscp.sparql.runsparql import RunSparql
from icoscp.sparql import sparqls
###Output
_____no_output_____
###Markdown
Basic settings
###Code
# settings
# path to RINGO specific STILT results (1-hourly footprints and concentrations for selected stations)
path_stilt = '/data/stilt/'
# path to footprint tool results (3-hrl footprints)
path_stiltweb = '/data/stiltweb/'
# path to user home directory
HOME = os.path.expanduser("~")+'/'
# create directory for storing plots - if not already exists
path_plots = 'Figures/'
if os.access('../', os.W_OK):
if not os.path.exists(path_plots):
os.makedirs(path_plots)
else:
os.makedirs(HOME+path_plots, exist_ok=True)
path_plots = HOME+path_plots
###Output
_____no_output_____
###Markdown
Load basic routines for handling ICOS time series and STILT model results (3-hourly)
###Code
#sys.path.insert(0,'modules')
#Import STILT tools:
from STILT_modules_plus import create_STILT_dictionary, print_STILT_dictionary
from STILT_modules_plus import lonlat_2_ixjy, read_emissions
from STILT_modules_plus import read_stilt_timeseries_RINGO_T13
from STILT_modules_plus import read_icos_data
from extra_sparqls import atc_station_tracer_query
###Output
_____no_output_____
###Markdown
Set colors and fonts
###Code
# define colors
orange='#ff8c00'
lime='#00ff00'
aqua='#00ffff'
brown='#663300'
lightgray="#C0C0C0"
gray="#808080"
cb_lightgreen='#b2df8a'
cb_green='#33a02c'
cb_lightblue='#a6cee3'
cb_blue='#1f78b4'
royal='#4169E1'
p.rcParams.update({'xtick.labelsize': 14})
p.rcParams.update({'ytick.labelsize': 14})
p.rcParams.update({'axes.labelsize': 14})
p.rcParams.update({'legend.fontsize': 14})
p.rcParams.update({'axes.titlesize': 18})
###Output
_____no_output_____
###Markdown
Back to top Read dictionary with all stations available in STILT
###Code
# Read dictionary with all stations available in STILT - for 3-hourly STILT results only
# Dictionary contains information on
# - STILT station id
# - Station coordinates (latitude, longitude)
# - Altitude of tracer release in STILT simulation
# - STILT location identifier
# - Station name - if available
stilt_stations = create_STILT_dictionary()
# to list all station with coordinates etc. uncomment next line
#print_STILT_dictionary()
###Output
_____no_output_____
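###Markdown
A minimal sketch of how a single entry can be inspected, assuming the dictionary behaves like a plain Python dict; the station id 'HTM150' is only a hypothetical example and can be replaced by any key listed via print_STILT_dictionary().
###Code
# inspect one (hypothetical) entry of the STILT station dictionary
example_id = 'HTM150'
print(stilt_stations.get(example_id, 'station id not found - see print_STILT_dictionary() for valid keys'))
###Output
_____no_output_____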
###Markdown
Back to top Function for plotting STILT footprints for a selected station and time range
###Code
# plot STILT footprints for selected station and time range
def plot_footprints(station, station_lat, station_lon, loc_ident, daterange,
fxe_vmax=2.0, ident='', secsplit=False, pngfile=''):
#path to RINGO specific 1-hourly STILT footprints
path_fp=path_stilt+'/Footprints/'
#path to STILT footprints from the Footprint Tool (3-hourly only)
path_fp_3=path_stiltweb+'/slots/'
# loop over all dates and read netcdf files
# latitude, longitude are for grid center
fp=[]
nfp=0
first = True
for dd in daterange:
date_ident=str(dd.year)+'x'+str(dd.month).rjust(2, '0')+'x'+str(dd.day).rjust(2, '0')+'x'+str(dd.hour).rjust(2, '0')
filename=station+'/foot'+date_ident+'x'+loc_ident+'_aggreg.nc'
filename_3=loc_ident+'/'+str(dd.year)+'/'+str(dd.month).rjust(2, '0')+'/'+date_ident+'/foot'
#print(filename)
#print(filename_3)
read = False
if os.path.isfile(path_fp+filename):
f_fp = cdf.Dataset(path_fp+filename)
read = True
elif os.path.isfile(path_fp_3+filename_3):
f_fp = cdf.Dataset(path_fp_3+filename_3)
read = True
if (read):
if (first):
fp=f_fp.variables['foot'][:,:,:]
lon=f_fp.variables['lon'][:]
lat=f_fp.variables['lat'][:]
first = False
else:
fp=fp+f_fp.variables['foot'][:,:,:]
f_fp.close()
nfp+=1
else:
print('files do not exist: ',path_fp+filename,' ',path_fp_3+filename_3)
if nfp > 0:
mfp=fp/nfp
print(str(nfp)+' footprints found'+' '+ident)
else:
print('no footprints found'+' '+ident)
return
# plot aggregated footprint
# select colormap
#cmap = p.get_cmap('YlOrRd')
#cmap = p.get_cmap('inferno_r')
#cmap = p.get_cmap('bone_r')
cmap = p.get_cmap('GnBu')
#cmap = p.get_cmap('gist_heat_r')
# select marker color
#mcolor = 'b'
mcolor = 'r'
# select value range for logarithmic footprints
vmin=-7
vmax=-1
#grid cell index of station
ix,jy = lonlat_2_ixjy(station_lon,station_lat,lon,lat)
#print(station,station_lon,station_lat,ix,jy)
# define zoom area around station grid cell
i1 = np.max([ix-65,0])
i2 = np.min([ix+45,400])
j1 = np.max([jy-70,0])
j2 = np.min([jy+70,480])
lon_z=lon[i1:i2]
lat_z=lat[j1:j2]
fp_z=fp[0,j1:j2,i1:i2]
fig = p.figure(figsize=(17,17))
# Set scale for features from Natural Earth
#NEscale = '110m'
NEscale = '50m'
#NEscale = '10m'
# Create a feature for Countries at 1:50m from Natural Earth
countries = cfeature.NaturalEarthFeature(
category='cultural',
name='admin_0_countries',
scale=NEscale,
facecolor='none')
#Create a feature for Lakes at 1.50m from Natural Earth:
lakes = cfeature.NaturalEarthFeature(
category='physical',
name='lakes',
scale=NEscale,
facecolor='none')
# total annual emissions original EDGAR 4.3 for base year 2010
path_edgar=path_stilt+'/Emissions/'
filename='EDGARv4.3_2010_total.nc'
emis, lon, lat, dd, unit = read_emissions(path_edgar+filename)
# footprint * emission
fp_emis=fp*emis
fp_emis[fp<=0.0]=np.nan
fp_emis_z=fp_emis[0,j1:j2,i1:i2]
if not secsplit:
# set up a map
ax = p.subplot(1, 2, 1, projection=ccrs.PlateCarree())
img_extent = [lon_z.min(), lon_z.max(), lat_z.min(), lat_z.max()]
ax.set_extent(img_extent,crs=ccrs.PlateCarree())
#Add Natural Earth countries:
ax.add_feature(countries, edgecolor='black', linewidth=0.3)
#Add Natural Earth lakes:
ax.add_feature(lakes, edgecolor='black', linewidth=0.3)
#Add raster with values:
im = ax.imshow(np.log10(fp_z),interpolation='none',origin='lower', extent=img_extent,
cmap=cmap,vmin=vmin,vmax=vmax)
#Add colorbar:
cbar=p.colorbar(im,orientation='horizontal',pad=0.03,fraction=0.055,extend='both')
cbar.set_label('surface influence log$_{10}$ [ppm / ($\mu$mol / m$^{2}$s)]')
#Add plot title:
p.title('{} aggregated footprints '.format(nfp)+'\n'+'station: '+station+' '+
np.min(daterange).strftime('%Y-%m-%d')
+' -- '+np.max(daterange).strftime('%Y-%m-%d')+'\n'+ident)
#Add explanatory text under the colorbar (raster dataset min value):
ax.text(0.01, -0.29, 'min: %.5e' % np.nanmin(fp_z),
horizontalalignment='left',transform=ax.transAxes)
#Add explanatory text under the colorbar (raster dataset max value):
ax.text(0.99, -0.29, 'max: %.5e' % np.nanmax(fp_z),
horizontalalignment='right',transform=ax.transAxes)
# station location
ax.plot(station_lon,station_lat,'+',color=mcolor,ms=10,markeredgewidth=2,transform=ccrs.PlateCarree())
# set up second map
ax = p.subplot(1, 2, 2, projection=ccrs.PlateCarree())
img_extent = [lon_z.min(), lon_z.max(), lat_z.min(), lat_z.max()]
ax.set_extent(img_extent,crs=ccrs.PlateCarree())
#Add Natural Earth countries:
ax.add_feature(countries, edgecolor='black', linewidth=0.3)
#Add Natural Earth lakes:
ax.add_feature(lakes, edgecolor='black', linewidth=0.3)
#Add raster with values:
im = ax.imshow((fp_emis_z)[:,:],interpolation='none',origin='lower',extent=img_extent,
vmin=0,vmax=fxe_vmax,cmap=cmap)
#Add colorbar:
cbar=p.colorbar(im,orientation='horizontal',pad=0.03,fraction=0.055,extend='max')
cbar.set_label('surface influence [ppm]')
#Add plot title:
p.title('Footprints x Emissions '+'\n'+'station: '+station+' '+np.min(daterange).strftime('%Y-%m-%d')
+' -- '+np.max(daterange).strftime('%Y-%m-%d')+'\n'+ident)
#Add explanatory text under the colorbar (raster dataset min value):
ax.text(0.01, -0.29, 'min: %.5e' % np.nanmin(fp_emis_z), horizontalalignment='left',transform=ax.transAxes)
#Add explanatory text under the colorbar (raster dataset max value):
ax.text(0.99, -0.29, 'max: %.5e' % np.nanmax(fp_emis_z), horizontalalignment='right',transform=ax.transAxes)
#station location
ax.plot(station_lon,station_lat,'+',color=mcolor,ms=10,markeredgewidth=2,transform=ccrs.PlateCarree())
else: #if secsplit:
total_contribution = np.nansum(fp_emis_z)
print('total_contribution ',total_contribution)
print('energy/non-energy split with big cities marked')
        # emissions from energy production only, original EDGAR 4.3 for base year 2010
path_edgar=path_stilt+'/Emissions/'
filename='EDGARv4.3_2010_ENE.nc'
emis2, lon, lat, dd, unit = read_emissions(path_edgar+filename)
# footprint * emission
fp_emis2=fp*emis2
fp_emis2[fp<=0.0]=np.nan
fp_emis2_z=fp_emis2[0,j1:j2,i1:i2]
ENE_contribution = np.nansum(fp_emis2_z)
ENE_ratio = 100.*ENE_contribution/total_contribution
print('ENE_contribution', '%.1f' % ENE_ratio,'%')
# set up a map
ax = p.subplot(1, 2, 1, projection=ccrs.PlateCarree())
img_extent = [lon_z.min(), lon_z.max(), lat_z.min(), lat_z.max()]
ax.set_extent(img_extent,crs=ccrs.PlateCarree())
#Add Natural Earth countries:
ax.add_feature(countries, edgecolor='black', linewidth=0.3)
#Add Natural Earth lakes:
ax.add_feature(lakes, edgecolor='black', linewidth=0.3)
#Add raster with values:
im = ax.imshow((fp_emis2_z)[:,:],interpolation='none',origin='lower',extent=img_extent,
vmin=0,vmax=fxe_vmax,cmap=cmap)
#Add colorbar:
cbar=p.colorbar(im,orientation='horizontal',pad=0.03,fraction=0.055,extend='max')
cbar.set_label('surface influence [ppm]')
#Add plot title:
        p.title('average footprints x energy emissions '+'%.1f' % ENE_ratio
+' %'+'\n'+'station: '+station+' '
+np.min(daterange).strftime('%Y-%m-%d')+' -- '+np.max(daterange).strftime('%Y-%m-%d')
+'\n'+ident)
ax.text(0.01, -0.29, 'min: %.5f' % np.nanmin(fp_emis2_z), horizontalalignment='left',transform=ax.transAxes)
ax.text(0.99, -0.29, 'max: %.5f' % np.nanmax(fp_emis2_z), horizontalalignment='right',transform=ax.transAxes)
#station location
ax.plot(station_lon,station_lat,'+',color=mcolor,ms=10,markeredgewidth=2,transform=ccrs.PlateCarree())
# big cities in Europe
df_bigCities = get_bigCities()
ax.scatter(df_bigCities.longitude,df_bigCities.latitude,marker='o',color='k',facecolors='none',s=100,
transform=ccrs.PlateCarree())
        # emissions from all sectors except energy production, original EDGAR 4.3 for base year 2010
path_edgar=path_stilt+'/Emissions/'
filename='EDGARv4.3_2010_nonENE.nc'
emis3, lon, lat, dd, unit = read_emissions(path_edgar+filename)
# footprint * emission
fp_emis3=fp*emis3
fp_emis3[fp<=0.0]=np.nan
fp_emis3_z=fp_emis3[0,j1:j2,i1:i2]
nonENE_contribution = np.nansum(fp_emis3_z)
nonENE_ratio = 100.*nonENE_contribution/total_contribution
print('nonENE_contribution', '%.1f' % nonENE_ratio,'%')
ax = p.subplot(1, 2, 2, projection=ccrs.PlateCarree())
img_extent = (lon_z.min(), lon_z.max(), lat_z.min(), lat_z.max())
ax.set_extent([lon_z.min(), lon_z.max(), lat_z.min(), lat_z.max()],crs=ccrs.PlateCarree())
ax.add_feature(countries, edgecolor='black', linewidth=0.3)
im = ax.imshow((fp_emis3_z)[:,:],interpolation='none',origin='lower',extent=img_extent,
vmin=0.000001,vmax=fxe_vmax,cmap=cmap)
cbar=p.colorbar(im,orientation='horizontal',pad=0.03,fraction=0.055,extend='max')
cbar.set_label('surface influence [ppm]')
p.title('average footprints x non-energy emissions '+'%.1f' % nonENE_ratio
+' %'+'\n'+'station: '+station+' '
+np.min(daterange).strftime('%Y-%m-%d')+' -- '+np.max(daterange).strftime('%Y-%m-%d')
+'\n'+ident)
ax.text(0.01, -0.29, 'min: %.5f' % np.nanmin(fp_emis3_z), horizontalalignment='left',transform=ax.transAxes)
ax.text(0.99, -0.29, 'max: %.5f' % np.nanmax(fp_emis3_z), horizontalalignment='right',transform=ax.transAxes)
#station location
ax.plot(station_lon,station_lat,'+',color=mcolor,ms=10,markeredgewidth=2,transform=ccrs.PlateCarree())
# big cities in Europe
ax.scatter(df_bigCities.longitude,df_bigCities.latitude,marker='o',color='k',facecolors='none',s=100,
transform=ccrs.PlateCarree())#,latlon=True)
p.tight_layout()
p.show()
if len(pngfile)>0:
fig.savefig(pngfile+'.png',dpi=100)
p.close()
###Output
_____no_output_____
###Markdown
Back to top Function to apply temporal sampling of STILT time series and plot results
###Code
# function to apply temporal sampling of STILT time series + plot results
def plot_stilt_ts_selection(df,start_date,end_date,noon_time,midday_range,
obs=None,meteo=None,pngfile='',high=5.0, low=1.5, highco=0.05, var_limit=1.,summary=False):
# selections
# afternoon sampling
# e.g. local time 12-15=> utc 11-14
dd1, dd2 = midday_range
df_day = df.loc[(df['hour'] >= dd1) & (df['hour'] <= dd2)]
# range in afternoon values (max-min)
df_range = df_day.resample('D').max() - df_day.resample('D').min()
df_range.index = df_range.index + to_offset('12H')
df_range.loc[(df_range['co2.stilt'] <= 0)]=np.nan
# low variability selection for QC (use range as variability)
df_var = df_range.loc[(df_range['co2.stilt'] < var_limit)]
df_var3 = df_var.iloc[::3, :]
# find n lowest-variability-footprints
df_var_smallest = df_range[(df_range.index >= start_date) & (df_range.index <= end_date)].nsmallest(10, 'co2.stilt')
# single noon value at 12 UTC => 13 LT for CET
ddm=noon_time
df_noon = df.loc[(df['hour'] >= ddm) & (df['hour'] <= ddm)]
# low fossil fuel CO2 => background
df_low=df_noon.loc[(df_noon['co2.fuel.coal']+df_noon['co2.fuel.oil']+df_noon['co2.fuel.gas'] < low)]
# high ffCO2 => target
df_high=df_noon.loc[(df_noon['co2.fuel.coal']+df_noon['co2.fuel.oil']+df_noon['co2.fuel.gas'] > high)]
# select background based on wind direction
# example for Gartow
df_nordsee=df_low.loc[(df_low['wind.dir'] < 360) & (df_low['wind.dir'] > 270)]
df_noon.name = df.name
df_noon.model = df.model
# select only every 3rd noon value
df_noon3 = df_noon.iloc[::3, :]
# select lowest value for previous 'ndrol' days
ndrol='3'
df_min = df.rolling(ndrol+'d',closed='right').min()
# only noon value (select based on date info in original time series)
df_min_noon = df_min.loc[(df['hour'] >= ddm) & (df['hour'] <= ddm)]
    # difference between 'ndrol'-day minimum and value at noon
df_offset=df_noon.subtract(df_min_noon)
# high CO offset as indicator for high ffCO2
df_highco=df_noon.loc[(df_offset['co.stilt'] > highco)]
#plot time series
if (df.name=='GAT344'):
titlex = 'Gartow 341m'
else:
titlex = stilt_stations[df.name]['name']
fig = p.figure(figsize=(15,14))
ax = fig.add_subplot(5,1,1)
p.plot(df.index,df['co2.stilt'],'.',color=lightgray,label='hourly values')
p.plot(df_noon.index,df_noon['co2.stilt'],'.',color='b',label='at '+str(ddm+1)+' LT',markersize=12)
p.plot(df_high.index,df_high['co2.stilt'],'x',color='r',label='if ffCO$_2$ offset >'+str(high)+' ppm'
,markersize=10,markeredgewidth=2)
p.plot(df_highco.index,df_highco['co2.stilt'],'+',color='m',label='if CO offset >'+str(highco)+' ppm'
,markersize=12,markeredgewidth=2)
p.title(titlex+' '+str(df['latstart'][0])+'$^\circ$N'+' '
+str(df['lonstart'][0])+'$^\circ$E')
ax.set_xlim(start_date,end_date)
#ax.set_xticklabels([])
ax.set_ylim(380,465)
ax.set_ylabel('STILT CO$_2$ [ppm]')
ax.grid(axis='x')
ax.grid(axis='y')
ax.legend(loc='upper left',ncol=8)
# Define the date format
date_form = DateFormatter("%Y-%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
ax = fig.add_subplot(5,1,2)
p.plot(df.index,df['co.stilt'],'.',color=lightgray,label='hourly values')
p.plot(df_noon.index,df_noon['co.stilt'],'.',color='b',label='at '+str(ddm+1)+' LT',markersize=12)
p.plot(df_min_noon.index,df_min_noon['co.stilt'],'-',color=gray,label=ndrol+' day minimum')
p.plot(df_highco.index,df_highco['co.stilt'],'+',color='m',label='if CO offset >'+str(highco)+' ppm'
,markersize=12,markeredgewidth=2)
ax.set_xlim(start_date,end_date)
#ax.set_xticklabels([])
ax.set_ylim(-0.01,0.24)
ax.set_ylabel('regional STILT CO [ppm]')
ax.grid(axis='x')
ax.grid(axis='y')
ax.legend(loc='upper left',ncol=8)
# Define the date format
date_form = DateFormatter("%Y-%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
ax = fig.add_subplot(5,1,3)
p.plot(df_noon.index,df_noon['co2.bio'],'.',color='g',label='STILT bio CO$_2$ at '+str(ddm+1)+' LT')
p.plot(df_noon.index,df_noon['co2.ff'],'.',color='r',label='STILT ff CO$_2$ at '+str(ddm+1)+' LT')
p.plot(df_high.index,df_high['co2.ff'],'x',color='r',label='STILT ffCO$_2$ >'+str(high),
markersize=10,markeredgewidth=2)
p.plot(df_highco.index,df_highco['co2.ff'],'+',color='m',label='STILT ffCO$_2$ (CO offset >'+str(highco)+')',
markersize=12,markeredgewidth=2)
ax.set_xlim(start_date,end_date)
#ax.set_xticklabels([])
ax.set_ylim(-2,30)
ax.set_ylabel('CO$_2$ components [ppm]')
ax.grid(axis='x')
ax.grid(axis='y')
ax.legend(loc='upper left',ncol=8)
# Define the date format
date_form = DateFormatter("%Y-%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
ax = fig.add_subplot(5,1,4)
p.plot(df_range.index,df_range['co2.stilt'],'.',color='r',label='range STILT CO$_2$ '
+str(dd1+1)+'-'+str(dd2+1)+' LT')
p.plot(df_var.index,df_var['co2.stilt'],'o',color='b',label='range STILT CO$_2$ < '
+str(var_limit)+' '+str(dd1+1)+'-'+str(dd2+1)+' LT')
ax.set_xlim(start_date,end_date)
ax.set_xticklabels([])
#ax.set_ylim(390,410)
ax.set_ylabel('CO$_2$ [ppm]')
ax.grid(axis='x')
ax.grid(axis='y')
ax.legend(loc='best',ncol=8)
# Define the date format
date_form = DateFormatter("%Y-%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
ax = fig.add_subplot(5,1,5)
p.plot([start_date,end_date],[0,0],color=gray,linewidth=0.5)
p.plot([start_date,end_date],[90,90],color=gray,linewidth=0.5)
p.plot([start_date,end_date],[180,180],color=gray,linewidth=0.5)
p.plot([start_date,end_date],[270,270],color=gray,linewidth=0.5)
p.plot([start_date,end_date],[360,360],color=gray,linewidth=0.5)
p.plot(df.index,df['wind.dir'],'.',color=lightgray,label='STILT wind dir')
ax.set_xlim(start_date,end_date)
ax.set_ylim(0,360)
ax.set_ylabel('wind direction')
ax.grid(axis='x')
#ax.grid(axis='y')
ax.legend(loc='best',ncol=8)
# Define the date format
date_form = DateFormatter("%Y-%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
p.tight_layout(h_pad=0.005)
p.show()
if len(pngfile)>0:
fig.savefig(pngfile+'.png',dpi=100)
p.close()
if summary:
# print summary
summary_file=path_plots+df.name+'_selection_STILT_counts.csv'
open(summary_file,'w').write(stilt_stations[df.name]['name']+' '+str(df['latstart'][0])+' N'+' '
+str(df['lonstart'][0])+' E'+'\n')
n_highffco2 = df_high.groupby(pd.Grouper(freq='M')).count()
display(HTML('<br> <p style="font-size:15px;font-weight:bold;color:royalblue;">Events with ffCO2 offset > '+str(high)+' ppm</p>'))
display(HTML(n_highffco2['co2.stilt'].to_frame(name='counts per month').to_html()))
open(summary_file,'a').write('Events with ffCO2 offset > '+str(high)+' ppm'+'\n')
n_highffco2['co2.stilt'].to_frame(name='counts per month').to_csv(summary_file,mode='a')
n_highco = df_highco.groupby(pd.Grouper(freq='M')).count()
display(HTML('<br> <p style="font-size:15px;font-weight:bold;color:royalblue;">Events with CO offset > '+str(highco)+' ppm</p>'))
display(HTML(n_highco['co2.stilt'].to_frame(name='counts per month').to_html()))
open(summary_file,'a').write('Events with CO offset > '+str(highco)+' ppm'+'\n')
n_highco['co2.stilt'].to_frame(name='counts per month').to_csv(summary_file,mode='a')
n_var = df_var.groupby(pd.Grouper(freq='M')).count()
display(HTML('<br> <p style="font-size:15px;font-weight:bold;color:royalblue;">Events with small daytime CO2 difference < '+str(var_limit)+' ppm</p>'))
display(HTML(n_var['co2.stilt'].to_frame(name='counts per month').to_html()))
open(summary_file,'a').write('Events with small daytime CO2 difference < '+str(var_limit)+' ppm'+'\n')
n_var['co2.stilt'].to_frame(name='counts per month').to_csv(summary_file,mode='a')
n_var_smallest = df_var_smallest.groupby(pd.Grouper(freq='M')).count()
max_var_smallest = df_var_smallest.groupby(pd.Grouper(freq='M')).max()
display(HTML('<br> <p style="font-size:15px;font-weight:bold;color:royalblue;">10 events with smallest daytime CO2 differences</p>'))
#display(HTML(n_var_smallest['co2.stilt'], ' max value: ', max_var_smallest['co2.stilt']
display(HTML(max_var_smallest['co2.stilt'].to_frame(name='max of 10 smallest var').to_html()))
open(summary_file,'a').write('10 events with smallest daytime CO2 differences'+'\n')
            max_var_smallest['co2.stilt'].to_frame(name='max of 10 smallest var').to_csv(summary_file,mode='a')
display(HTML('<br> <br>'))
df_select_high=df_high[(df_high.index >= start_date) & (df_high.index <= end_date)]
df_select_highco=df_highco[(df_highco.index >= start_date) & (df_highco.index <= end_date)]
df_select_low=df_low[(df_low.index >= start_date) & (df_low.index <= end_date)]
df_select_noon=df_noon[(df_noon.index >= start_date) & (df_noon.index <= end_date)]
df_select_noon3=df_noon3[(df_noon3.index >= start_date) & (df_noon3.index <= end_date)]
df_select_var=df_var[(df_var.index >= start_date) & (df_var.index <= end_date)]
df_select_var_smallest=df_var_smallest[(df_var_smallest.index >= start_date) & (df_var_smallest.index <= end_date)]
return df_select_low, df_select_high, df_select_highco, df_select_noon, df_select_noon3, df_select_var, df_select_var_smallest
###Output
_____no_output_____
###Markdown
Back to top Function to call temporal selection function and aggregate footprints accordingly
###Code
# function to call the temporal selection function and aggregate footprints accordingly
def all_selection_plots(station,sdate,edate,loc_ident,station_lat,station_lon,
high,low,highco,var_limit, noon_time, midday_range,summary=False,plot_foot=True):
# read 1-hourly STILT results (RINGO specific STILT runs)
df_stilt_1hr = pd.DataFrame()
for year in range(sdate.year,edate.year+1):
df = read_stilt_timeseries_RINGO_T13(station,year,loc_ident)
df_stilt_1hr = df_stilt_1hr.append(df)
df_stilt_1hr.name = df.name
df_stilt_1hr.model = df.model
for yrmon in rrule(MONTHLY, dtstart=sdate, until=edate):
start_date = dt.datetime(yrmon.year,yrmon.month,1,0)
if yrmon.month >= 12:
end_date = dt.datetime(yrmon.year+1,1,1,0)
else:
end_date = dt.datetime(yrmon.year,yrmon.month+1,1,0)
end_date = end_date - dt.timedelta(hours=1)
        # return time series selection
pngfile='selection_STILT_'+station+'_'+str(start_date.year)+str(start_date.month).zfill(2)
df_select_noon_low, df_select_noon_high, df_select_noon_highco, df_select_noon, df_select_3rdnoon, df_select_noon_var, df_select_noon_var_smallest = \
plot_stilt_ts_selection(df_stilt_1hr,start_date,end_date,noon_time,midday_range,pngfile=path_plots+pngfile,
low=low,high=high,highco=highco,summary=summary)
summary = False
if plot_foot:
# aggregated footprints
# set upper limit for color scale in 'footprint x emissions' map
# e.g. fxe_vmax=0.16
# in all other maps the color scale is set automatically
# all footprints at noontime
selected=df_select_noon.index
pngfile='FP_noon_STILT_'+station+'_'+str(start_date.year)+str(start_date.month).zfill(2)
plot_footprints(station,station_lat,station_lon,loc_ident,selected,
fxe_vmax=0.14,ident='all '+str(noon_time+1)+' LT',pngfile=path_plots+pngfile)
# footprints at noontime, every 3rd day only
selected=df_select_3rdnoon.index
pngfile='FP_noon3_STILT_'+station+'_'+str(start_date.year)+str(start_date.month).zfill(2)
plot_footprints(station,station_lat,station_lon,loc_ident,selected,
fxe_vmax=0.14,ident=str(noon_time+1)+' LT every 3rd day',pngfile=path_plots+pngfile)
# 10 footprints for situations with lowest variability around noon
selected=df_select_noon_var_smallest.index
pngfile='FP_10lowestvar_STILT_'+station+'_'+str(start_date.year)+str(start_date.month).zfill(2)
plot_footprints(station,station_lat,station_lon,loc_ident,selected,
fxe_vmax=0.14,ident='10 lowest variability',pngfile=path_plots+pngfile)
# footprints for situations with low variability around noon
selected=df_select_noon_var.index
pngfile='FP_var_STILT_'+station+'_'+str(start_date.year)+str(start_date.month).zfill(2)
plot_footprints(station,station_lat,station_lon,loc_ident,selected,
fxe_vmax=0.14,ident='low variability < '+str(var_limit)+' ppm',pngfile=path_plots+pngfile)
# footprints for low ffCO2
selected=df_select_noon_low.index
pngfile='FP_lowffCO2_STILT_'+station+'_'+str(start_date.year)+str(start_date.month).zfill(2)
plot_footprints(station,station_lat,station_lon,loc_ident,selected,
fxe_vmax=0.14,ident='low ffCO2 < '+str(low)+' ppm',pngfile=path_plots+pngfile)
# footprints for high ffCO2
selected=df_select_noon_high.index
pngfile='FP_highffCO2_STILT_'+station+'_'+str(start_date.year)+str(start_date.month).zfill(2)
plot_footprints(station,station_lat,station_lon,loc_ident,selected,
fxe_vmax=0.14,ident='high ffCO2 > '+str(high)+' ppm',secsplit=False,pngfile=path_plots+pngfile)
# footprints for high ffCO2, split into energy and non-energy emissions
selected=df_select_noon_high.index
pngfile='FP_highffCO2_secsplit_STILT_'+station+'_'+str(start_date.year)+str(start_date.month).zfill(2)
plot_footprints(station,station_lat,station_lon,loc_ident,selected,
fxe_vmax=0.14,ident='high ffCO2 > '+str(high)+' ppm',secsplit=True,
pngfile=path_plots+pngfile)
# footprints for high CO offset
selected=df_select_noon_highco.index
pngfile='FP_highffCO_STILT_'+station+'_'+str(start_date.year)+str(start_date.month).zfill(2)
plot_footprints(station,station_lat,station_lon,loc_ident,selected,
fxe_vmax=0.14,ident='high CO offset > '+str(highco)+' ppm',pngfile=path_plots+pngfile)
###Output
_____no_output_____
###Markdown
Back to top Function for plotting STILT time series together with observations
###Code
# function to plot STILT time series together with observations
def plot_icos_stilt_timeseries(station, df, start_date, end_date,
obs=None,meteo=None,title2='',linestyle = '.',
pngfile='',add_tracer=[], citation=''):
#plot time series
tracer = add_tracer + ['co2']
tracer = [x.lower() for x in tracer]
if obs is not None and not obs.empty:
if obs.columns.str.contains('Flag').any():
obs = obs.drop(columns=['Flag_co2', 'Flag_co'])
fig = p.figure(figsize=(15,8.8))
ax = fig.add_subplot(3,1,1)
p.plot(df.index,df['co2.stilt'],linestyle,color='b',label='STILT CO$_2$')
p.plot(df.index,df['co2.background'],linestyle,color='c',label='STILT CO$_2$ background')
if obs is not None and not obs.empty:
if 'DateTime' in obs:
p.plot(obs.DateTime,obs['CO2'],linestyle,color='k',label='Observation CO$_2$')
else:
if 'co2' in obs:
p.plot(obs.index,obs['co2'],linestyle,color='k',label='Observation CO$_2$')
if 'CO2' in obs:
p.plot(obs.index,obs['CO2'],linestyle,color='k',label='Observation CO$_2$')
p.title(stilt_stations[df.name]['icosName']+' {:.0f}m'.format(stilt_stations[df.name]['icosHeight'])+' '
+str(df['latstart'][0])+'$^\circ$N'+' '+str(df['lonstart'][0])+'$^\circ$E'+' '+title2)
ax.set_xlim(start_date,end_date)
ax.set_ylim(380,465)
ax.set_ylabel('CO$_2$ [ppm]')
ax.grid(axis='x')
ax.grid(axis='y')
ax.legend(loc='upper left',ncol=8)
# Define the date format
date_form = DateFormatter("%Y-%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
ax = fig.add_subplot(3,1,2)
p.plot(df.index,df['co2.fuel.coal']+df['co2.fuel.oil']+df['co2.fuel.gas'],linestyle,color='r',label='ffCO$_2$ offset')
p.plot(df.index,df['co2.bio'],linestyle,color='g',label='biospheric CO$_2$ offset')
ax.set_xlim(start_date,end_date)
ax.set_ylim(-31,54)
ax.set_ylabel('STILT CO$_2$ [ppm]')
ax.grid(axis='x')
ax.grid(axis='y')
ax.legend(loc='upper left',ncol=8)
# Define the date format
date_form = DateFormatter("%Y-%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
if ('co.stilt' in df) and ('co' in tracer):
ax = fig.add_subplot(3,1,3)
p.plot(df.index,df['co.stilt'],linestyle,color='m',label='STILT CO (no background)')
if obs is not None and not obs.empty:
if 'DateTime' in obs:
p.plot(obs.index,obs['CO'],linestyle,color='k',label='Observation CO')
else:
# select lowest value for previous 'nrol' days
# only works if DateTime is defined as index
# in case of many missing values, 'min' is not defined, better use very low quantile
nrol='3'
df_obs_min = obs.rolling(nrol+'d',closed='right',min_periods=1).quantile(0.001)
if 'co' in obs:
p.plot(obs.index,obs['co']-df_obs_min['co'],linestyle,color='k',label='Observation CO')
if 'CO' in obs:
p.plot(obs.index,obs['CO']-df_obs_min['CO'],linestyle,color='k',label='Observation CO')
ax.set_xlim(start_date,end_date)
ax.set_ylabel('CO [ppm]')
ax.grid(axis='x')
ax.grid(axis='y')
ax.legend(loc='best',ncol=8)
# Define the date format
date_form = DateFormatter("%Y-%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
p.tight_layout()
p.show()
display(Markdown("<sub>"+citation+"</sub>"))
if len(pngfile)>0:
fig.savefig(pngfile+'.png',dpi=100)
p.close()
###Output
_____no_output_____
###Markdown
Function for comparison of STILT results with ICOS data and plot time series
###Code
# function for comparison of STILT results with ICOS data and plot time series
def plot_comparison(ist,df_co2,df_co,df_meteo,sdate,edate,citation=''):
# check if ICOS data (CO2,CO) is available and combine CO2 and CO in one data frame
if not df_co2.empty and not df_co.empty:
df_obs = pd.merge(df_co2,df_co,left_index=True, right_index=True, how='outer', suffixes=('_co2', '_co'))
if df_obs.columns.str.contains('Flag').any():
df_obs = df_obs.drop(columns=['Flag_co2', 'Flag_co'])
else:
df_obs = pd.DataFrame(None)
# check if STILT results are available for the station
if not ist in stilt_stations.keys():
print('no STILT results for station: ',ist)
else:
# read 1-hourly STILT time series, available only for specific stations
df_stilt = pd.DataFrame()
for year in range(sdate.year,edate.year+1):
df = read_stilt_timeseries_RINGO_T13(ist,year,stilt_stations[ist]['locIdent'])
df_stilt = df_stilt.append(df)
df_stilt.name = df.name
df_stilt.model = df.model
if df.empty:
print('no 1-hourly STILT results for station: ',ist)
else:
# monthly plots
for yrmon in rrule(MONTHLY, dtstart=sdate, until=edate):
start_date = dt.datetime(yrmon.year,yrmon.month,1,0)
if yrmon.month >= 12:
end_date = dt.datetime(yrmon.year+1,1,1,0)
else:
end_date = dt.datetime(yrmon.year,yrmon.month+1,1,0)
end_date = end_date - dt.timedelta(hours=1)
timeselect='all'
# plot time series
pngfile='comparison_ICOS_STILT_'+ist+'_'+str(start_date.year)+str(start_date.month).zfill(2)
plot_icos_stilt_timeseries(ist, df_stilt, start_date, end_date, obs = df_obs,
linestyle='-',add_tracer=['co'],pngfile=path_plots+pngfile,citation=citation)
###Output
_____no_output_____
###Markdown
Back to top Function to apply temporal sampling of ICOS measurements time series and plot results
###Code
# function to apply temporal sampling of ICOS measurements time series and plot results
def plot_icos_ts_selection(ist, df_co2, df_co, df_meteo, highco, var_limit, stdev_limit,
noon_time, midday_range, start=None, end=None, summary=False, citation=''):
# check if ICOS data (CO2,CO) is available and combine CO2 and CO in one data frame
if not df_co2.empty and not df_co.empty:
df_obs = pd.merge(df_co2,df_co,left_index=True, right_index=True, how='outer', suffixes=('_co2', '_co'))
if df_obs.columns.str.contains('Flag').any():
df_obs = df_obs.drop(columns=['Flag_co2', 'Flag_co'])
# afternoon sampling
# e.g. local time 12-15=> utc 11-14
dd1, dd2 = midday_range
ddm = noon_time
df_obs_day = df_obs.loc[(df_obs.index.hour >= dd1) & (df_obs.index.hour <= dd2)]
# range in afternoon values (max-min)
#df_co2_range = (df_obs_day['co2'].resample('D',loffset='12H').max() - df_obs_day['co2'].resample('D',loffset='12H').min()).to_frame()
df_co2_range = (df_obs_day['co2'].resample('D').max() - df_obs_day['co2'].resample('D').min()).to_frame()
df_co2_range.index = df_co2_range.index + to_offset('12H')
df_co2_range.loc[(df_co2_range['co2'] <= 0)]=np.nan
# low variability selection for QC (use range as variability)
df_obs_var = df_co2_range.loc[(df_co2_range['co2'] < var_limit)]
df_obs_var3 = df_obs_var.iloc[::3, :]
df_obs_lowStdev=df_obs.loc[(df_obs['Stdev_co2'] < stdev_limit)]
# single noon value at 12 UTC => 13 LT for CET
df_obs_noon = df_obs.loc[(df_obs.index.hour >= ddm) & (df_obs.index.hour <= ddm)]
# high CO as indicator for high ffCO2
df_obs_highco=df_obs_noon.loc[(df_obs_noon['co'] > highco)]
# select only every 3rd noon value
df_obs_noon3 = df_obs_noon.iloc[::3, :]
# select lowest value for previous 'nrol' days
# in case of many missing values, 'min' is not defined, better use very low quantile
nrol='3'
df_obs_min = df_obs.rolling(nrol+'d',closed='right',min_periods=1).min()###.quantile(0.001,numeric_only=True)
# only noon value (select based on date info in original time series)
df_obs_min_noon = df_obs_min.loc[(df_obs.index.hour >= ddm) & (df_obs.index.hour <= ddm)]
# difference between 'nrol'-day minimum and value at noon
df_obs_offset=df_obs_noon.subtract(df_obs_min_noon)
# high CO offset as indicator for high ffCO2
df_obs_highco=df_obs_noon.loc[(df_obs_offset['co'] > highco)]
# low standard deviation at noon
df_obs_lowStdev_noon=df_obs_noon.loc[(df_obs_noon['Stdev_co2'] < stdev_limit)]
# plot time series for each month
# if start and/or end date are specified in the parameters list, use them
# otherwise extract start and end date of time series
sd0 = df_obs.index[0]
ed0 = df_obs.index[-1]+dt.timedelta(days=32-df_obs.index[-1].day) #round up to next month/year
if start is not None:
sd = start
else:
#sd0 = df_obs.index[0]
sd = dt.datetime(sd0.year,sd0.month,1,0)
if end is not None:
ed = end
else:
#ed0 = df_obs.index[-1]+dt.timedelta(days=32-df_obs.index[-1].day) #round up to next month/year
ed = dt.datetime(ed0.year,ed0.month,1,0)
for yrmon in rrule(MONTHLY, dtstart=sd, until=ed):
start_date = dt.datetime(yrmon.year,yrmon.month,1,0)
if yrmon.month >= 12:
end_date = dt.datetime(yrmon.year+1,1,1,0)
else:
end_date = dt.datetime(yrmon.year,yrmon.month+1,1,0)
end_date = end_date - dt.timedelta(hours=1)
if (start_date > ed0) or (end_date < sd0):
display(HTML('<p style="font-size:15px;font-weight:bold;color:red;">no ICOS data available for '
+start_date.strftime("%Y-%m-%d %H:%M:%S")+' - '+end_date.strftime("%Y-%m-%d %H:%M:%S")+'</p>'))
else:
fig = p.figure(figsize=(15,14))
ax = fig.add_subplot(5,1,1)
p.plot(df_obs.index,df_obs['co2'],'.',color=lightgray,label='hourly values')
p.plot(df_obs_noon.index,df_obs_noon['co2'],'.',color='b',label='at '+str(ddm+1)+' LT',markersize=12)
p.plot(df_obs_highco.index,df_obs_highco['co2'],'+',color='m',label='if obs CO offset >'+str(highco)+' ppm',
markersize=12,markeredgewidth=2)
p.title(stilt_stations[ist]['icosName']+' {:.0f}m'.format(stilt_stations[ist]['icosHeight'])
+' {:.2f}$^\circ$N'.format(stilt_stations[ist]['icosLat'])
+' {:.2f}$^\circ$E'.format(stilt_stations[ist]['icosLon']))
ax.set_xlim(start_date,end_date)
ax.set_ylim(380,465)
ax.set_ylabel(' obs CO$_2$ [ppm]')
ax.grid(axis='x')
ax.grid(axis='y')
ax.legend(loc='upper left',ncol=8)
# Define the date format
date_form = DateFormatter("%Y-%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
ax = fig.add_subplot(5,1,2)
#p.plot([start_date,end_date],[highco,highco],':',color='k')#,linewidth=0.5)
p.plot(df_obs.index,df_obs['co'],'.',color=lightgray,label='hourly values')
p.plot(df_obs_noon.index,df_obs_noon['co'],'.',color='b',label='at '+str(ddm+1)+' LT',markersize=12)
#p.plot(df_obs_min_noon.index,df_obs_min_noon['co'],'-',color=gray,label=nrol+' day minimum at '+str(ddm+1)+' LT')
p.plot(df_obs_min_noon.index,df_obs_min_noon['co'],'-',color=gray,label=nrol+' day minimum')
p.plot(df_obs_highco.index,df_obs_highco['co'],'+',color='m',label='if obs CO offset >'+str(highco)+' ppm',
markersize=12,markeredgewidth=2)
ax.set_xlim(start_date,end_date)
ax.set_ylim(0.06,0.31)
ax.set_ylabel('obs CO [ppm]')
ax.grid(axis='x')
ax.grid(axis='y')
ax.legend(loc='upper left',ncol=8)
# Define the date format
date_form = DateFormatter("%Y-%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
ax = fig.add_subplot(5,1,3)
p.plot(df_obs.index,df_obs['Stdev_co2'],'.',color=lightgray,label='CO$_2$ Stdev ')
p.plot(df_obs_noon.index,df_obs_noon['Stdev_co2'],'.',color=lime,label='CO$_2$ Stdev at '+str(ddm+1)+' LT')
#p.title(ist)
ax.set_xlim(start_date,end_date)
#ax.set_ylim(-0.5,6)
ax.set_ylabel('CO$_2$ [ppm]')
ax.grid(axis='x')
ax.grid(axis='y')
ax.legend(loc='best',ncol=8)
# Define the date format
date_form = DateFormatter("%Y-%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
ax = fig.add_subplot(5,1,4)
p.plot([start_date,end_date],[var_limit,var_limit],':',color='k')#,linewidth=0.5)
p.plot([start_date,end_date],[stdev_limit,stdev_limit],':',color='k')#,linewidth=0.5)
p.plot(df_co2_range.index,df_co2_range['co2'],'.',color='r',label='range CO$_2$ '+str(dd1+1)+'-'+str(dd2+1)+' LT')
p.plot(df_obs_var.index,df_obs_var['co2'],'o',color='b',label='range CO$_2$ < '+str(var_limit)+' '+str(dd1+1)+'-'+str(dd2+1)+' LT')
p.plot(df_obs_noon.index,df_obs_noon['Stdev_co2'],'.',color=lime,label=' CO$_2$ Stdev at '+str(ddm+1)+' LT')
p.plot(df_obs_lowStdev_noon.index,df_obs_lowStdev_noon['Stdev_co2'],'^',color='g',label=' CO$_2$ Stdev < '+str(stdev_limit)+' '+str(ddm+1)+' LT')
ax.set_xlim(start_date,end_date)
#ax.set_ylim(-0.5,6)
#ax.set_ylim(390,410)
ax.set_ylabel('CO$_2$ [ppm]')
ax.grid(axis='x')
ax.grid(axis='y')
ax.legend(loc='best',ncol=8)
# Define the date format
date_form = DateFormatter("%Y-%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
ax = fig.add_subplot(5,1,5)
p.plot([start_date,end_date],[0,0],color=gray,linewidth=0.5)
p.plot([start_date,end_date],[90,90],color=gray,linewidth=0.5)
p.plot([start_date,end_date],[180,180],color=gray,linewidth=0.5)
p.plot([start_date,end_date],[270,270],color=gray,linewidth=0.5)
p.plot([start_date,end_date],[360,360],color=gray,linewidth=0.5)
if not df_meteo.empty:
p.plot(df_meteo.index,df_meteo['WD'],'.',color='lightgray',label='wind dir')
ax.set_xlim(start_date,end_date)
ax.set_ylim(0,360)
ax.set_ylabel('wind direction [$^\circ$]')
ax.grid(axis='x')
#ax.grid(axis='y')
ax.legend(loc='best',ncol=8)
# Define the date format
date_form = DateFormatter("%Y-%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
p.tight_layout()
p.show()
display(Markdown("<sub>"+citation+"</sub>"))
pngfile='selection_ICOS_'+ist+'_'+str(start_date.year)+str(start_date.month).zfill(2)
fig.savefig(path_plots+pngfile+'.png',dpi=100)
p.close()
if summary:
# print summary
summary_file=path_plots+ist+'_selection_ICOS_counts.csv'
open(summary_file,'w').write(stilt_stations[ist]['icosName']+' {:.0f}'.format(stilt_stations[ist]['icosHeight'])
+' {:.2f}'.format(stilt_stations[ist]['icosLat'])
+' {:.2f}'.format(stilt_stations[ist]['icosLon'])+'\n')
n_highco = df_obs_highco.groupby(pd.Grouper(freq='M')).count()
display(HTML('<br> <p style="font-size:15px;font-weight:bold;color:royalblue;">Events with CO offset > '+str(highco)+' ppm</p>'))
display(HTML(n_highco['co'].to_frame(name='counts per month').to_html()))
open(summary_file,'a').write('Events with CO offset > '+str(highco)+' ppm'+'\n')
n_highco['co'].to_frame(name='counts per month').to_csv(summary_file,mode='a')
ny_highco = n_highco['co2'].to_frame().groupby(n_highco['co2'].to_frame().index.month).mean()
display(HTML('<br> <b>multi-annual mean</b>'))
display(HTML(ny_highco['co2'].to_frame(name='counts per month').to_html()))
n_lowStdev = df_obs_lowStdev_noon.groupby(pd.Grouper(freq='M')).count()
display(HTML('<br> <p style="font-size:15px;font-weight:bold;color:royalblue;">Events with low CO2 variability < '+str(stdev_limit)+'ppm</p>'))
display(HTML(n_lowStdev['Stdev_co2'].to_frame(name='counts per month').to_html()))
open(summary_file,'a').write('Events with low CO2 variability < '+str(stdev_limit)+'ppm'+'\n')
n_lowStdev['co'].to_frame(name='counts per month').to_csv(summary_file,mode='a')
ny_lowStdev = n_lowStdev['co2'].to_frame().groupby(n_lowStdev['co2'].to_frame().index.month).mean()
display(HTML('<br> <b>multi-annual mean</b>'))
display(HTML(ny_lowStdev['co2'].to_frame(name='counts per month').to_html()))
n_var = df_obs_var.groupby(pd.Grouper(freq='M')).count()
display(HTML('<br> <p style="font-size:15px;font-weight:bold;color:royalblue;">Events with small daytime CO2 difference < '+str(var_limit)+'ppm</p>'))
display(HTML(n_var['co2'].to_frame(name='counts per month').to_html()))
open(summary_file,'a').write('Events with small daytime CO2 difference < '+str(var_limit)+'ppm'+'\n')
n_var['co2'].to_frame(name='counts per month').to_csv(summary_file,mode='a')
ny_var = n_var['co2'].to_frame().groupby(n_var['co2'].to_frame().index.month).mean()
display(HTML('<br> <b>multi-annual mean</b>'))
display(HTML(ny_var['co2'].to_frame(name='counts per month').to_html()))
###Output
_____no_output_____
###Markdown
Back to top Read coordinates of big cities in Europe
###Code
def get_bigCities():
filename='majorCitiesEurope.xlsx'
df_bigCities = pd.read_excel(filename,sheet_name='Tabelle2',skiprows=1,
names=['Rang','Name','Einwohner','longitude','latitude','inhabitants'])
#problems with decimlal points...
df_bigCities.inhabitants[df_bigCities.inhabitants > 100] = df_bigCities.inhabitants/1000.
return df_bigCities
###Output
_____no_output_____
###Markdown
Back to top Function to test sensitivity of selection strategy on monthly mean
###Code
# function to test sensitivity of selection strategy on monthly mean
def sensitivity_selection_icos(ist, df_co2, df_co, df_meteo, highco, var_limit, stdev_limit,
noon_time, midday_range, start=None, end=None, summary=False, citation=''):
# colorbrewer colors red to blue
cb=['#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac']
# check if ICOS data (CO2,CO) is available and combine CO2 and CO in one data frame
if not df_co2.empty and not df_co.empty:
df_obs = pd.merge(df_co2,df_co,left_index=True, right_index=True, how='outer', suffixes=('_co2', '_co'))
if df_obs.columns.str.contains('Flag').any():
df_obs = df_obs.drop(columns=['Flag_co2', 'Flag_co'])
# plot time series for each month
# if start and/or end date are specified in the parameters list, use them
    # otherwise extract start and end date of time series
if start is not None:
sd = start
else:
sd0 = df_obs.index[0]
sd = dt.datetime(sd0.year,sd0.month,1,0)
if end is not None:
ed = end
else:
ed0 = df_obs.index[-1]+dt.timedelta(days=32-df_obs.index[-1].day) #round up to next month/year
ed = dt.datetime(ed0.year,ed0.month,1,0)
# restrict to global start and end date
df_obs = df_obs.loc[(df_obs.index >= sd) & (df_obs.index <= ed)]
# afternoon sampling
# e.g. local time 12-15=> utc 11-14
dd1, dd2 = midday_range
ddm = noon_time
df_obs_noon = df_obs.loc[df_obs.index.hour == ddm]
df_obs_day = df_obs.loc[(df_obs.index.hour >= dd1) & (df_obs.index.hour <= dd2)]
#df_obs_dm = df_obs_day.resample('D',loffset=str(ddm)+'H').mean()
df_obs_dm = df_obs_day.resample('D').mean()
df_obs_dm.index = df_obs_dm.index + to_offset(str(ddm)+'H')
df_obs_dm_3_1 = df_obs_noon.iloc[0::3, :]
df_obs_dm_3_2 = df_obs_noon.iloc[1::3, :]
df_obs_dm_3_3 = df_obs_noon.iloc[2::3, :]
df_obs_dm_7_1 = df_obs_noon.iloc[0::7, :]
df_obs_dm_mm = df_obs_dm.resample('M').mean()
df_obs_dm_mm.index = df_obs_dm_mm.index + to_offset('-15D')
df_obs_dm_3_1_mm = df_obs_dm_3_1.resample('M').mean()
df_obs_dm_3_1_mm.index = df_obs_dm_3_1_mm.index + to_offset('-15D')
df_obs_dm_3_2_mm = df_obs_dm_3_2.resample('M').mean()
df_obs_dm_3_2_mm.index = df_obs_dm_3_2_mm.index + to_offset('-15D')
df_obs_dm_3_3_mm = df_obs_dm_3_3.resample('M').mean()
df_obs_dm_3_3_mm.index = df_obs_dm_3_3_mm.index + to_offset('-15D')
df_obs_dm_7_1_mm = df_obs_dm_7_1.resample('M').mean()
df_obs_dm_7_1_mm.index = df_obs_dm_7_1_mm.index + to_offset('-15D')
df_obs_dm_std = df_obs_dm.resample('M').std()
df_obs_dm_std.index = df_obs_dm_std.index + to_offset('-15D')
df_obs_dm_3_1_std = df_obs_dm_3_1.resample('M').std()
df_obs_dm_3_1_std.index = df_obs_dm_3_1_std.index + to_offset('-15D')
df_obs_dm_3_2_std = df_obs_dm_3_2.resample('M').std()
df_obs_dm_3_2_std.index = df_obs_dm_3_2_std.index + to_offset('-15D')
df_obs_dm_3_3_std = df_obs_dm_3_3.resample('M').std()
df_obs_dm_3_3_std.index = df_obs_dm_3_3_std.index + to_offset('-15D')
df_obs_dm_7_1_std = df_obs_dm_7_1.resample('M').std()
df_obs_dm_7_1_std.index = df_obs_dm_7_1_std.index + to_offset('-15D')
df_obs_dm_count = df_obs_dm.resample('M').count()
df_obs_dm_count.index = df_obs_dm_count.index + to_offset('-15D')
df_obs_dm_3_1_count = df_obs_dm_3_1.resample('M').count()
df_obs_dm_3_1_count.index = df_obs_dm_3_1_count.index + to_offset('-15D')
df_obs_dm_3_2_count = df_obs_dm_3_2.resample('M').count()
df_obs_dm_3_2_count.index = df_obs_dm_3_2_count.index + to_offset('-15D')
df_obs_dm_3_3_count = df_obs_dm_3_3.resample('M').count()
df_obs_dm_3_3_count.index = df_obs_dm_3_3_count.index + to_offset('-15D')
df_obs_dm_7_1_count = df_obs_dm_7_1.resample('M').count()
df_obs_dm_7_1_count.index = df_obs_dm_7_1_count.index + to_offset('-15D')
# find n lowest-variability-footprints
df_var_smallest = pd.DataFrame()
# plot time series for each month
# if start and/or end date are specified in the parameters list, use them
    # otherwise extract start and end date of time series
if start is not None:
sd = start
else:
sd0 = df_obs.index[0]
sd = dt.datetime(sd0.year,sd0.month,1,0)
if end is not None:
ed = end
else:
ed0 = df_obs.index[-1]+dt.timedelta(days=32-df_obs.index[-1].day) #round up to next month/year
ed = dt.datetime(ed0.year,ed0.month,1,0)
for yrmon in rrule(MONTHLY, dtstart=sd, until=ed):
start_date = dt.datetime(yrmon.year,yrmon.month,1,0)
if yrmon.month >= 12:
end_date = dt.datetime(yrmon.year+1,1,1,0)
else:
end_date = dt.datetime(yrmon.year,yrmon.month+1,1,0)
end_date = end_date - dt.timedelta(hours=1)
df_vs = df_obs_noon[(df_obs_noon.index >= start_date) &
(df_obs_noon.index <= end_date)].nsmallest(10, 'Stdev_co2')
df_var_smallest = df_var_smallest.append(df_vs)
#df_var_smallest_mm = df_var_smallest.resample('M',loffset='-15D').mean()
#df_var_smallest_std = df_var_smallest.resample('M',loffset='-15D').std()
#df_var_smallest_count = df_var_smallest.resample('M',loffset='-15D').count()
df_var_smallest_mm = df_var_smallest.resample('M').mean()
df_var_smallest_mm.index = df_var_smallest_mm.index + to_offset('-15D')
df_var_smallest_std = df_var_smallest.resample('M').std()
df_var_smallest_std.index = df_var_smallest_std.index + to_offset('-15D')
df_var_smallest_count = df_var_smallest.resample('M').count()
df_var_smallest_count.index = df_var_smallest_count.index + to_offset('-15D')
idx = df_obs_dm_mm.index.intersection(df_obs_dm_7_1_mm.index)
idxx = df_obs_dm_mm.index.intersection(df_var_smallest_mm.index)
fig = p.figure(figsize=(15,17))
start_date = dt.datetime(2016,5,1,0)
end_date = dt.datetime(2019,5,1,0)
idx = pd.date_range(df_obs_dm_3_1.index[0],df_obs_dm_3_1.index[-1],freq='3D' )
#df_obs_dm_3_1 = df_obs_dm_3_1.reindex(idx, fill_value=np.nan)
ax = fig.add_subplot(5,1,1)
p.plot(df_obs.index,df_obs['co2'],'.',color=lightgray,label='hourly CO$_2$ obs ')
p.plot(df_obs_dm_3_1.index,df_obs_dm_3_1['co2'],'.',color=cb[7],label='_nolegend_')
p.plot(df_obs_dm_3_2.index,df_obs_dm_3_2['co2'],'.',color=cb[6],label='_nolegend_')
p.plot(df_obs_dm_3_3.index,df_obs_dm_3_3['co2'],'.',color=cb[5],label='_nolegend_')
p.plot(df_obs_dm_3_1.index,df_obs_dm_3_1['co2'],'-',color=cb[7],label='_nolegend_')#,label=str(ddm+1)+' LT every 3rd day')#,label=ist+' CO2 obs mean('+str(dd1+1)+'-'+str(dd2+1)+' LT)')
p.plot(df_obs_dm_3_2.index,df_obs_dm_3_2['co2'],'-',color=cb[6],label='every 3rd day at '+str(ddm+1)+' LT ')#,label=ist+' CO2 obs mean('+str(dd1+1)+'-'+str(dd2+1)+' LT)')
p.plot(df_obs_dm_3_3.index,df_obs_dm_3_3['co2'],'-',color=cb[5],label='_nolegend_')#,label=str(ddm+1)+' LT every 3rd day')
p.plot(df_var_smallest.index,df_var_smallest['co2'],'.',color=cb[0],label='10 days per month w/ smallest var at '+str(ddm+1)+' LT')#,label=ist+' CO2 obs mean('+str(dd1+1)+'-'+str(dd2+1)+' LT)')
p.title(stilt_stations[ist]['icosName']+' {:.0f}m'.format(stilt_stations[ist]['icosHeight'])
+' {:.2f}$^\circ$N'.format(stilt_stations[ist]['icosLat'])
+' {:.2f}$^\circ$E'.format(stilt_stations[ist]['icosLon']))
ax.set_xlim(start_date,end_date)
ax.set_ylim(380,460)
ax.set_ylabel('CO$_2$ [ppm]')
ax.grid(axis='x')
ax.grid(axis='y')
ax.legend(loc='upper left',ncol=8)
ax = fig.add_subplot(5,1,2)
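# each series is shifted horizontally by a few days so that markers/bars of the different sampling strategies do not overlap within a month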
p.errorbar(df_obs_dm_mm.index-dt.timedelta(days=4), df_obs_dm_mm['co2'],
yerr=df_obs_dm_std['co2'], fmt='o', color='k',ms=10,label=str(dd1+1)+'-'+str(dd2+1)+' LT mean every day')
p.errorbar(df_var_smallest_mm.index-dt.timedelta(days=-4), df_var_smallest_mm['co2'],
yerr=df_var_smallest_std['co2'], fmt='o', color=cb[0],label='10 days per month w/ smallest var at '+str(ddm+1)+' LT')
p.errorbar(df_obs_dm_3_1_mm.index-dt.timedelta(days=2), df_obs_dm_3_1_mm['co2'],
yerr=df_obs_dm_3_1_std['co2'], fmt='o', color=cb[7],label='_nolegend_')
p.errorbar(df_obs_dm_3_2_mm.index-dt.timedelta(days=0), df_obs_dm_3_2_mm['co2'],
yerr=df_obs_dm_3_2_std['co2'], fmt='o', color=cb[6],label='every 3rd day at '+str(ddm+1)+' LT')
p.errorbar(df_obs_dm_3_3_mm.index-dt.timedelta(days=-2), df_obs_dm_3_3_mm['co2'],
yerr=df_obs_dm_3_3_std['co2'], fmt='o', color=cb[5],label='_nolegend_')
ax.set_xlim(start_date,end_date)
ax.set_ylim(380,460)
ax.set_ylabel('monthly mean CO$_2$ [ppm]')
ax.grid(axis='x')
ax.grid(axis='y')
ax.legend(loc='upper left',ncol=8)
ax = fig.add_subplot(5,1,3)
p.plot(df_obs_dm_mm.index-dt.timedelta(days=5), df_obs_dm_mm['co2']-df_obs_dm_mm['co2'], '-',
color='gray',label='_nolegend_')
p.plot(df_var_smallest_mm.index-dt.timedelta(days=-4), df_var_smallest_mm['co2']-df_obs_dm_mm.loc[idxx]['co2'], '-',
color=cb[0],label=str(ddm+1)+' LT smallest var - '+str(dd1+1)+'-'+str(dd2+1)+' LT mean every day')
p.plot(df_obs_dm_3_1_mm.index-dt.timedelta(days=2), df_obs_dm_3_1_mm['co2']-df_obs_dm_mm['co2'], '-',
color=cb[7],label='_nolegend_')
p.plot(df_obs_dm_3_2_mm.index-dt.timedelta(days=0), df_obs_dm_3_2_mm['co2']-df_obs_dm_mm['co2'], '-',
color=cb[6],label=str(ddm+1)+' LT every 3rd day - '+str(dd1+1)+'-'+str(dd2+1)+' LT mean every day')
p.plot(df_obs_dm_3_3_mm.index-dt.timedelta(days=-2), df_obs_dm_3_3_mm['co2']-df_obs_dm_mm['co2'], '-',
color=cb[5],label='_nolegend_')
ax.set_xlim(start_date,end_date)
ax.set_ylim(-6,6)
ax.set_ylabel('CO$_2$ deviation [ppm]')
ax.grid(axis='x')
ax.grid(axis='y')
ax.legend(loc='upper left',ncol=8)
ax = fig.add_subplot(5,1,4)
p.bar(df_obs_dm_std.index-dt.timedelta(days=0), df_obs_dm_std['co2'], color='gray',
label='afternoon mean every day')
p.bar(df_var_smallest_std.index-dt.timedelta(days=-4), df_var_smallest_std['co2'], color=cb[0],
label=str(ddm+1)+' LT smallest var')
p.bar(df_obs_dm_3_1_std.index-dt.timedelta(days=2), df_obs_dm_3_1_std['co2'], color=cb[7],
label=str(ddm+1)+' LT every 3rd day')
p.bar(df_obs_dm_3_2_std.index-dt.timedelta(days=0), df_obs_dm_3_2_std['co2'], color=cb[6],
label=str(ddm+1)+' LT every 3rd day')
p.bar(df_obs_dm_3_3_std.index-dt.timedelta(days=-2), df_obs_dm_3_3_std['co2'], color=cb[5],
label=str(ddm+1)+' LT every 3rd day')
#p.title(ist)
ax.set_xlim(start_date,end_date)
#ax.set_ylim(380,460)
ax.set_ylabel('standard deviation [ppm]')
ax.grid(axis='x')
ax.grid(axis='y')
ax.legend(loc='best',ncol=1)
ax = fig.add_subplot(5,1,5)
p.bar(df_obs_dm_count.index-dt.timedelta(days=4), df_obs_dm_count['co2'], color='gray',
label='afternoon mean every day')
p.bar(df_var_smallest_count.index-dt.timedelta(days=-4), df_var_smallest_count['co2'], color=cb[0],
label=str(ddm+1)+' LT smallest var')
p.bar(df_obs_dm_3_1_count.index-dt.timedelta(days=2), df_obs_dm_3_1_count['co2'], color=cb[7],
label=str(ddm+1)+' LT every 3rd day')
p.bar(df_obs_dm_3_2_count.index-dt.timedelta(days=0), df_obs_dm_3_2_count['co2'], color=cb[6],
label=str(ddm+1)+' LT every 3rd day')
p.bar(df_obs_dm_3_3_count.index-dt.timedelta(days=-2), df_obs_dm_3_3_count['co2'], color=cb[5],
label=str(ddm+1)+' LT every 3rd day')
#p.title(ist)
ax.set_xlim(start_date,end_date)
#ax.set_ylim(380,460)
ax.set_ylabel('afternoon values per month')
ax.grid(axis='x')
ax.grid(axis='y')
ax.legend(loc='best',ncol=1)
p.tight_layout()
p.show()
display(Markdown("<sub>"+citation+"</sub>"))
pngfile='sensitivity_ICOS_'+ist+'_'+str(start_date.year)+str(start_date.month).zfill(2)
fig.savefig(path_plots+pngfile+'.png',dpi=100)
p.close()
if summary:
# print summary
display(HTML('<br> <b>afternoon mean every day</b>'))
display(HTML(df_obs_dm['co2'].groupby(pd.Grouper(freq='M')).agg(['mean', 'std','count']).to_html()))
display(HTML('<br> <b>'+str(ddm+1)+' LT every 3rd day</b>'))
display(HTML(df_obs_dm_3_1['co2'].groupby(pd.Grouper(freq='M')).agg(['mean', 'std','count']).to_html()))
display(HTML('<br> <b>'+str(ddm+1)+' LT every 3rd day</b>'))
display(HTML(df_obs_dm_3_2['co2'].groupby(pd.Grouper(freq='M')).agg(['mean', 'std','count']).to_html()))
display(HTML('<br> <b>'+str(ddm+1)+' LT every 3rd day</b>'))
display(HTML(df_obs_dm_3_3['co2'].groupby(pd.Grouper(freq='M')).agg(['mean', 'std','count']).to_html()))
display(HTML('<br> <b>'+str(ddm+1)+' LT every 7th day</b>'))
display(HTML(df_obs_dm_7_1['co2'].groupby(pd.Grouper(freq='M')).agg(['mean', 'std','count']).to_html()))
display(HTML('<br> <b>'+str(ddm+1)+' LT lowest variability</b>'))
display(HTML(df_var_smallest['co2'].groupby(pd.Grouper(freq='M')).agg(['mean', 'std','count']).to_html()))
summary_file=path_plots+ist+'_sensitivity_monthly_ICOS_counts.csv'
open(summary_file,'w').write(stilt_stations[ist]['icosName']+' {:.0f}'.format(stilt_stations[ist]['icosHeight'])
+' {:.2f}'.format(stilt_stations[ist]['icosLat'])
+' {:.2f}'.format(stilt_stations[ist]['icosLon'])+'\n')
open(summary_file,'a').write('afternoon mean every day'+'\n')
df_obs_dm['co2'].groupby(pd.Grouper(freq='M')).agg(['mean', 'std','count']).to_csv(summary_file,mode='a')
open(summary_file,'a').write(str(ddm+1)+' LT every 3rd day'+'\n')
df_obs_dm_3_1['co2'].groupby(pd.Grouper(freq='M')).agg(['mean', 'std','count']).to_csv(summary_file,mode='a')
open(summary_file,'a').write(str(ddm+1)+' LT every 3rd day'+'\n')
df_obs_dm_3_2['co2'].groupby(pd.Grouper(freq='M')).agg(['mean', 'std','count']).to_csv(summary_file,mode='a')
open(summary_file,'a').write(str(ddm+1)+' LT every 3rd day'+'\n')
df_obs_dm_3_3['co2'].groupby(pd.Grouper(freq='M')).agg(['mean', 'std','count']).to_csv(summary_file,mode='a')
open(summary_file,'a').write(str(ddm+1)+' LT every 7th day'+'\n')
df_obs_dm_7_1['co2'].groupby(pd.Grouper(freq='M')).agg(['mean', 'std','count']).to_csv(summary_file,mode='a')
open(summary_file,'a').write(str(ddm+1)+' LT lowest variability'+'\n')
df_var_smallest['co2'].groupby(pd.Grouper(freq='M')).agg(['mean', 'std','count']).to_csv(summary_file,mode='a')
###Output
_____no_output_____
###Markdown
Back to top Call all functions for selection of STILT time series, footprints and comparison with ICOS measurement data
###Code
# call all functions for selection of STILT time series, footprints and comparison with ICOS measurement data
def run_all(station_selection, ystart, mstart, yend, mend,
low_stilt, high_stilt, highco_stilt, var_limit_stilt,
highco_obs, var_limit_obs, stdev_limit_obs,
noon_time, midday_range):
global_start_date = dt.datetime(ystart,mstart,1,0)
global_end_date = dt.datetime(yend,mend,1,0)
if (global_start_date > global_end_date):
global_start_date = global_end_date
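# extend the end date to the last hour of the selected end month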
if mend >= 12:
global_end_date = dt.datetime(yend+1,1,1,0)
else:
global_end_date = dt.datetime(yend,mend+1,1,0)
global_end_date = global_end_date - dt.timedelta(hours=1)
if (global_start_date > global_end_date):
print('Select Start Date < End Date')
return
for station in station_selection:
loc_ident=stilt_stations[station]['locIdent']
station_lat=stilt_stations[station]['lat']
station_lon=stilt_stations[station]['lon']
height = stilt_stations[station]['icosHeight']
# selection of STILT time series (temporal and concentration thresholds)
# only for those stations for which 1-hourly results are available (see dropdown list)
display(HTML('<p style="font-size:20px;font-weight:bold;color:royalblue;"> <br> Test sampling strategy on 1-hourly STILT time series </p>'))
all_selection_plots(station,global_start_date,global_end_date,loc_ident,station_lat,station_lon,
high_stilt, low_stilt, highco_stilt, var_limit_stilt, noon_time, midday_range, summary=True)
# read ICOS data from ICOS Carbon Portal
display(HTML('<p style="font-size:20px;font-weight:bold;color:royalblue;"> <br> Read ICOS observation time series from Carbon Portal </p>'))
tracer = 'CO2'
dobj_L2_co2 = RunSparql(sparql_query=atc_station_tracer_query(station[0:3], height, tracer, level=2),output_format='pandas').run()
df_co2 = read_icos_data(dobj_L2_co2,tracer)
if not df_co2.loc[global_start_date:global_end_date].empty:
citation_co2 = RunSparql(sparql_query=sparqls.get_icos_citation(dobj_L2_co2.dobj.iloc[0]), output_format='pandas').run().cit[0]
else:
citation_co2 = ''
display(HTML('<p style="font-size:15px;font-weight:bold;color:red;">No ICOS '+tracer+' data available for '
+global_start_date.strftime("%Y-%m-%d %H:%M:%S")+' - '+global_end_date.strftime("%Y-%m-%d %H:%M:%S")+'</p>'))
tracer = 'CO'
dobj_L2_co = RunSparql(sparql_query=atc_station_tracer_query(station[0:3], height, tracer, level=2),output_format='pandas').run()
df_co = read_icos_data(dobj_L2_co,tracer)
if not df_co.empty:
df_co['co'] = df_co['co'] / 1000. #convert observed CO from ppb to ppm
if not df_co.loc[global_start_date:global_end_date].empty:
citation_co = RunSparql(sparql_query=sparqls.get_icos_citation(dobj_L2_co.dobj.iloc[0]), output_format='pandas').run().cit[0]
else:
citation_co = ''
display(HTML('<p style="font-size:15px;font-weight:bold;color:red;">No ICOS '+tracer+' data available for '
+global_start_date.strftime("%Y-%m-%d %H:%M:%S")+' - '+global_end_date.strftime("%Y-%m-%d %H:%M:%S")+'</p>'))
tracer = 'MTO'
dobj_L2_mto = RunSparql(sparql_query=atc_station_tracer_query(station[0:3], height, tracer, level=2),output_format='pandas').run()
df_mto = read_icos_data(dobj_L2_mto,tracer)
if not df_mto.loc[global_start_date:global_end_date].empty:
citation_mto = RunSparql(sparql_query=sparqls.get_icos_citation(dobj_L2_mto.dobj.iloc[0]), output_format='pandas').run().cit[0]
else:
citation_mto = ''
display(HTML('<p style="font-size:15px;font-weight:bold;color:red;">No ICOS '+tracer+' data available for '
+global_start_date.strftime("%Y-%m-%d %H:%M:%S")+' - '+global_end_date.strftime("%Y-%m-%d %H:%M:%S")+'</p>'))
display(HTML('<p style="font-size:20px;font-weight:bold;color:royalblue;"> <br> Comparison ICOS observation time series with STILT results </p>'))
citation=citation_co2+'<br>'+citation_co
plot_comparison(station, df_co2, df_co, df_mto, global_start_date,global_end_date, citation=citation)
if ((len(citation_co2) > 0) & (len(citation_co) > 0)):
citation=citation_co2+'<br>'+citation_co
if (len(citation_mto) > 0):
citation=citation+'<br>'+citation_mto
# specify start and end date in case you want to analyse the same time period
display(HTML('<p style="font-size:20px;font-weight:bold;color:royalblue;"> <br> Apply selection to ICOS observation time series </p>'))
plot_icos_ts_selection(station, df_co2, df_co, df_mto,
highco_obs, var_limit_obs, stdev_limit_obs, noon_time, midday_range,
start=global_start_date, end=global_end_date, summary=True, citation=citation)
# if you want plots for the full time series at the ICOS site, use this call
#plot_icos_ts_selection(station, df_co2, df_co, df_mto,
# highco_obs, var_limit_obs, stdev_limit_obs, citation=citation)
display(HTML('<p style="font-size:20px;font-weight:bold;color:royalblue;"> <br> Sensitivity of selection strategy on monthly mean </p>'))
# specify start and end date in case you want to analyse the same time period
#sensitivity_selection_icos(station, df_co2, df_co, df_mto,
# highco_obs, var_limit_obs, stdev_limit_obs,
# start=global_start_date, end=global_end_date, summary=True, citation=citation)
# if you want plots for the full time series at the ICOS site, use this call
# only for ICOS 2019 release
start = dt.datetime(2016,5,1,0)
end = dt.datetime(2019,5,1,0)
sensitivity_selection_icos(station, df_co2, df_co, df_mto,
highco_obs, var_limit_obs, stdev_limit_obs, noon_time, midday_range,
start=start, end=end, summary=True, citation=citation_co2)
###Output
_____no_output_____
###Markdown
Back to top Call all functions for selection of ICOS measurement time series
###Code
# call all functions for selection of ICOS measurement time series
def run_obs(station_selection, ystart, mstart, yend, mend,
highco_obs, var_limit_obs, stdev_limit_obs,
noon_time, midday_range):
global_start_date = dt.datetime(ystart,mstart,1,0)
global_end_date = dt.datetime(yend,mend,1,0)
if (global_start_date > global_end_date):
global_start_date = global_end_date
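# extend the end date to the last hour of the selected end month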
if mend >= 12:
global_end_date = dt.datetime(yend+1,1,1,0)
else:
global_end_date = dt.datetime(yend,mend+1,1,0)
global_end_date = global_end_date - dt.timedelta(hours=1)
if (global_start_date > global_end_date):
print('Select Start Date < End Date')
return
for station in station_selection:
loc_ident=stilt_stations[station]['locIdent']
station_lat=stilt_stations[station]['lat']
station_lon=stilt_stations[station]['lon']
height = stilt_stations[station]['icosHeight']
# read ICOS data from ICOS Carbon Portal
display(HTML('<p style="font-size:20px;font-weight:bold;color:royalblue;"> <br> Read ICOS observation time series from Carbon Portal </p>'))
tracer = 'CO2'
dobj_L2_co2 = RunSparql(sparql_query=atc_station_tracer_query(station[0:3], height, tracer, level=2),output_format='pandas').run()
df_co2 = read_icos_data(dobj_L2_co2,tracer)
if not df_co2.loc[global_start_date:global_end_date].empty:
citation_co2 = RunSparql(sparql_query=sparqls.get_icos_citation(dobj_L2_co2.dobj.iloc[0]), output_format='pandas').run().cit[0]
else:
citation_co2 = ''
display(HTML('<p style="font-size:15px;font-weight:bold;color:red;">No ICOS '+tracer+' data available for '
+global_start_date.strftime("%Y-%m-%d %H:%M:%S")+' - '+global_end_date.strftime("%Y-%m-%d %H:%M:%S")+'</p>'))
tracer = 'CO'
dobj_L2_co = RunSparql(sparql_query=atc_station_tracer_query(station[0:3], height, tracer, level=2),output_format='pandas').run()
df_co = read_icos_data(dobj_L2_co,tracer)
if not df_co.empty:
df_co['co'] = df_co['co'] / 1000. #convert observed CO from ppb to ppm
if not df_co.loc[global_start_date:global_end_date].empty:
citation_co = RunSparql(sparql_query=sparqls.get_icos_citation(dobj_L2_co.dobj.iloc[0]), output_format='pandas').run().cit[0]
else:
citation_co = ''
display(HTML('<p style="font-size:15px;font-weight:bold;color:red;">No ICOS '+tracer+' data available for '
+global_start_date.strftime("%Y-%m-%d %H:%M:%S")+' - '+global_end_date.strftime("%Y-%m-%d %H:%M:%S")+'</p>'))
tracer = 'MTO'
dobj_L2_mto = RunSparql(sparql_query=atc_station_tracer_query(station[0:3], height, tracer, level=2),output_format='pandas').run()
df_mto = read_icos_data(dobj_L2_mto,tracer)
if not df_mto.loc[global_start_date:global_end_date].empty:
citation_mto = RunSparql(sparql_query=sparqls.get_icos_citation(dobj_L2_mto.dobj.iloc[0]), output_format='pandas').run().cit[0]
else:
citation_mto = ''
display(HTML('<p style="font-size:15px;font-weight:bold;color:red;">No ICOS '+tracer+' data available for '
+global_start_date.strftime("%Y-%m-%d %H:%M:%S")+' - '+global_end_date.strftime("%Y-%m-%d %H:%M:%S")+'</p>'))
if ((len(citation_co2) > 0) & (len(citation_co) > 0)):
citation=citation_co2+'<br>'+citation_co
if (len(citation_mto) > 0):
citation=citation+'<br>'+citation_mto
# specify start and end date in case you want to analyse the same time period
display(HTML('<p style="font-size:20px;font-weight:bold;color:royalblue;"> <br> Apply selection to ICOS observation time series </p>'))
plot_icos_ts_selection(station, df_co2, df_co, df_mto,
highco_obs, var_limit_obs, stdev_limit_obs, noon_time, midday_range,
start=global_start_date, end=global_end_date, summary=True, citation=citation)
# if you want plots for the full time series at the ICOS site, use this call
#plot_icos_ts_selection(station, df_co2, df_co, df_mto,
# highco_obs, var_limit_obs, stdev_limit_obs, citation=citation)
display(HTML('<p style="font-size:20px;font-weight:bold;color:royalblue;"> <br> Sensitivity of selection strategy on monthly mean </p>'))
#sensitivity_selection_icos(station, df_co2, df_co, df_mto,
# highco_obs, var_limit_obs, stdev_limit_obs,
# start=global_start_date, end=global_end_date, summary=True)
# only for ICOS 2019 release
start = dt.datetime(2016,5,1,0)
end = dt.datetime(2019,5,1,0)
sensitivity_selection_icos(station, df_co2, df_co, df_mto,
highco_obs, var_limit_obs, stdev_limit_obs, noon_time, midday_range,
start=start, end=end, summary=True, citation=citation_co2)
###Output
_____no_output_____
###Markdown
Prepare for widgets
###Code
%%javascript
IPython.OutputArea.prototype._should_scroll = function(lines) {
return false;
}
###Output
_____no_output_____
###Markdown
Back to top Create widgets for selection (STILT results + ICOS data)
###Code
# create widgets for selection (STILT results + ICOS data)
def create_widget_selection():
# find stations for which 1-hourly STILT results are available
allStations = sorted([stilt_stations[kk]['name'] for kk in os.listdir(path_stilt+'/Results_RINGO_T1.3/')])
#Create a Dropdown widget with station names:
station = Dropdown(options = allStations,
description = 'Station',
disabled= False,)
#Create a Dropdown widget with year values (start year):
s_year = Dropdown(options = [2017, 2018],
description = 'Start Year',
disabled= False,)
#Create a Dropdown widget with month values (start month):
s_month = Dropdown(options = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
description = 'Start Month',
disabled= False,)
#Create a Dropdown widget with year values (end year):
e_year = Dropdown(options = [2017, 2018],
description = 'End Year',
disabled= False,)
#Create a Dropdown widget with month values (end month):
e_month = Dropdown(options = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
description = 'End Month',
disabled= False,)
#Create a Button widget to control execution:
update_button = Button(description='Update',
disabled=False,
button_style='danger', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Click me',)
#Create a FloatText widget for STILT low fossil fuel CO2 threshold:
low_stilt = FloatText(value=1.0,
description='Low ffCO2',
step=0.01,
disabled=False)
#Create a FloatText widget for STILT high fossil fuel CO2 threshold:
high_stilt = FloatText(value=4.0,
description='High ffCO2',
step=0.01,
disabled=False)
#Create a FloatText widget for STILT high CO threshold:
highco_stilt = FloatText(value=0.04,
description='High ffCO',
step=0.01,
disabled=False)
#Create a FloatText widget for STILT variability threshold:
var_stilt = FloatText(value=1.0,
description='Var',
step=0.01,
disabled=False)
#Create a FloatText widget for threshold of high CO in ICOS observation:
highco_obs = FloatText(value=0.04,
description='High CO',
step=0.01,
disabled=False)
#Create a FloatText widget for threshold of variability in ICOS observation:
var_obs = FloatText(value=0.5,
description='Var',
step=0.01,
disabled=False)
#Create a FloatText widget with threshold of variability in ICOS observation:
stdev_obs = FloatText(value=0.5,
description='Stdev',
step=0.01,
disabled=False)
#Create an IntText widget for selection of noon hour:
noon = IntText(value=12,
description='noon',
step=1,
disabled=False)
#Create an IntRangeSlider widget for selection of mid-day time range:
midday = IntRangeSlider(value=[10, 14],
min=6,
max=19,
step=1,
description='Mid-day',
disabled=False,
orientation='horizontal',
readout=True,
readout_format='d')
header_station = Output()
with header_station:
display(HTML('<p style="font-size:15px;font-weight:bold;color:royalblue;">Select here station and time range: </p>'))
header_stilt_1 = Output()
with header_stilt_1:
display(HTML('<p style="font-size:15px;font-weight:bold;color:royalblue;">Select here thresholds for STILT time series (all thresholds in ppm): </p>'))
header_stilt_2 = Output()
with header_stilt_2:
display(HTML('Select here thresholds for high fossil fuel CO2 and high fossil fuel CO in STILT time series: '))
header_stilt_3 = Output()
with header_stilt_3:
display(HTML('Select here thresholds for low fossil fuel CO2 and low CO2 variability in STILT time series: '))
header_obs_1 = Output()
with header_obs_1:
display(HTML('<p style="font-size:15px;font-weight:bold;color:royalblue;">Select here thresholds for ICOS observation time series (all thresholds in ppm): </p>'))
header_obs_2 = Output()
with header_obs_2:
display(HTML('Select here thresholds for high CO in ICOS observation time series: '))
header_obs_3 = Output()
with header_obs_3:
display(HTML('Select here thresholds for low CO2 variability during mid-day (Var) '+
'and standard deviation in noon-time measurement (Stdev) <br> '+
'in ICOS observation time series: '))
header_noon_1 = Output()
with header_noon_1:
display(HTML('Select here the mid-day time range and noon-time hour (in UTC): '))
station_box = VBox([header_station,station])
#Create a VBox for year and month:
year_box = VBox([s_year, e_year])
month_box = VBox([s_month, e_month])
#Create a HBox for STILT thresholds:
var_stilt_box = HBox([low_stilt, var_stilt])
high_stilt_box = HBox([high_stilt, highco_stilt])
#Add both time-related VBoxes to a HBox:
time_box = HBox([year_box, month_box])
#Create a Vbox for mid-day selection:
noon_box = HBox([midday,noon])
noon_box_header = VBox([header_noon_1,noon_box])
high_stilt_box_header = VBox([header_stilt_2, high_stilt_box])
var_stilt_box_header = VBox([header_stilt_3, var_stilt_box])
stilt_box = VBox([header_stilt_1, high_stilt_box_header, var_stilt_box_header])
#Create a HBox for ICOS observation thresholds:
var_obs_box = HBox([var_obs, stdev_obs])
high_obs_box = HBox([highco_obs])
high_obs_box_header = VBox([header_obs_2, high_obs_box])
var_obs_box_header = VBox([header_obs_3, var_obs_box])
obs_box = VBox([header_obs_1, high_obs_box_header, var_obs_box_header])
#Add all widgets to a VBox:
form = VBox([station_box, time_box, noon_box_header, stilt_box, obs_box, update_button])
#Set font of all widgets in the form:
station.layout.width = '603px'
update_button.style.button_color=royal
time_box.layout.margin = '25px 0px 10px 0px'
year_box.layout.margin = '0px 0px 0px 0px'
stilt_box.layout.margin = '25px 0px 0px 0px'
high_stilt_box_header.layout.margin = '5px 0px 0px 0px'
var_stilt_box_header.layout.margin = '5px 0px 0px 0px'
obs_box.layout.margin = '25px 0px 0px 0px'
update_button.layout.margin = '50px 100px 40px 275px' #top, right, bottom, left
#Initialize form output:
form_out = Output()
#Initialize results output:
results_out = Output()
#Define update function:
def update_func(button_c):
#Display selection:
with results_out:
#Clear previous results:
clear_output()
display(HTML('<p style="font-size:15px;font-weight:bold;">All plots and tables are also saved in the folder: '+path_plots+'</p><br>'))
#Print "results" (current selection):
print('Station: ', station.value)
print('Start Year: ', s_year.value, '\t Start Month: ', s_month.value)
print('End Year: ', e_year.value, '\t End Month: ', e_month.value)
print('Noon: ',noon.value,' UTC', '\t \t Mid-day range: ',midday.value,' UTC')
print('Threshold for low ffCO2 in STILT: ', low_stilt.value)
print('Threshold for high ffCO2 in STILT: ', high_stilt.value)
print('Threshold for high ffCO in STILT: ', highco_stilt.value)
print('Threshold for low variability in STILT: ', var_stilt.value)
print('Threshold for high CO in ICOS observations: ', highco_obs.value)
print('Threshold for low mid-day variability in ICOS CO2 observations: ', var_obs.value)
print('Threshold for low noon-time standard deviation in ICOS CO2 observations: ', stdev_obs.value)
station_selection = [key for (key, value) in stilt_stations.items() if value['name'] == station.value]
run_all(station_selection, s_year.value, s_month.value, e_year.value, e_month.value,
low_stilt.value, high_stilt.value, highco_stilt.value, var_stilt.value,
highco_obs.value, var_obs.value, stdev_obs.value, noon.value, midday.value)
#Call update-function when button is clicked:
update_button.on_click(update_func)
#Open form object:
with form_out:
#Clear previous selections in form:
clear_output()
#Display form and results:
display(form, results_out)
#Display form:
display(form_out)
###Output
_____no_output_____
###Markdown
Back to top Create widgets for selection (ICOS data only)
###Code
# Create widgets for selection (ICOS data only)
def create_widget_selection_icos():
# find stations for which 1-hourly STILT results are available
allStations = sorted([stilt_stations[kk]['name'] for kk in os.listdir(path_stilt+'/Results_RINGO_T1.3/')])
#Create a Dropdown widget with station names:
station = Dropdown(options = allStations,
description = 'Station',
disabled= False,)
#Create a Dropdown widget with year values (start year):
s_year = Dropdown(options = [2017, 2018, 2019],
description = 'Start Year',
disabled= False,)
#Create a Dropdown widget with month values (start month):
s_month = Dropdown(options = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
description = 'Start Month',
disabled= False,)
#Create a Dropdown widget with year values (end year):
e_year = Dropdown(options = [2017, 2018, 2019],
description = 'End Year',
disabled= False,)
#Create a Dropdown widget with month values (end month):
e_month = Dropdown(options = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
description = 'End Month',
disabled= False,)
#Create a Button widget to control execution:
update_button = Button(description='Update',
disabled=False,
button_style='danger', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Click me',)
#Create a FloatText widget for threshold of high CO in ICOS observation:
highco_obs = FloatText(value=0.04,
description='High CO',
step=0.01,
disabled=False)
#Create a FloatText widget for threshold of variability in ICOS observation:
var_obs = FloatText(value=0.5,
description='Var',
step=0.01,
disabled=False)
#Create a FloatText widget with threshold of variability in ICOS observation:
stdev_obs = FloatText(value=0.5,
description='Stdev',
step=0.01,
disabled=False)
#Create an IntText widget for selection of noon hour:
noon = IntText(value=12,
description='noon',
step=1,
disabled=False)
#Create an IntRangeSlider widget for selection of mid-day time range:
midday = IntRangeSlider(value=[10, 14],
min=6,
max=19,
step=1,
description='Mid-day',
disabled=False,
orientation='horizontal',
readout=True,
readout_format='d')
header_station = Output()
with header_station:
display(HTML('<p style="font-size:15px;font-weight:bold;color:royalblue;">Select here station and time range: </p>'))
header_obs_1 = Output()
with header_obs_1:
display(HTML('<p style="font-size:15px;font-weight:bold;color:royalblue;">Select here thresholds for ICOS observation time series (all thresholds in ppm): </p>'))
header_obs_2 = Output()
with header_obs_2:
display(HTML('Select here thresholds for high CO in ICOS observation time series: '))
header_obs_3 = Output()
with header_obs_3:
display(HTML('Select here thresholds for low CO2 variability during mid-day (Var) '+
'and standard deviation in noon-time measurement (Stdev) <br> '+
'in ICOS observation time series: '))
header_noon_1 = Output()
with header_noon_1:
display(HTML('Select here the mid-day time range and noon-time hour (in UTC): '))
station_box = VBox([header_station,station])
#Create a VBox for year and month:
year_box = VBox([s_year, e_year])
month_box = VBox([s_month, e_month])
#Add both time-related VBoxes to a HBox:
time_box = HBox([year_box, month_box])
#Create a Vbox for mid-day selection:
noon_box = HBox([midday,noon])
noon_box_header = VBox([header_noon_1,noon_box])
#Create a HBox for ICOS observation thresholds:
var_obs_box = HBox([var_obs, stdev_obs])
high_obs_box = HBox([highco_obs])
high_obs_box_header = VBox([header_obs_2, high_obs_box])
var_obs_box_header = VBox([header_obs_3, var_obs_box])
obs_box = VBox([header_obs_1, high_obs_box_header, var_obs_box_header])
#Add all widgets to a VBox:
form = VBox([station_box, time_box, noon_box_header, obs_box, update_button])
#Set font of all widgets in the form:
station.layout.width = '603px'
update_button.style.button_color=royal
time_box.layout.margin = '25px 0px 10px 0px'
year_box.layout.margin = '0px 0px 0px 0px'
obs_box.layout.margin = '25px 0px 0px 0px'
update_button.layout.margin = '50px 100px 40px 275px' #top, right, bottom, left
#Initialize form output:
form_out = Output()
#Initialize results output:
results_out = Output()
#Define update function:
def update_func(button_c):
#Display selection:
with results_out:
#Clear previous results:
clear_output()
display(HTML('<p style="font-size:15px;font-weight:bold;">All plots and tables are also saved in the folder: '+path_plots+'</p><br>'))
#Print "results" (current selection):
print('Station: ', station.value)
print('Start Year: ', s_year.value, '\t Start Month: ', s_month.value)
print('End Year: ', e_year.value, '\t End Month: ', e_month.value)
print('Noon: ',noon.value,' UTC', '\t \t Mid-day range: ',midday.value,' UTC')
print('Threshold for high CO in ICOS observations: ', highco_obs.value)
print('Threshold for low mid-day variability in ICOS CO2 observations: ', var_obs.value)
print('Threshold for low noon-time standard deviation in ICOS CO2 observations: ', stdev_obs.value)
station_selection = [key for (key, value) in stilt_stations.items() if value['name'] == station.value]
run_obs(station_selection, s_year.value, s_month.value, e_year.value, e_month.value,
highco_obs.value, var_obs.value, stdev_obs.value, noon.value, midday.value)
#Call update-function when button is clicked:
update_button.on_click(update_func)
#Open form object:
with form_out:
#Clear previous selections in form:
clear_output()
#Display form and results:
display(form, results_out)
#Display form:
display(form_out)
###Output
_____no_output_____ |
docs/qudits.ipynb | ###Markdown
Copyright 2020 The Cirq Developers
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Qudits View on QuantumAI Run in Google Colab View source on GitHub Download notebook
###Code
try:
import cirq
except ImportError:
print("installing cirq...")
!pip install --quiet cirq
print("installed cirq.")
###Output
_____no_output_____
###Markdown
Most of the time in quantum computation, we work with qubits, which are 2-level quantum systems. A qu-*d*-it is a generalization of a qubit to a d-level or d-dimension system.Qudits with known values for d have specific names. A qubit has dimension 2, a qutrit has dimension 3, a ququart has dimension 4, and so on.In Cirq, qudits work exactly like qubits except they have a `dimension` attribute other than 2, and they can only be used with gates specific to that dimension.Both qubits and qudits are represented by a `Qid` object.To apply a gate to some qudits, the dimensions of the qudits must match the dimensions it works on. For example, if a gate represents a unitary evolution on three qudits, a qubit, a qutrit, and another qutrit, the gate's "qid shape" is `(2, 3, 3)` and its `on` method will accept exactly 3 `Qid`s with dimension 2, 3, and 3. This is an example single qutrit gate used in a circuit:
###Code
import cirq
import numpy as np
class QutritPlusGate(cirq.SingleQubitGate):
def _qid_shape_(self):
return (3,)
def _unitary_(self):
return np.array([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
def _circuit_diagram_info_(self, args):
return '[+1]'
q0 = cirq.LineQid(0, dimension=3)
circuit = cirq.Circuit(
QutritPlusGate().on(q0)
)
print(circuit)
###Output
0 (d=3): ───[+1]───
###Markdown
Copyright 2020 The Cirq Developers
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Qudits View on QuantumAI Run in Google Colab View source on GitHub Download notebook
###Code
try:
import cirq
except ImportError:
print("installing cirq...")
!pip install --quiet cirq
print("installed cirq.")
###Output
_____no_output_____
###Markdown
Most of the time in quantum computation, we work with qubits, which are 2-level quantum systems. However, it is possible to also define quantum computation with higher dimensional systems. A qu-*d*-it is a generalization of a qubit to a d-level or d-dimension system. For example, the state of a single qubit is a superposition of two basis states, $|\psi\rangle=\alpha|0\rangle+\beta|1\rangle$, whereas the state of a qudit for a three dimensional system is a superposition of three basis states $|\psi\rangle=\alpha|0\rangle+\beta|1\rangle+\gamma|2\rangle$.Qudits with known values for d have specific names. A **qubit** has dimension 2, a **qutrit** has dimension 3, a **ququart** has dimension 4, and so on.In Cirq, qudits work exactly like qubits except they have a `dimension` attribute different from 2, and they can only be used with gates specific to that dimension. In Cirq, both qubits and qudits are subclasses of the class `cirq.Qid`. To apply a gate to some qudits, the dimensions of the qudits must match the dimensions it works on. For example, consider a gate that represents a unitary evolution on three qudits. Further suppose that these are a qubit, a qutrit, and another qutrit. Then the gate's "qid shape" is `(2, 3, 3)` and its `on` method will accept exactly 3 `Qid`s with dimension 2, 3, and 3, respectively.This is an example single qutrit gate acting on a single qutrit in a simple quantum circuit:
###Code
import cirq
import numpy as np
class QutritPlusGate(cirq.Gate):
"""A gate that adds one in the computational basis of a qutrit.
This gate acts on three-level systems. In the computational basis of
this system it enacts the transformation U|xใ = |x + 1 mod 3ใ, or
in other words U|0ใ = |1ใ, U|1ใ = |2ใ, and U|2> = |0ใ.
"""
def _qid_shape_(self):
# By implementing this method this gate implements the
# cirq.qid_shape protocol and will return the tuple (3,)
# when cirq.qid_shape acts on an instance of this class.
# This indicates that the gate acts on a single qutrit.
return (3,)
def _unitary_(self):
# Since the gate acts on three level systems it has a unitary
# effect which is a three by three unitary matrix.
return np.array([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
def _circuit_diagram_info_(self, args):
return '[+1]'
# Here we create a qutrit for the gate to act on.
q0 = cirq.LineQid(0, dimension=3)
# We can now enact the gate on this qutrit.
circuit = cirq.Circuit(
QutritPlusGate().on(q0)
)
# When we print this out we see that the qutrit is labeled by its dimension.
print(circuit)
###Output
_____no_output_____
###Markdown
cirq.Qid`cirq.Qid` is the type that represents both qubits and qudits.Cirq has the built-in qubit types, `cirq.NamedQubit`, `cirq.GridQubit`, and `cirq.LineQubit`, and it also provides corresponding `cirq.Qid` types: - `cirq.NamedQid` - Example: Create a qutrit named 'a' by specifying the dimension in the constructor: `cirq.NamedQid('a', dimension=3)`.- `cirq.GridQid` - Example: Create a qutrit at location (2, 0) by specifying the dimension in the constructor: `cirq.GridQid(2, 0, dimension=3)`. - Example: You can create regions of `cirq.GridQid`s. For example, to create a 2x2 grid of ququarts, use `cirq.GridQid.rect(2, 2, dimension=4)`.- `cirq.LineQid` - Example: Create a qutrit at location 1 on the line by specifying the dimension in the constructor: `cirq.LineQid(0, dimension=3)`. - Example: You can create ranges of `cirq.LineQid`s. For example, to create qutrits on a line with locations from 0 to 4, use `cirq.LineQid.range(5, dimension=3)`. By default `cirq.Qid` classes in cirq will default to qubits unless their `dimension` parameter is specified in creation. Thus a `cirq.Qid` like `cirq.NamedQid('a')` is a qubit. The `cirq.qid_shape` protocolQuantum gates, operations, and other types that act on a sequence of qudits can specify the dimension of each qudit they act on by implementing the `_qid_shape_` magic method. This method returns a tuple of integers corresponding to the required dimension of each qudit it operates on, e.g. `(2, 3, 3)` means an object that acts on a qubit, a qutrit, and another qutrit. When you specify `_qid_shape_` we say that the object implements the `qid_shape` protocol.When `cirq.Qid`s are used with `cirq.Gate`s, `cirq.Operation`s, and `cirq.Circuit`s, the dimension of each qid must match the corresponding entry in the qid shape. An error is raised otherwise.Callers can query the qid shape of an object or a list of `Qid`s by calling `cirq.qid_shape` on it. By default, `cirq.qid_shape` will return the equivalent qid shape for qubits if `_qid_shape_` is not defined. In particular, for a qubit-only gate the qid shape is a tuple of 2s containing one 2 for each qubit e.g. `(2,) * cirq.num_qubits(gate)`.
###Code
# Create an instance of the qutrit gate defined above.
gate = QutritPlusGate()
# Verify that it acts on a single qutrit.
print(cirq.qid_shape(gate))
###Output
_____no_output_____
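###Markdown
A short sketch of creating the `cirq.Qid` types listed above (the names and coordinates used here are arbitrary choices for illustration):
###Code
import cirq
# A qutrit with a string name.
named = cirq.NamedQid('a', dimension=3)
# A qutrit placed on a grid at row 2, column 0.
grid = cirq.GridQid(2, 0, dimension=3)
# A 2x2 patch of ququarts.
patch = cirq.GridQid.rect(2, 2, dimension=4)
# Five qutrits on a line.
line = cirq.LineQid.range(5, dimension=3)
print(named, grid, patch, line, sep='\n')
###Output
_____no_output_____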
###Markdown
Unitaries, mixtures, and channels on quditsThe magic methods `_unitary_`, `_apply_unitary_`, `_mixture_`, and `_kraus_` can be used to define unitary gates, mixtures, and channels that can be used with qudits (see [protocols](protocols.md) for how these work). Because the state space of a qudit with $d>2$ lives in a higher-dimensional space, the corresponding objects returned by these magic methods will have correspondingly higher dimension.
###Code
# Create an instance of the qutrit gate defined above. This gate implements _unitary_.
gate = QutritPlusGate()
# Because it acts on qutrits, its unitary is a 3 by 3 matrix.
print(cirq.unitary(gate))
###Output
_____no_output_____
###Markdown
For a single qubit gate, its unitary is a 2x2 matrix, whereas for a single qutrit gate its unitary is a 3x3 matrix. A two qutrit gate will have a unitary that is a 9x9 matrix (3 * 3 = 9) and a qubit-ququart gate will have a unitary that is an 8x8 matrix (2 * 4 = 8). The size of the matrices involved in defining mixtures and channels follow the same pattern. Simulating quditsCirq's simulators can be used to simulate or sample from circuits which act on qudits.Simulators like `cirq.Simulator` and `cirq.DensityMatrixSimulator` will return simulation results with larger states than the same size qubit circuit when simulating qudit circuits. The size of the state returned is determined by the product of the dimensions of the qudits being simulated. For example, the state vector output of `cirq.Simulator` after simulating a circuit on a qubit, a qutrit, and a qutrit will have 2 * 3 * 3 = 18 elements. You can call `cirq.qid_shape(simulation_result)` to check the qudit dimensions.
###Code
# Create a circuit from the gate we defined above.
q0 = cirq.LineQid(0, dimension=3)
circuit = cirq.Circuit(QutritPlusGate()(q0))
# Run a simulation of this circuit.
sim = cirq.Simulator()
result = sim.simulate(circuit)
# Verify that the returned state is that of a qutrit.
print(cirq.qid_shape(result))
###Output
_____no_output_____
###Markdown
Circuits on qudits are always assumed to start in the $|0\rangle$ computational basis state, and all the computational basis states of a qudit are assumed to be $|0\rangle$, $|1\rangle$, ..., $|d-1\rangle$. Correspondingly, measurements of qudits are assumed to be in the computational basis and for each qudit return an integer corresponding to these basis states. Thus measurement results for each qudit are assumed to run from $0$ to $d-1$.
###Code
# Create a circuit with three qutrit gates.
q0, q1 = cirq.LineQid.range(2, dimension=3)
circuit = cirq.Circuit([
QutritPlusGate()(q0),
QutritPlusGate()(q1),
QutritPlusGate()(q1),
cirq.measure(q0, q1, key="x")
])
# Sample from this circuit.
result = cirq.sample(circuit, repetitions=3)
# See that the results are all integers from 0 to 2.
print(result)
###Output
_____no_output_____
|
5.Sign Language_csv_image_file.ipynb | ###Markdown
**Multi-class classification for Sign Language** **Abstract**The aim of this notebook is to demonstrate multi-class classification on a Sign Language dataset supplied as a CSV file. **Dataset**Sign Language MNIST dataset from https://www.kaggle.com/datamunge/sign-language-mnist; we attempt to build a multi-class classifier to recognize sign language!
###Code
import csv
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from os import getcwd
def get_data(filename):
with open(filename) as training_file:
reader = csv.reader(training_file, delimiter=',')
imgs = []
labels = []
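# skip the header row of the CSV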
next(reader, None)
for row in reader:
label = row[0]
data = row[1:]
img = np.array(data).reshape((28, 28))
imgs.append(img)
labels.append(label)
images = np.array(imgs).astype(float)
labels = np.array(labels).astype(float)
return images, labels
path_sign_mnist_train = f"{getcwd()}/../tmp2/sign_mnist_train.csv"
path_sign_mnist_test = f"{getcwd()}/../tmp2/sign_mnist_test.csv"
training_images, training_labels = get_data(path_sign_mnist_train)
testing_images, testing_labels = get_data(path_sign_mnist_test)
print(training_images.shape)
print(training_labels.shape)
print(testing_images.shape)
print(testing_labels.shape)
training_images = np.expand_dims(training_images, axis=3)
testing_images = np.expand_dims(testing_images, axis=3)
train_datagen = ImageDataGenerator(
rescale=1. / 255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest'
)
validation_datagen = ImageDataGenerator(
rescale=1 / 255
)
print(training_images.shape)
print(testing_images.shape)
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(26, activation='softmax')
])
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
train_gen = train_datagen.flow(
training_images,
training_labels,
batch_size=64
)
val_gen = validation_datagen.flow(
testing_images,
testing_labels,
batch_size=64
)
history = model.fit_generator(
train_gen,
epochs=15,
validation_data=val_gen
)
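# note: unlike the validation generator above, this call uses the raw (un-rescaled) testing_images, so the score will differ from val_accuracy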
model.evaluate(testing_images, testing_labels, verbose=0)
%matplotlib inline
import matplotlib.pyplot as plt
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'r', label='Training Loss')
plt.plot(epochs, val_loss, 'b', label='Validation Loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
###Output
_____no_output_____ |
W6D6/nn-from-scratch_walkthrough.ipynb | ###Markdown
Implementing a Neural Network from Scratch - An IntroductionLet's implement a simple 3-layer neural network from scratch. Implementing a network from scratch at least once is an extremely valuable exercise. It helps you gain an understanding of how neural networks work, and that is essential to designing effective models.
###Code
# Package imports
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.linear_model
import matplotlib
# Display plots inline and change default figure size
%matplotlib inline
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)
###Output
_____no_output_____
###Markdown
Generating a dataset1. Let's start by generating a toy dataset we can play with. Let's use the [`make_moons`](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_moons.html) function.
###Code
# Generate a dataset and plot it
np.random.seed(0)
X, y = sklearn.datasets.make_moons(200, noise=0.20)
plt.scatter(X[:,0], X[:,1], s=40, c=y, cmap=plt.cm.Spectral)
###Output
_____no_output_____
###Markdown
This dataset has two classes: `red` and `blue`. You can think of the blue dots as male patients and the red dots as female patients, with the `x` and `y` axes being medical measurements. GOAL: Create an ML classifier that predicts the correct class (male or female) given the `x` and `y` coordinates. The data is not *linearly separable*. With NN, you don't need to worry about [feature engineering](http://machinelearningmastery.com/discover-feature-engineering-how-to-engineer-features-and-how-to-get-good-at-it/). The hidden layer of a neural network will learn features for you. Logistic RegressionTo demonstrate the point, let's train a Logistic Regression classifier. Its input will be the `x` and `y` values and the output the predicted class (0 or 1).
###Code
# Train the logistic regression classifier
clf = sklearn.linear_model.LogisticRegressionCV()
clf.fit(X, y)
# Helper function to plot a decision boundary.
# If you don't fully understand this function don't worry, it just generates the contour plot below.
def plot_decision_boundary(pred_func):
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid
Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
# Plot the decision boundary
plot_decision_boundary(lambda x: clf.predict(x))
plt.title("Logistic Regression")
###Output
_____no_output_____
###Markdown
The graph shows the decision boundary learned by our Logistic Regression classifier. It separates the data as well as it can using a straight line, but it's unable to capture the "moon shape" of our data. Training a Neural Network Let's build a 3-layer NN with one input layer, one hidden layer, and one output layer. * The number of nodes in the input layer is determined by the dimensionality of our data, 2. * Similarly, the number of nodes in the output layer is determined by the number of classes we have, also 2. (The input to the network will be `x` and `y` coordinates and its output will be two probabilities, one for class 0 ("female") and one for class 1 ("male").) We can choose the dimensionality (the number of nodes) of the hidden layer: - The more nodes we put into the hidden layer the more complex functions we will be able to fit. - Higher dimensionality comes at a cost: * More computation is required to make predictions and learn the network parameters. * A bigger number of parameters also means we become more prone to overfitting our data. How to choose the size of the hidden layer? While there are some general guidelines and recommendations, it always depends on your specific problem and is more of an art than a science. Your task will be to play with the number of nodes in the hidden layer and see how it affects the output. We also need an *activation function* for our hidden layer. The activation function transforms the inputs of the layer into its outputs. A nonlinear activation function is what allows us to fit nonlinear hypotheses. We will use `tanh` but you should try to use `ReLU` as we have in previous tasks. Play with this function as well. For the output layer, we will use a [softmax function](https://en.wikipedia.org/wiki/Softmax_function), which is simply a way to convert raw scores to probabilities. It is a generalization of the logistic function for multiple classes. How our network makes predictionsOur network makes predictions using *forward propagation*, which is just a lot of matrix multiplications and the application of the activation functions we defined above. If $x$ is the 2-dimensional input to our network then we calculate our prediction $\hat{y}$ (also two-dimensional) as follows: $$\begin{aligned}z_1 & = xW_1 + b_1 \\a_1 & = \tanh(z_1) \\z_2 & = a_1W_2 + b_2 \\a_2 & = \hat{y} = \mathrm{softmax}(z_2)\end{aligned}$$ $z_i$ is the weighted sum of inputs of layer $i$ (bias included), and $a_i$ is the output of layer $i$ after applying the activation function. $W_1, b_1, W_2, b_2$ are parameters of our network, which we learn from our training data. Learning the ParametersNow we need to find parameters ($W_1, b_1, W_2, b_2$) that minimize the error on our training data. What is the error? We call the function that measures our error the *loss function*. A common choice with the softmax output is the [cross-entropy loss](https://en.wikipedia.org/wiki/Cross_entropy#Cross-entropy_error_function_and_logistic_regression). If we have $N$ training examples and $C$ classes then the loss for our prediction $\hat{y}$ with respect to the true labels $y$ is given by:$$\begin{aligned}L(y,\hat{y}) = - \frac{1}{N} \sum_{n \in N} \sum_{i \in C} y_{n,i} \log\hat{y}_{n,i}\end{aligned}$$ The formula looks complicated, but all it really does is sum over our training examples and add to the loss if we predicted the incorrect class. So, the further away $y$ (the correct labels) and $\hat{y}$ (our predictions) are, the greater our loss will be.
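Before implementing the full network, here is a minimal numerical sketch of the softmax and cross-entropy computations defined above; the score values below are made up purely for illustration:
###Code
import numpy as np
# Made-up raw scores (z_2) for three examples and two classes.
z2 = np.array([[2.0, 1.0],
[0.5, 1.5],
[1.0, 1.0]])
y_true = np.array([0, 1, 0])  # correct class for each example
# Softmax: exponentiate and normalize each row into probabilities.
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
# Cross-entropy loss: mean negative log-probability of the correct class.
loss = -np.mean(np.log(probs[range(len(y_true)), y_true]))
print(probs)
print(loss)
###Output
_____no_output_____
###Markdown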
The goal is to find the parameters that minimize our loss function. With [gradient descent](http://cs231n.github.io/optimization-1/) we can find its minimum. Let's implement gradient descent. As an input, gradient descent needs the gradients (vector of derivatives) of the loss function with respect to our parameters: $\frac{\partial{L}}{\partial{W_1}}$, $\frac{\partial{L}}{\partial{b_1}}$, $\frac{\partial{L}}{\partial{W_2}}$, $\frac{\partial{L}}{\partial{b_2}}$. To calculate these gradients we use the famous *backpropagation algorithm*, which is a way to efficiently calculate the gradients starting from the output. Applying the backpropagation formula we find the following (trust me on this): $$\begin{aligned}& \delta_3 = \hat{y} - y \\& \delta_2 = (1 - \tanh^2z_1) \circ \delta_3W_2^T \\& \frac{\partial{L}}{\partial{W_2}} = a_1^T \delta_3 \\& \frac{\partial{L}}{\partial{b_2}} = \delta_3\\& \frac{\partial{L}}{\partial{W_1}} = x^T \delta_2\\& \frac{\partial{L}}{\partial{b_1}} = \delta_2 \\\end{aligned}$$ ImplementationWe start by defining some useful variables and parameters for gradient descent:
###Code
num_examples = len(X) # training set size
nn_input_dim = 2 # input layer dimensionality
nn_output_dim = 2 # output layer dimensionality
# Gradient descent parameters (I picked these by hand)
epsilon = 0.01 # learning rate for gradient descent
reg_lambda = 0.01 # regularization strength
###Output
_____no_output_____
###Markdown
First let's implement the loss function we defined above. We use this to evaluate how well our model is doing:
###Code
# Helper function to evaluate the total loss on the dataset
def calculate_loss(model):
W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
# Forward propagation to calculate our predictions
z1 = X.dot(W1) + b1
a1 = np.tanh(z1)
z2 = a1.dot(W2) + b2
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
# Calculating the loss
    correct_logprobs = -np.log(probs[range(num_examples), y])
    data_loss = np.sum(correct_logprobs)
    # Add regularization term to loss (optional)
    data_loss += reg_lambda/2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)))
return 1./num_examples * data_loss
###Output
_____no_output_____
###Markdown
We also implement a helper function to calculate the output of the network. It does forward propagation as defined above and returns the class with the highest probability.
###Code
# Helper function to predict an output (0 or 1)
def predict(model, x):
W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
# Forward propagation
z1 = x.dot(W1) + b1
a1 = np.tanh(z1)
z2 = a1.dot(W2) + b2
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
return np.argmax(probs, axis=1)
###Output
_____no_output_____
###Markdown
Finally, here comes the function to train our Neural Network. It implements batch gradient descent using the backpropagation derivatives we found above.
###Code
# This function learns parameters for the neural network and returns the model.
# - nn_hdim: Number of nodes in the hidden layer
# - num_passes: Number of passes through the training data for gradient descent
# - print_loss: If True, print the loss every 1000 iterations
def build_model(nn_hdim, num_passes=20000, print_loss=False):
# Initialize the parameters to random values. We need to learn these.
np.random.seed(0)
W1 = np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim)
b1 = np.zeros((1, nn_hdim))
W2 = np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim)
b2 = np.zeros((1, nn_output_dim))
# This is what we return at the end
model = {}
# Gradient descent. For each batch...
for i in range(0, num_passes):
# Forward propagation
z1 = X.dot(W1) + b1
a1 = np.tanh(z1)
z2 = a1.dot(W2) + b2
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
# Backpropagation
delta3 = probs
delta3[range(num_examples), y] -= 1
dW2 = (a1.T).dot(delta3)
db2 = np.sum(delta3, axis=0, keepdims=True)
delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))
dW1 = np.dot(X.T, delta2)
db1 = np.sum(delta2, axis=0)
# Add regularization terms (b1 and b2 don't have regularization terms)
dW2 += reg_lambda * W2
dW1 += reg_lambda * W1
# Gradient descent parameter update
W1 += -epsilon * dW1
b1 += -epsilon * db1
W2 += -epsilon * dW2
b2 += -epsilon * db2
# Assign new parameters to the model
model = { 'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}
# Optionally print the loss.
# This is expensive because it uses the whole dataset, so we don't want to do it too often.
if print_loss and i % 1000 == 0:
print("Loss after iteration %i: %f" %(i, calculate_loss(model)))
return model
###Output
_____no_output_____
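###Markdown
As a sanity check on the backpropagation formulas above (the "trust me on this" step), the analytic gradient can be compared against a finite-difference estimate on a tiny network. This is a self-contained sketch, not part of the original notebook; it uses its own toy data (`Xc`, `yc`) and parameters rather than the notebook's `X`, `y`, and `build_model`.
```python
import numpy as np

rng = np.random.RandomState(0)
Xc = rng.randn(5, 2)                       # 5 toy examples, 2 features
yc = rng.randint(0, 2, size=5)             # toy labels
W1c, b1c = rng.randn(2, 3), np.zeros((1, 3))
W2c, b2c = rng.randn(3, 2), np.zeros((1, 2))

def mean_loss(W2_):
    a1 = np.tanh(Xc.dot(W1c) + b1c)
    scores = a1.dot(W2_) + b2c
    probs = np.exp(scores) / np.sum(np.exp(scores), axis=1, keepdims=True)
    return np.mean(-np.log(probs[range(5), yc]))

# Analytic gradient of the mean loss w.r.t. W2, using delta_3 = y_hat - y
a1 = np.tanh(Xc.dot(W1c) + b1c)
probs = np.exp(a1.dot(W2c) + b2c)
probs /= np.sum(probs, axis=1, keepdims=True)
delta3 = probs.copy()
delta3[range(5), yc] -= 1
dW2_analytic = a1.T.dot(delta3) / 5        # divide by N because mean_loss averages over examples

# Finite-difference estimate for one entry of W2
eps = 1e-5
W2_plus, W2_minus = W2c.copy(), W2c.copy()
W2_plus[0, 0] += eps
W2_minus[0, 0] -= eps
dW2_numeric = (mean_loss(W2_plus) - mean_loss(W2_minus)) / (2 * eps)
print(dW2_analytic[0, 0], dW2_numeric)     # the two values should agree to several decimal places
```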
###Markdown
A network with a hidden layer of size 3Let's see what happens if we train a network with a hidden layer size of 3.
###Code
# Build a model with a 3-dimensional hidden layer
model = build_model(3, print_loss=True)
# Plot the decision boundary
plot_decision_boundary(lambda x: predict(model, x))
plt.title("Decision Boundary for hidden layer size 3")
###Output
Loss after iteration 0: 0.432387
Loss after iteration 1000: 0.068947
Loss after iteration 2000: 0.068901
Loss after iteration 3000: 0.071218
Loss after iteration 4000: 0.071253
Loss after iteration 5000: 0.071278
Loss after iteration 6000: 0.071293
Loss after iteration 7000: 0.071303
Loss after iteration 8000: 0.071308
Loss after iteration 9000: 0.071312
Loss after iteration 10000: 0.071314
Loss after iteration 11000: 0.071315
Loss after iteration 12000: 0.071315
Loss after iteration 13000: 0.071316
Loss after iteration 14000: 0.071316
Loss after iteration 15000: 0.071316
Loss after iteration 16000: 0.071316
Loss after iteration 17000: 0.071316
Loss after iteration 18000: 0.071316
Loss after iteration 19000: 0.071316
###Markdown
Yay! This looks pretty good. Our neural network was able to find a decision boundary that successfully separates the classes. Varying the hidden layer sizeIn the example above we picked a hidden layer size of 3. Let's now get a sense of how varying the hidden layer size affects the result.
###Code
plt.figure(figsize=(16, 32))
hidden_layer_dimensions = [1, 2, 3, 4, 5, 20, 50]
for i, nn_hdim in enumerate(hidden_layer_dimensions):
plt.subplot(5, 2, i+1)
plt.title('Hidden Layer size %d' % nn_hdim)
model = build_model(nn_hdim)
plot_decision_boundary(lambda x: predict(model, x))
plt.show()
###Output
_____no_output_____ |
Convert_h5_to_tfjs_model_and_tflite.ipynb | ###Markdown
**CONVERT MODEL H5 INTO TFLITE**
###Code
import tensorflow as tf
converter = tf.lite.TFLiteConverter.from_keras_model_file("my_model_BAGUS_SOUND.h5")
tflite_model = converter.convert()
open("my_model_BAGUS_SOUND.tflite", "wb").write(tflite_model)
ls
###Output
converted_model.tflite my_model_BAGUS_SOUND.tflite
Convert_h5_to_tfjs_model.ipynb sound_classification_Bagus.hdf5
'Copy of sound_classification_Bagus.hdf5'   tf_js_model/
my_model_BAGUS_SOUND.h5
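###Markdown
Note: `TFLiteConverter.from_keras_model_file` is a TensorFlow 1.x API. On TensorFlow 2.x, a rough equivalent would be the following sketch (assuming the `.h5` file loads with `tf.keras`):
```python
import tensorflow as tf

# Load the Keras model from the .h5 file, then convert it with the TF2 converter API.
model = tf.keras.models.load_model("my_model_BAGUS_SOUND.h5")
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open("my_model_BAGUS_SOUND.tflite", "wb") as f:
    f.write(tflite_model)
```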
###Markdown
**CONVERT MODEL H5 into TF.JS**
###Code
!tensorflowjs_converter --input_format=keras my_model_BAGUS_SOUND.h5 my_model_BAGUS_SOUND_TFJS/
ls
###Output
converted_model.tflite                      my_model_BAGUS_SOUND_TFJS/
Convert_h5_to_tfjs_model.ipynb my_model_BAGUS_SOUND.tflite
'Copy of sound_classification_Bagus.hdf5' sound_classification_Bagus.hdf5
my_model_BAGUS_SOUND.h5                     tf_js_model/
|
kannada mnist/archive/script v1.0.1.ipynb | ###Markdown
Imports
###Code
import keras
print(keras.__version__)
import tensorflow
print(tensorflow.__version__)
import numpy as np
import os
from keras import callbacks
from keras.utils.vis_utils import plot_model
import warnings
warnings.filterwarnings('ignore')
import random
###Output
_____no_output_____
###Markdown
Layers in CapsNet Define the key layers required to build any capsnet architecture
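For reference, the squashing non-linearity from the original CapsNet paper, which the `squash` helper below implements (with $0.5$ in place of $1$ in the denominator), is $$\mathrm{squash}(\mathbf{s}) = \frac{\lVert\mathbf{s}\rVert^2}{1+\lVert\mathbf{s}\rVert^2}\,\frac{\mathbf{s}}{\lVert\mathbf{s}\rVert}$$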
###Code
import keras.backend as K
import tensorflow as tf
from keras import initializers, layers
class Length(layers.Layer):
"""
Compute the length of vectors. This is used to compute a Tensor that has the same shape with y_true in margin_loss
inputs: shape=[dim_1, ..., dim_{n-1}, dim_n]
output: shape=[dim_1, ..., dim_{n-1}]
"""
def call(self, inputs, **kwargs):
return K.sqrt(K.sum(K.square(inputs), -1))
def compute_output_shape(self, input_shape):
return input_shape[:-1]
class Mask(layers.Layer):
"""
Mask a Tensor with shape=[None, d1, d2] by the max value in axis=1.
Output shape: [None, d2]
"""
def call(self, inputs, **kwargs):
# use true label to select target capsule, shape=[batch_size, num_capsule]
if type(inputs) is list: # true label is provided with shape = [batch_size, n_classes], i.e. one-hot code.
assert len(inputs) == 2
inputs, mask = inputs
else: # if no true label, mask by the max length of vectors of capsules
x = inputs
# Enlarge the range of values in x to make max(new_x)=1 and others < 0
x = (x - K.max(x, 1, True)) / K.epsilon() + 1
mask = K.clip(x, 0, 1) # the max value in x clipped to 1 and other to 0
# masked inputs, shape = [batch_size, dim_vector]
inputs_masked = K.batch_dot(inputs, mask, [1, 1])
return inputs_masked
def compute_output_shape(self, input_shape):
if type(input_shape[0]) is tuple: # true label provided
return tuple([None, input_shape[0][-1]])
else:
return tuple([None, input_shape[-1]])
def squash(vectors, axis=-1):
"""
The non-linear activation used in Capsule. It drives the length of a large vector to near 1 and small vector to 0
:param vectors: some vectors to be squashed, N-dim tensor
:param axis: the axis to squash
:return: a Tensor with same shape as input vectors
"""
s_squared_norm = K.sum(K.square(vectors), axis, keepdims=True)
scale = s_squared_norm / (0.5 + s_squared_norm) / K.sqrt(s_squared_norm)
return scale * vectors
class CapsuleLayer(layers.Layer):
"""
The capsule layer. It is similar to Dense layer. Dense layer has `in_num` inputs, each is a scalar, the output of the
neuron from the former layer, and it has `out_num` output neurons. CapsuleLayer just expand the output of the neuron
from scalar to vector. So its input shape = [None, input_num_capsule, input_dim_vector] and output shape = \
[None, num_capsule, dim_vector]. For Dense Layer, input_dim_vector = dim_vector = 1.
:param num_capsule: number of capsules in this layer
:param dim_vector: dimension of the output vectors of the capsules in this layer
:param num_routings: number of iterations for the routing algorithm
"""
def __init__(self, num_capsule, dim_vector, num_routing=3,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
**kwargs):
super(CapsuleLayer, self).__init__(**kwargs)
self.num_capsule = num_capsule
self.dim_vector = dim_vector
self.num_routing = num_routing
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
def build(self, input_shape):
assert len(input_shape) >= 3, "The input Tensor should have shape=[None, input_num_capsule, input_dim_vector]"
self.input_num_capsule = input_shape[1]
self.input_dim_vector = input_shape[2]
# Transformation matrix/Weight matrix
self.W = self.add_weight(shape=[self.input_num_capsule, self.num_capsule, self.input_dim_vector, self.dim_vector],
initializer=self.kernel_initializer,
name='W')
# Coupling coefficient. The redundant dimensions are just to facilitate subsequent matrix calculation.
self.bias = self.add_weight(shape=[1, self.input_num_capsule, self.num_capsule, 1, 1],
initializer=self.bias_initializer,
name='bias',
trainable=False)
self.built = True
def call(self, inputs, training=None):
# inputs.shape=[None, input_num_capsule, input_dim_vector]
# Expand dims to [None, input_num_capsule, 1, 1, input_dim_vector]
inputs_expand = K.expand_dims(K.expand_dims(inputs, 2), 2)
# Replicate(tile) num_capsule dimension to prepare being multiplied by W
# Now it has shape = [None, input_num_capsule, num_capsule, 1, input_dim_vector]
inputs_tiled = K.tile(inputs_expand, [1, 1, self.num_capsule, 1, 1])
"""
# Compute `inputs * W` by expanding the first dim of W. More time-consuming and need batch_size.
# Now W has shape = [batch_size, input_num_capsule, num_capsule, input_dim_vector, dim_vector]
w_tiled = K.tile(K.expand_dims(self.W, 0), [self.batch_size, 1, 1, 1, 1])
# Transformed vectors, inputs_hat.shape = [None, input_num_capsule, num_capsule, 1, dim_vector]
inputs_hat = K.batch_dot(inputs_tiled, w_tiled, [4, 3])
"""
# inputs_hat.shape = [None, input_num_capsule, num_capsule, 1, dim_vector]
inputs_hat = tf.scan(lambda ac, x: K.batch_dot(x, self.W, [3, 2]),
elems=inputs_tiled,
initializer=K.zeros([self.input_num_capsule, self.num_capsule, 1, self.dim_vector]))
# DYNAMIC ROUTING
assert self.num_routing > 0, 'The num_routing should be > 0.'
for i in range(self.num_routing):
c = tf.nn.softmax(self.bias, dim=2) # dim=2 is the num_capsule dimension
outputs = squash(K.sum(c * inputs_hat, 1, keepdims=True))
# last iteration needs not compute bias which will not be passed to the graph any more anyway.
if i != self.num_routing - 1:
# update the raw weights for the next routing iteration
# by adding the agreement to the previous raw weights
self.bias += K.sum(inputs_hat * outputs, -1, keepdims=True)
return K.reshape(outputs, [-1, self.num_capsule, self.dim_vector])
def compute_output_shape(self, input_shape):
return tuple([None, self.num_capsule, self.dim_vector])
def PrimaryCap(inputs, dim_vector, n_channels, kernel_size, strides, padding):
"""
Apply Conv2D `n_channels` times and concatenate all capsules
:param inputs: 4D tensor, shape=[None, width, height, channels]
:param dim_vector: the dim of the output vector of capsule
:param n_channels: the number of types of capsules
:return: output tensor, shape=[None, num_capsule, dim_vector]
"""
output = layers.Conv2D(filters=dim_vector*n_channels, kernel_size=kernel_size, strides=strides, padding=padding)(inputs)
outputs = layers.Reshape(target_shape=[-1, dim_vector])(output)
return layers.Lambda(squash)(outputs)
###Output
_____no_output_____
###Markdown
Build the network Build the required capsnet architecture using the layers defined above
###Code
from keras import layers, models
from keras import backend as K
from keras.utils import to_categorical
def CapsNet(input_shape, n_class, num_routing):
"""
A Capsule Network on MNIST.
:param input_shape: data shape, 4d, [None, width, height, channels]
:param n_class: number of classes
:param num_routing: number of routing iterations
:return: A Keras Model with 2 inputs and 2 outputs
"""
x = layers.Input(shape=input_shape)
# Layer 1: Conventional Conv2D layer
conv1 = layers.Conv2D(filters=256, kernel_size=9, strides=1, padding='valid', activation='relu', name='conv1')(x)
conv1 = layers.BatchNormalization(name='batch_norm1')(conv1)
# Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_vector]
primarycaps = PrimaryCap(conv1, dim_vector=8, n_channels=32, kernel_size=9, strides=2, padding='valid')
# Layer 3: Capsule layer. Dynamic Routing algorithm works here.
digitcaps = CapsuleLayer(num_capsule=n_class, dim_vector=16, num_routing=num_routing, name='digit_caps')(primarycaps)
# Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.
out_caps = Length(name='out_caps')(digitcaps)
# Decoder network.
y = layers.Input(shape=(n_class,))
masked = Mask()([digitcaps, y]) # The true label is used to mask the output of capsule layer.
x_recon = layers.Dense(512, activation='relu')(masked)
x_recon = layers.Dense(1024, activation='relu')(x_recon)
x_recon = layers.Dense(784, activation='sigmoid')(x_recon)
x_recon = layers.Reshape(target_shape=[28, 28, 1], name='out_recon')(x_recon)
# two-input-two-output keras Model
return models.Model([x, y], [out_caps, x_recon])
###Output
_____no_output_____
###Markdown
Margin Loss Use custom margin loss as proposed in the original paper for gradient updates
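For reference, the margin loss from the paper, with $m^+=0.9$, $m^-=0.1$ and $\lambda=0.5$ exactly as hard-coded in the cell below, is $$L_k = T_k\,\max(0,\, m^+ - \lVert\mathbf{v}_k\rVert)^2 + \lambda\,(1-T_k)\,\max(0,\, \lVert\mathbf{v}_k\rVert - m^-)^2$$ where $T_k=1$ when class $k$ is present and $\lVert\mathbf{v}_k\rVert$ is the capsule length produced by the `Length` layer.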
###Code
def margin_loss(y_true, y_pred):
"""
Margin loss for Eq.(4). When y_true[i, :] contains not just one `1`, this loss should work too. Not test it.
:param y_true: [None, n_classes]
:param y_pred: [None, num_capsule]
:return: a scalar loss value.
"""
L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \
0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))
return K.mean(K.sum(L, 1))
num_classes = 10
routings = 3
# define model
model = CapsNet(input_shape=[28, 28, 1],
n_class=num_classes,
num_routing=routings)
model.summary()
try:
plot_model(model, to_file='model.png', show_shapes=True)
except Exception as e:
print('No fancy plot {}'.format(e))
###Output
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_5 (InputLayer) (None, 28, 28, 1) 0
__________________________________________________________________________________________________
conv1 (Conv2D) (None, 20, 20, 256) 20992 input_5[0][0]
__________________________________________________________________________________________________
batch_norm1 (BatchNormalization (None, 20, 20, 256) 1024 conv1[0][0]
__________________________________________________________________________________________________
conv2d_3 (Conv2D) (None, 6, 6, 256) 5308672 batch_norm1[0][0]
__________________________________________________________________________________________________
reshape_3 (Reshape) (None, 1152, 8) 0 conv2d_3[0][0]
__________________________________________________________________________________________________
lambda_3 (Lambda) (None, 1152, 8) 0 reshape_3[0][0]
__________________________________________________________________________________________________
digit_caps (CapsuleLayer) (None, 10, 16) 1486080 lambda_3[0][0]
__________________________________________________________________________________________________
input_6 (InputLayer) (None, 10) 0
__________________________________________________________________________________________________
mask_3 (Mask) (None, 16) 0 digit_caps[0][0]
input_6[0][0]
__________________________________________________________________________________________________
dense_7 (Dense) (None, 512) 8704 mask_3[0][0]
__________________________________________________________________________________________________
dense_8 (Dense) (None, 1024) 525312 dense_7[0][0]
__________________________________________________________________________________________________
dense_9 (Dense) (None, 784) 803600 dense_8[0][0]
__________________________________________________________________________________________________
out_caps (Length) (None, 10) 0 digit_caps[0][0]
__________________________________________________________________________________________________
out_recon (Reshape) (None, 28, 28, 1) 0 dense_9[0][0]
==================================================================================================
Total params: 8,154,384
Trainable params: 8,142,352
Non-trainable params: 12,032
__________________________________________________________________________________________________
###Markdown
Load Data
###Code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from matplotlib import pyplot as plt
import gzip
import os
from keras.utils.data_utils import get_file
import numpy as np
def load_data():
"""Loads the Kannada-MNIST dataset.
# Returns
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
dirname = os.path.join('datasets', 'Kannada-mnist')
base = 'https://github.com/vinayprabhu/Kannada_MNIST/blob/master/data/output_tensors/MNIST_format/'
files = ['y_kannada_MNIST_train-idx1-ubyte.gz', 'X_kannada_MNIST_train-idx3-ubyte.gz',
'y_kannada_MNIST_test-idx1-ubyte.gz', 'X_kannada_MNIST_test-idx3-ubyte.gz',
'y_dig_MNIST-idx1-ubyte.gz', 'X_dig_MNIST-idx3-ubyte.gz']
paths = []
for fname in files:
paths.append(get_file(fname,
origin=base + fname+'?raw=true',
cache_subdir=dirname))
with gzip.open(paths[0], 'rb') as lbpath:
y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[1], 'rb') as imgpath:
x_train = np.frombuffer(imgpath.read(), np.uint8,
offset=16).reshape(len(y_train), 28, 28)
with gzip.open(paths[2], 'rb') as lbpath:
y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[3], 'rb') as imgpath:
x_test = np.frombuffer(imgpath.read(), np.uint8,
offset=16).reshape(len(y_test), 28, 28)
    with gzip.open(paths[4], 'rb') as lbpath:  # Dig-MNIST labels
y_dig = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[5], 'rb') as imgpath:
x_dig = np.frombuffer(imgpath.read(), np.uint8,
offset=16).reshape(10240, 28, 28)
return (x_train, y_train), (x_test, y_test), (x_dig, y_dig)
(x_train, y_train), (x_test, y_test), (x_dig, y_dig)=load_data()
import tensorflow as tf
(train_images, train_labels), (test_images, test_labels), (dig_images, dig_labels) = load_data()
print(train_images.shape)
train_images = train_images.reshape((-1, 28, 28, 1)).astype('float32') / 255.
test_images = test_images.reshape((-1, 28, 28, 1)).astype('float32') / 255.
dig_images = dig_images.reshape((-1, 28, 28, 1)).astype('float32') / 255.
train_labels = to_categorical(train_labels.astype('float32'))
test_labels = to_categorical(test_labels.astype('float32'))
dig_labels = to_categorical(dig_labels.astype('float32'))
val_images = train_images[50000:]
val_labels = train_labels[50000:]
train_images = train_images[:50000]
train_labels = train_labels[:50000]
print(train_images.shape)
print(type(train_images))
print(test_images.shape)
print(type(test_images))
print(val_images.shape)
print(type(val_images))
print(dig_images.shape)
print(type(dig_images))
x_train = train_images
y_train = train_labels
x_val = val_images
y_val = val_labels
x_test = test_images
y_test = test_labels
x_dig = dig_images
y_dig = dig_labels
###Output
(50000, 28, 28, 1)
<class 'numpy.ndarray'>
(10000, 28, 28, 1)
<class 'numpy.ndarray'>
(10000, 28, 28, 1)
<class 'numpy.ndarray'>
(10240, 28, 28, 1)
<class 'numpy.ndarray'>
###Markdown
Visualise a few samples from each of the datasets
###Code
from matplotlib import pyplot as plt
plt.figure(figsize=(10,4))
for i in range(50):
plt.subplot(5,10,i+1)
plt.imshow(x_train[i].reshape(28,28), cmap= 'gray')
plt.axis('Off')
plt.title(np.argmax(y_train[i]))
# plt.tight_layout()
plt.suptitle('Training set')
plt.figure(figsize=(10,4))
for i in range(50):
plt.subplot(5,10,i+1)
plt.imshow(x_val[i].reshape(28,28), cmap= 'gray')
plt.axis('Off')
plt.title(np.argmax(y_val[i]))
# plt.tight_layout()
plt.suptitle('Validation set')
plt.figure(figsize=(10,4))
for i in range(50):
plt.subplot(5,10,i+1)
plt.imshow(x_test[i].reshape(28,28), cmap= 'gray')
plt.axis('Off')
plt.title(np.argmax(y_test[i]))
# plt.tight_layout()
plt.suptitle('Test set')
plt.figure(figsize=(10,4))
for i in range(50):
plt.subplot(5,10,i+1)
plt.imshow(x_dig[i].reshape(28,28), cmap= 'gray')
plt.axis('Off')
plt.title(np.argmax(y_dig[i]))
# plt.tight_layout()
plt.suptitle('DIG set')
###Output
_____no_output_____
###Markdown
Train the network
###Code
batch_size = 128
epochs = 12
from keras.preprocessing.image import ImageDataGenerator
def train(model, data):
"""
Training a CapsuleNet
:param model: the CapsuleNet model
:param data: a tuple containing training and testing data, like `((x_train, y_train), (x_test, y_test))`
:param args: arguments
:return: The trained model
"""
# unpacking the data
(x_train, y_train), (x_val, y_val) = data
# callbacks
log = callbacks.CSVLogger('log.csv')
checkpoint = callbacks.ModelCheckpoint('weights-{epoch:02d}.h5', save_best_only=True, save_weights_only=True, verbose=1)
lr_decay = callbacks.LearningRateScheduler(schedule=lambda epoch: 0.001 * np.exp(-epoch / 10.))
# compile the model
model.compile(optimizer='adam',
loss=[margin_loss, 'mse'],
loss_weights=[1., 0.0005],
metrics={'out_caps': 'accuracy'})
# Training without data augmentation:
# history = model.fit([x_train, y_train], [y_train, x_train], batch_size=batch_size, epochs=epochs,
# validation_data=[[x_val, y_val], [y_val, x_val]], callbacks=[lr_decay, checkpoint, log])
# Begin: Training with data augmentation ---------------------------------------------------------------------#
def train_generator(x, y, batch_size, shift_fraction=0.2):
train_datagen = ImageDataGenerator(width_shift_range=shift_fraction,
height_shift_range=shift_fraction, # shift up to 2 pixel for MNIST
                                           zca_whitening=False)  # assumption: the original argument was truncated ('zca_'); False is the Keras default
generator = train_datagen.flow(x, y, batch_size=batch_size)
while 1:
x_batch, y_batch = generator.next()
yield ([x_batch, y_batch], [y_batch, x_batch])
# Training with data augmentation. If shift_fraction=0., also no augmentation.
model.fit_generator(generator=train_generator(x_train, y_train, batch_size, 0.1),
steps_per_epoch=int(y_train.shape[0] / batch_size),
epochs=epochs,
validation_data=[[x_val, y_val], [y_val, x_val]], callbacks=[lr_decay, checkpoint, log])
# End: Training with data augmentation -----------------------------------------------------------------------#
model.save_weights('trained_model.h5')
print('Trained model saved to \'trained_model.h5\'')
return model
train(model=model, data=((x_train, y_train), (x_val, y_val)))
def test(model, data):
x_test, y_test = data
y_pred, x_recon = model.predict([x_test, y_test], batch_size=128)
print('-' * 30 + 'Begin: test' + '-' * 30)
print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1)) / y_test.shape[0])
return (y_pred, x_recon)
y_pred, x_recon = test(model, (x_test, y_test))
dig_pred, dig_recon = test(model, (x_dig[:10000], y_dig[:10000]))
###Output
------------------------------Begin: test------------------------------
Test acc: 0.8112
###Markdown
Visualisation
###Code
from IPython.display import Image
Image('model.png', width=750, height=750)
import pandas as pd
pd.set_option('display.max_columns', None)
df = pd.read_csv('log.csv')
print(df.columns)
###Output
Index(['epoch', 'loss', 'lr', 'out_caps_acc', 'out_caps_loss',
'out_recon_loss', 'val_loss', 'val_out_caps_acc', 'val_out_caps_loss',
'val_out_recon_loss'],
dtype='object')
###Markdown
Filter the values to visualise
###Code
# loss
loss = df['loss'].to_list()
val_loss = df['val_loss'].to_list()
# Break-down the losses into components
out_caps_loss = df['out_caps_loss'].to_list()
val_out_caps_loss = df['val_out_caps_loss'].to_list()
out_recon_loss = df['out_recon_loss'].to_list()
val_out_recon_loss = df['val_out_recon_loss'].to_list()
# accuracy
out_caps_acc = df['out_caps_acc'].to_list()
val_out_caps_acc = df['val_out_caps_acc'].to_list()
###Output
_____no_output_____
###Markdown
Configure plotting tool (here: matplotlib)
###Code
import matplotlib.pyplot as plt
from pylab import rcParams
rcParams['figure.figsize'] = 5, 10
plt.style.use('fivethirtyeight')
plt.rc('grid', color='k', linestyle='--')
plt.rc('xtick', direction='out', color='black')
plt.rc('ytick', direction='out', color='black')
plt.rc('axes', facecolor='#E6E6E6', edgecolor='gray', axisbelow=True, grid=True)
###Output
_____no_output_____
###Markdown
Plot the loss and accuracy values
###Code
epochs = range(len(loss))
params = {'left' : 1,
'right' : 3,
'bottom' : 1,
'top' : 3,
'wspace' : 1,
'hspace' : 0.2}
plt.subplots_adjust(**params)
# Plot retrieved data : accuracy
plt.subplot(221)
plt.plot(epochs, out_caps_acc)
plt.plot(epochs, val_out_caps_acc)
plt.title("Training and Validation Accuracy")
# Plot retrieved data : loss
plt.subplot(222)
plt.plot(epochs, loss)
plt.plot(epochs, val_loss)
plt.title("Training and Validation Loss")
# Plot retrieved data : capsule loss
plt.subplot(223)
plt.plot(epochs, out_caps_loss)
plt.plot(epochs, val_out_caps_loss)
plt.title("Training and Validation Capsule Loss")
# Plot retrieved data : capsule loss
plt.subplot(224)
plt.plot(epochs, out_recon_loss)
plt.plot(epochs, val_out_recon_loss)
plt.title("Training and Validation Reconstruction Loss")
###Output
_____no_output_____
###Markdown
Reconstruction
###Code
import random
index = random.randint(1, 10000)
plt.imshow(x_test[index].reshape(28, 28), cmap=plt.cm.binary)
plt.title(f'Actual image at {index}, actual number is {np.argmax(y_test[index])}')
plt.show()
plt.imshow(x_recon[index].reshape(28, 28), cmap=plt.cm.binary)
plt.title(f'Reconstructed image at {index}, predicted number is {np.argmax(y_pred[index])}')
plt.show()
index = random.randint(1, 10000)
plt.imshow(x_dig[index].reshape(28, 28), cmap=plt.cm.binary)
plt.title(f'Actual DIG image at {index}, actual number is {np.argmax(y_dig[index])}')
plt.show()
plt.imshow(dig_recon[index].reshape(28, 28), cmap=plt.cm.binary)
plt.title(f'Reconstructed DIG image at {index}, predicted number is {np.argmax(dig_pred[index])}')
plt.show()
###Output
_____no_output_____ |
c4b/C4B.ipynb | ###Markdown
Analysis resultIt was found that earlier cleaning stages did not drop the record year from the columns, which is why the columns are flagged as not identical. This difference is ignored here, since the information they contain is the same.
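For reference, the year-wise linear interpolation applied in the cell below fills each missing odd year from its neighbouring even years; a minimal illustrative sketch with toy values (not the real dataset):
```python
import numpy as np
import pandas as pd

# One column per year; the missing 2009 value is filled linearly from 2008 and 2010.
demo = pd.DataFrame({'2008': [10.0, 40.0], '2009': [np.nan, np.nan], '2010': [14.0, 42.0]})
print(demo.interpolate(method='linear', axis=1))   # 2009 becomes 12.0 and 41.0
```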
###Code
# Check that the "estado_1" column has the states in the same order in every year
list(df_2008['estado_1']) == list(df_2010['estado_1']) == list(df_2012['estado_1']) == list(df_2014['estado_1']) == list(df_2016['estado_1']) == list(df_2018['estado_1'])
# Create the dictionary template for the missing years
diccionario_anios_faltantes = {
'estado_1':list(df_2008['estado_1']),
'anio':list(np.nan for i in range(0,32)),
'porcentaje_poblacion_vulnerable_carencias_sociales':list(np.nan for i in range(0,32)),
'miles_personas_vulnerables_carencias_sociales':list(np.nan for i in range(0,32)),
'porcentaje_poblacion_vulnerable_ingresos':list(np.nan for i in range(0,32)),
'miles_personas_vulnerables_ingresos':list(np.nan for i in range(0,32)),
'porcentaje_poblacion_no_pobre_y_no_vulnerable':list(np.nan for i in range(0,32)),
'miles_personas_no_pobresy_no_vulnerableslist':list(np.nan for i in range(0,32))
}
# Create DataFrames for the years with missing data
df_2009 = pd.DataFrame(diccionario_anios_faltantes)
df_2009['anio'] = 2009
df_2011 = pd.DataFrame(diccionario_anios_faltantes)
df_2011['anio'] = 2011
df_2013 = pd.DataFrame(diccionario_anios_faltantes)
df_2013['anio'] = 2013
df_2015 = pd.DataFrame(diccionario_anios_faltantes)
df_2015['anio'] = 2015
df_2017 = pd.DataFrame(diccionario_anios_faltantes)
df_2017['anio'] = 2017
df_general = pd.DataFrame()
for i in range(2,len(list(df_2008.columns))):
    # Iterate over columns for interpolation
aux_2008 = df_2008.iloc[:,lambda df_2008:[i]]
aux_2009 = df_2009.iloc[:,lambda df_2008:[i]]
aux_2010 = df_2010.iloc[:,lambda df_2008:[i]]
aux_2011 = df_2011.iloc[:,lambda df_2008:[i]]
aux_2012 = df_2012.iloc[:,lambda df_2008:[i]]
aux_2013 = df_2013.iloc[:,lambda df_2008:[i]]
aux_2014 = df_2014.iloc[:,lambda df_2008:[i]]
aux_2015 = df_2015.iloc[:,lambda df_2008:[i]]
aux_2016 = df_2016.iloc[:,lambda df_2008:[i]]
aux_2017 = df_2017.iloc[:,lambda df_2008:[i]]
aux_2018 = df_2018.iloc[:,lambda df_2008:[i]]
df_general_aux = pd.concat([aux_2008,aux_2009,aux_2010,aux_2011,aux_2012,aux_2013,aux_2014,aux_2015,aux_2016,aux_2017,aux_2018],axis=1)
df_general_aux = df_general_aux.interpolate(method='linear',axis = 1).copy()
lista_nombre_columnas = [list(aux_2008.columns)[0]+"_"+str(df_2008['anio'][0]),
list(aux_2009.columns)[0]+"_"+str(df_2009['anio'][0]),list(aux_2010.columns)[0]+"_"+str(df_2010['anio'][0]),
list(aux_2011.columns)[0]+"_"+str(df_2011['anio'][0]),list(aux_2012.columns)[0]+"_"+str(df_2012['anio'][0]),
list(aux_2013.columns)[0]+"_"+str(df_2013['anio'][0]),list(aux_2014.columns)[0]+"_"+str(df_2014['anio'][0]),
list(aux_2015.columns)[0]+"_"+str(df_2015['anio'][0]),list(aux_2016.columns)[0]+"_"+str(df_2016['anio'][0]),
list(aux_2017.columns)[0]+"_"+str(df_2017['anio'][0]),list(aux_2018.columns)[0]+"_"+str(df_2018['anio'][0])]
df_general_aux.columns = lista_nombre_columnas
df_general=pd.concat([df_general,df_general_aux],axis=1)
df_general.head()
df_nombre = pd.concat([df_2008.iloc[:,[0]],df_2008.iloc[:,[0]]],axis = 1)
df_nombre.columns = ['Id','Estado']
df_nombre['Estado'] = df_nombre['Estado'].str.replace('_',' ')
df_nombre['Estado'] = df_nombre['Estado'].str.title()
df_nombre['Id'] = df_nombre['Estado']
diccionario_id = {
'Aguascalientes':1,
'Baja California':2,
'Baja California Sur':3,
'Campeche':4,
'Chiapas':5,
'Chihuahua':6,
'Ciudad De Mexico':7,
'Coahuila':8,
'Colima':9,
'Durango':10,
'Guanajuato':11,
'Guerrero':12,
'Hidalgo':13,
'Jalisco':14,
'Mexico':15,
'Michoacan':16,
'Morelos':17,
'Nayarit':18,
'Nuevo Leon':19,
'Oaxaca':20,
'Puebla':21,
'Queretaro':22,
'Quintana Roo':23,
'San Luis Potosi':24,
'Sinaloa':25,
'Sonora':26,
'Tabasco':27,
'Tamaulipas':28,
'Tlaxcala':29,
'Veracruz':30,
'Yucatan':31,
'Zacatecas':32
}
df_nombre['Id'] = df_nombre['Id'].replace(diccionario_id)
df_unido = pd.concat([df_nombre,df_general],axis=1)
df_unido['Estado'] = df_unido['Estado'].str.replace('Ciudad Mexico','Ciudad De Mexico')
df_unido['Id'] = df_unido['Id'].replace('Ciudad Mexico',7)
df_unido
df_final = df_unido.sort_values(by=['Id'])
df_final.to_csv("DataSet_Datos_Indice_C4B.csv", index = False)
df_final
###Output
_____no_output_____ |
apphub/anomaly_detection/alocc/alocc.ipynb | ###Markdown
Anomaly Detection with FastestimatorIn this notebook we will demonstrate how to do anomaly detection using a one-class classifier as described in [Adversarially Learned One-Class Classifier for Novelty Detection](https://arxiv.org/pdf/1802.09088.pdf). In the real world, the outlier or novelty class is often absent from the training dataset. Such problems can be efficiently modeled using one-class classifiers.In the algorithm demonstrated below, two networks are trained to compete with each other, where one network acts as a novelty detector and the other enhances the inliers and distorts the outliers. We use images of digit "1" from the MNIST dataset for training and images of other digits as outliers.
###Code
import tempfile
import fastestimator as fe
import numpy as np
import tensorflow as tf
from fastestimator.backend import binary_crossentropy
from fastestimator.op.numpyop import LambdaOp
from fastestimator.op.numpyop.univariate import ExpandDims, Normalize
from fastestimator.op.tensorop import TensorOp
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
from fastestimator.trace import Trace
from fastestimator.trace.io import BestModelSaver
from fastestimator.util import to_number
from sklearn.metrics import auc, f1_score, roc_curve
from tensorflow.python.keras import layers
# Parameters
epochs=20
batch_size=128
train_steps_per_epoch=None
save_dir=tempfile.mkdtemp()
###Output
_____no_output_____
###Markdown
Building Components Downloading the dataFirst, we will download training images using the TensorFlow API. We will use images of digit `1` for training and test images of `1` as inliers and images of other digits as outliers. Outliers comprise 50% of our validation dataset.
###Code
(x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.mnist.load_data()
# Create Training Dataset
x_train, y_train = x_train[np.where((y_train == 1))], np.zeros(y_train[np.where((y_train == 1))].shape)
train_data = fe.dataset.NumpyDataset({"x": x_train, "y": y_train})
# Create Validation Dataset
x_eval0, y_eval0 = x_eval[np.where((y_eval == 1))], np.ones(y_eval[np.where((y_eval == 1))].shape)
x_eval1, y_eval1 = x_eval[np.where((y_eval != 1))], y_eval[np.where((y_eval != 1))]
# Ensuring outliers comprise 50% of the dataset
index = np.random.choice(x_eval1.shape[0], int(x_eval0.shape[0]), replace=False)
x_eval1, y_eval1 = x_eval1[index], np.zeros(y_eval1[index].shape)
x_eval, y_eval = np.concatenate([x_eval0, x_eval1]), np.concatenate([y_eval0, y_eval1])
eval_data = fe.dataset.NumpyDataset({"x": x_eval, "y": y_eval})
###Output
_____no_output_____
###Markdown
Step 1: Create `Pipeline`We will use the `LambdaOp` to add noise to the images during training.
###Code
pipeline = fe.Pipeline(
train_data=train_data,
eval_data=eval_data,
batch_size=batch_size,
ops=[
ExpandDims(inputs="x", outputs="x"),
Normalize(inputs="x", outputs="x", mean=1.0, std=1.0, max_pixel_value=127.5),
LambdaOp(fn=lambda x: x + np.random.normal(loc=0.0, scale=0.155, size=(28, 28, 1)),
inputs="x",
outputs="x_w_noise",
mode="train")
])
###Output
_____no_output_____
###Markdown
We can visualize sample images from our `Pipeline` using the 'get_results' method.
###Code
sample_batch = pipeline.get_results()
img = fe.util.ImgData(Image=sample_batch["x"][0].numpy().reshape(1, 28, 28, 1),
Noisy_Image=sample_batch["x_w_noise"][0].numpy().reshape(1, 28, 28, 1))
fig = img.paint_figure()
###Output
_____no_output_____
###Markdown
Step 2: Create `Network`The architecture of our model consists of an Autoencoder (encoder-decoder) network and a Discriminator network. [Credit: https://arxiv.org/pdf/1802.09088.pdf]
###Code
def reconstructor(input_shape=(28, 28, 1)):
model = tf.keras.Sequential()
# Encoder Block
model.add(
layers.Conv2D(32, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
input_shape=input_shape))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(0.2))
model.add(
layers.Conv2D(64, (5, 5),
strides=(2, 2),
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(0.2))
model.add(
layers.Conv2D(128, (5, 5),
strides=(2, 2),
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(0.2))
# Decoder Block
model.add(
layers.Conv2DTranspose(32, (5, 5),
strides=(2, 2),
output_padding=(0, 0),
padding='same',
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.02)))
model.add(layers.BatchNormalization())
model.add(layers.ReLU())
model.add(
layers.Conv2DTranspose(16, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.02)))
model.add(layers.BatchNormalization())
model.add(layers.ReLU())
model.add(
layers.Conv2DTranspose(1, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.02),
activation='tanh'))
return model
def discriminator(input_shape=(28, 28, 1)):
model = tf.keras.Sequential()
model.add(
layers.Conv2D(16, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
input_shape=input_shape))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(0.2))
model.add(
layers.Conv2D(32, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(0.2))
model.add(
layers.Conv2D(64, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(0.2))
model.add(
layers.Conv2D(128, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)))
model.add(layers.LeakyReLU(0.2))
model.add(layers.Flatten())
model.add(layers.Dense(1, activation="sigmoid"))
return model
recon_model = fe.build(model_fn=reconstructor, optimizer_fn=lambda: tf.optimizers.RMSprop(2e-4), model_name="reconstructor")
disc_model = fe.build(model_fn=discriminator,
optimizer_fn=lambda: tf.optimizers.RMSprop(1e-4),
model_name="discriminator")
###Output
_____no_output_____
###Markdown
Defining LossThe losses of both networks are similar to those of a standard GAN, with the exception of the autoencoder having an additional *reconstruction* loss term to enforce similarity between the input and the reconstructed image.We first define custom `TensorOp`s to calculate the losses of both the networks.
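Concretely, writing $R$ for the reconstructor and $D$ for the discriminator, the two objectives implemented by the ops below (with $\alpha=0.2$, the `RLoss` default) are $$L_R = \mathrm{BCE}\big(D(R(x)),\,1\big) + \alpha\,\mathrm{BCE}\big(R(x),\,x\big), \qquad L_D = \mathrm{BCE}\big(D(x),\,1\big) + \mathrm{BCE}\big(D(R(x)),\,0\big)$$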
###Code
class RLoss(TensorOp):
def __init__(self, alpha=0.2, inputs=None, outputs=None, mode=None):
super().__init__(inputs, outputs, mode)
self.alpha = alpha
def forward(self, data, state):
fake_score, x_fake, x = data
recon_loss = binary_crossentropy(y_true=x, y_pred=x_fake, from_logits=True)
adv_loss = binary_crossentropy(y_pred=fake_score, y_true=tf.ones_like(fake_score), from_logits=True)
return adv_loss + self.alpha * recon_loss
class DLoss(TensorOp):
def forward(self, data, state):
true_score, fake_score = data
real_loss = binary_crossentropy(y_pred=true_score, y_true=tf.ones_like(true_score), from_logits=True)
fake_loss = binary_crossentropy(y_pred=fake_score, y_true=tf.zeros_like(fake_score), from_logits=True)
total_loss = real_loss + fake_loss
return total_loss
###Output
_____no_output_____
###Markdown
We now define the `Network` object:
###Code
network = fe.Network(ops=[
ModelOp(model=recon_model, inputs="x_w_noise", outputs="x_fake", mode="train"),
ModelOp(model=recon_model, inputs="x", outputs="x_fake", mode="eval"),
ModelOp(model=disc_model, inputs="x_fake", outputs="fake_score"),
ModelOp(model=disc_model, inputs="x", outputs="true_score"),
RLoss(inputs=("fake_score", "x_fake", "x"), outputs="rloss"),
UpdateOp(model=recon_model, loss_name="rloss"),
DLoss(inputs=("true_score", "fake_score"), outputs="dloss"),
UpdateOp(model=disc_model, loss_name="dloss")
])
###Output
_____no_output_____
###Markdown
In this example we will also use the following traces:1. BestModelSaver for saving the best model. For illustration purpose, we will save these models in a temporary directory.2. A custom trace to calculate Area Under the Curve and F1-Score.
###Code
class F1AUCScores(Trace):
"""Computes F1-Score and AUC Score for a classification task and reports it back to the logger.
"""
def __init__(self, true_key, pred_key, mode=("eval", "test"), output_name=["auc_score", "f1_score"]):
super().__init__(inputs=(true_key, pred_key), outputs=output_name, mode=mode)
self.y_true = []
self.y_pred = []
@property
def true_key(self):
return self.inputs[0]
@property
def pred_key(self):
return self.inputs[1]
def on_epoch_begin(self, data):
self.y_true = []
self.y_pred = []
def on_batch_end(self, data):
y_true, y_pred = to_number(data[self.true_key]), to_number(data[self.pred_key])
assert y_pred.size == y_true.size
self.y_pred.extend(y_pred.ravel())
self.y_true.extend(y_true.ravel())
def on_epoch_end(self, data):
fpr, tpr, thresholds = roc_curve(self.y_true, self.y_pred, pos_label=1)
roc_auc = auc(fpr, tpr)
eer_threshold = thresholds[np.nanargmin(np.absolute((1 - tpr - fpr)))]
y_pred_class = np.copy(self.y_pred)
y_pred_class[y_pred_class >= eer_threshold] = 1
y_pred_class[y_pred_class < eer_threshold] = 0
f_score = f1_score(self.y_true, y_pred_class)
data.write_with_log(self.outputs[0], roc_auc)
data.write_with_log(self.outputs[1], f_score)
traces = [
F1AUCScores(true_key="y", pred_key="fake_score", mode="eval", output_name=["auc_score", "f1_score"]),
BestModelSaver(model=recon_model, save_dir=save_dir, metric='f1_score', save_best_mode='max', load_best_final=True),
BestModelSaver(model=disc_model, save_dir=save_dir, metric='f1_score', save_best_mode='max', load_best_final=True)
]
###Output
_____no_output_____
###Markdown
Step 3: Create `Estimator`
###Code
estimator = fe.Estimator(pipeline=pipeline,
network=network,
epochs=epochs,
traces=traces,
train_steps_per_epoch=train_steps_per_epoch)
###Output
_____no_output_____
###Markdown
Training
###Code
estimator.fit()
###Output
______ __ ______ __ _ __
/ ____/___ ______/ /_/ ____/____/ /_(_)___ ___ ____ _/ /_____ _____
/ /_ / __ `/ ___/ __/ __/ / ___/ __/ / __ `__ \/ __ `/ __/ __ \/ ___/
/ __/ / /_/ (__ ) /_/ /___(__ ) /_/ / / / / / / /_/ / /_/ /_/ / /
/_/ \__,_/____/\__/_____/____/\__/_/_/ /_/ /_/\__,_/\__/\____/_/
FastEstimator-Start: step: 1; num_device: 1; logging_interval: 100;
FastEstimator-Train: step: 1; dloss: 1.4547124; rloss: 0.6044176;
FastEstimator-Train: step: 53; epoch: 1; epoch_time: 6.41 sec;
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/reconstructor_best_f1_score.h5
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/discriminator_best_f1_score.h5
FastEstimator-Eval: step: 53; epoch: 1; dloss: 1.4323395; rloss: 0.6304608; auc_score: 0.758243707426886; f1_score: 0.6554770318021201; since_best_f1_score: 0; max_f1_score: 0.6554770318021201;
FastEstimator-Train: step: 100; dloss: 1.0820444; rloss: 0.72240007; steps/sec: 17.02;
FastEstimator-Train: step: 106; epoch: 2; epoch_time: 2.1 sec;
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/reconstructor_best_f1_score.h5
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/discriminator_best_f1_score.h5
FastEstimator-Eval: step: 106; epoch: 2; dloss: 1.418492; rloss: 0.66902; auc_score: 0.8146631993634652; f1_score: 0.7268722466960352; since_best_f1_score: 0; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 159; epoch: 3; epoch_time: 2.1 sec;
FastEstimator-Eval: step: 159; epoch: 3; dloss: 1.4147544; rloss: 0.7020666; auc_score: 0.15153680451784432; f1_score: 0.2431718061674009; since_best_f1_score: 1; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 200; dloss: 1.0067211; rloss: 0.660446; steps/sec: 25.29;
FastEstimator-Train: step: 212; epoch: 4; epoch_time: 2.1 sec;
FastEstimator-Eval: step: 212; epoch: 4; dloss: 1.3572774; rloss: 0.7195197; auc_score: 0.019186089386559024; f1_score: 0.06696035242290749; since_best_f1_score: 2; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 265; epoch: 5; epoch_time: 2.1 sec;
FastEstimator-Eval: step: 265; epoch: 5; dloss: 1.1044953; rloss: 0.6936346; auc_score: 0.013531409497564477; f1_score: 0.0599647266313933; since_best_f1_score: 3; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 300; dloss: 1.0064102; rloss: 0.59952056; steps/sec: 25.26;
FastEstimator-Train: step: 318; epoch: 6; epoch_time: 2.1 sec;
FastEstimator-Eval: step: 318; epoch: 6; dloss: 1.0178115; rloss: 0.64911795; auc_score: 0.4008981350307594; f1_score: 0.4140969162995595; since_best_f1_score: 4; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 371; epoch: 7; epoch_time: 2.09 sec;
FastEstimator-Eval: step: 371; epoch: 7; dloss: 1.0237744; rloss: 0.6199698; auc_score: 0.37563857245434606; f1_score: 0.3857331571994716; since_best_f1_score: 5; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 400; dloss: 1.0064098; rloss: 0.5864141; steps/sec: 25.25;
FastEstimator-Train: step: 424; epoch: 8; epoch_time: 2.11 sec;
FastEstimator-Eval: step: 424; epoch: 8; dloss: 1.010116; rloss: 0.6061601; auc_score: 0.7393793786023405; f1_score: 0.6822183098591549; since_best_f1_score: 6; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 477; epoch: 9; epoch_time: 2.1 sec;
FastEstimator-Eval: step: 477; epoch: 9; dloss: 1.0113664; rloss: 0.6000464; auc_score: 0.790935589667954; f1_score: 0.7227112676056338; since_best_f1_score: 7; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 500; dloss: 1.0064089; rloss: 0.58218443; steps/sec: 25.19;
FastEstimator-Train: step: 530; epoch: 10; epoch_time: 2.11 sec;
FastEstimator-Eval: step: 530; epoch: 10; dloss: 1.0653309; rloss: 0.59754586; auc_score: 0.37388422053600884; f1_score: 0.39136183340678715; since_best_f1_score: 8; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 583; epoch: 11; epoch_time: 2.11 sec;
FastEstimator-Eval: step: 583; epoch: 11; dloss: 1.0164112; rloss: 0.5950321; auc_score: 0.5598245648081662; f1_score: 0.5395842547545334; since_best_f1_score: 9; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 600; dloss: 1.006409; rloss: 0.5809828; steps/sec: 25.18;
FastEstimator-Train: step: 636; epoch: 12; epoch_time: 2.11 sec;
FastEstimator-Eval: step: 636; epoch: 12; dloss: 1.0633637; rloss: 0.5527591; auc_score: 0.7476892623571193; f1_score: 0.678996036988111; since_best_f1_score: 10; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 689; epoch: 13; epoch_time: 2.1 sec;
FastEstimator-Eval: step: 689; epoch: 13; dloss: 1.2391297; rloss: 0.6055295; auc_score: 0.2452661608026548; f1_score: 0.2874779541446208; since_best_f1_score: 11; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 700; dloss: 1.0498809; rloss: 0.57184714; steps/sec: 25.21;
FastEstimator-Train: step: 742; epoch: 14; epoch_time: 2.1 sec;
FastEstimator-Eval: step: 742; epoch: 14; dloss: 1.1737943; rloss: 0.60413545; auc_score: 0.4565204059849794; f1_score: 0.4269960299955889; since_best_f1_score: 12; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 795; epoch: 15; epoch_time: 2.11 sec;
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/reconstructor_best_f1_score.h5
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/discriminator_best_f1_score.h5
FastEstimator-Eval: step: 795; epoch: 15; dloss: 1.0669132; rloss: 0.60187024; auc_score: 0.9049614780026781; f1_score: 0.8089788732394366; since_best_f1_score: 0; max_f1_score: 0.8089788732394366;
FastEstimator-Train: step: 800; dloss: 1.08307; rloss: 0.5877172; steps/sec: 25.13;
FastEstimator-Train: step: 848; epoch: 16; epoch_time: 2.11 sec;
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/reconstructor_best_f1_score.h5
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/discriminator_best_f1_score.h5
FastEstimator-Eval: step: 848; epoch: 16; dloss: 1.3530512; rloss: 0.6081449; auc_score: 0.941642958334142; f1_score: 0.8656979304271246; since_best_f1_score: 0; max_f1_score: 0.8656979304271246;
FastEstimator-Train: step: 900; dloss: 1.3662006; rloss: 0.58985895; steps/sec: 25.06;
FastEstimator-Train: step: 901; epoch: 17; epoch_time: 2.14 sec;
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/reconstructor_best_f1_score.h5
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/discriminator_best_f1_score.h5
FastEstimator-Eval: step: 901; epoch: 17; dloss: 1.227813; rloss: 0.5782926; auc_score: 0.9882008189563158; f1_score: 0.948526176858777; since_best_f1_score: 0; max_f1_score: 0.948526176858777;
FastEstimator-Train: step: 954; epoch: 18; epoch_time: 2.11 sec;
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/reconstructor_best_f1_score.h5
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/discriminator_best_f1_score.h5
FastEstimator-Eval: step: 954; epoch: 18; dloss: 1.328288; rloss: 0.5912411; auc_score: 0.9875805856896116; f1_score: 0.9559471365638766; since_best_f1_score: 0; max_f1_score: 0.9559471365638766;
FastEstimator-Train: step: 1000; dloss: 1.2895771; rloss: 0.53317624; steps/sec: 24.93;
FastEstimator-Train: step: 1007; epoch: 19; epoch_time: 2.11 sec;
FastEstimator-Eval: step: 1007; epoch: 19; dloss: 1.3832934; rloss: 0.6033389; auc_score: 0.9015963826194958; f1_score: 0.813215859030837; since_best_f1_score: 1; max_f1_score: 0.9559471365638766;
FastEstimator-Train: step: 1060; epoch: 20; epoch_time: 2.11 sec;
FastEstimator-Eval: step: 1060; epoch: 20; dloss: 1.3230872; rloss: 0.5129401; auc_score: 0.4634978361699238; f1_score: 0.48722466960352423; since_best_f1_score: 2; max_f1_score: 0.9559471365638766;
FastEstimator-BestModelSaver: Restoring model from /tmp/tmpf8gmdf9j/reconstructor_best_f1_score.h5
FastEstimator-BestModelSaver: Restoring model from /tmp/tmpf8gmdf9j/discriminator_best_f1_score.h5
FastEstimator-Finish: step: 1060; total_time: 62.06 sec; discriminator_lr: 1e-04; reconstructor_lr: 0.0002;
###Markdown
InferencingOnce the training is finished, we will apply the model to visualize the reconstructed image of the inliers and outliers.
###Code
idx0 = np.random.randint(len(x_eval0))
idx1 = np.random.randint(len(x_eval1))
data = [{"x": x_eval0[idx0]}, {"x": x_eval1[idx1]}]
result = [pipeline.transform(data[i], mode="infer") for i in range(len(data))]
network = fe.Network(ops=[
ModelOp(model=recon_model, inputs="x", outputs="x_fake"),
ModelOp(model=disc_model, inputs="x_fake", outputs="fake_score")
])
output_imgs = [network.transform(result[i], mode="infer") for i in range(len(result))]
base_image = output_imgs[0]["x"].numpy()
anomaly_image = output_imgs[1]["x"].numpy()
recon_base_image = output_imgs[0]["x_fake"].numpy()
recon_anomaly_image = output_imgs[1]["x_fake"].numpy()
img1 = fe.util.ImgData(Input_Image=base_image, Reconstructed_Image=recon_base_image)
fig1 = img1.paint_figure()
img2 = fe.util.ImgData(Input_Image=anomaly_image, Reconstructed_Image=recon_anomaly_image)
fig2 = img2.paint_figure()
###Output
_____no_output_____
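###Markdown
The `fake_score` produced by the discriminator can also be turned into an explicit anomaly decision. The following is a sketch, not part of the original notebook: pick a threshold on validation scores, for example at the equal-error-rate point as the `F1AUCScores` trace does, and flag low scores as outliers. The names `val_labels_np` and `val_scores_np` in the commented usage are placeholders for arrays of validation labels and discriminator scores.
```python
import numpy as np
from sklearn.metrics import roc_curve

def eer_threshold(y_true, scores):
    # Equal-error-rate threshold: the point where false-positive and false-negative rates match.
    fpr, tpr, thresholds = roc_curve(y_true, scores, pos_label=1)
    return thresholds[np.nanargmin(np.abs(1 - tpr - fpr))]

def is_outlier(score, threshold):
    # A low discriminator score means the reconstruction does not look like an inlier digit "1".
    return score < threshold

# threshold = eer_threshold(val_labels_np, val_scores_np)                      # placeholder arrays
# print(is_outlier(float(output_imgs[1]["fake_score"].numpy()), threshold))    # outlier example above
```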
###Markdown
Anomaly Detection with FastestimatorIn this notebook we will demonstrate how to do anomaly detection using a one-class classifier as described in [Adversarially Learned One-Class Classifier for Novelty Detection](https://arxiv.org/pdf/1802.09088.pdf). In the real world, the outlier or novelty class is often absent from the training dataset. Such problems can be efficiently modeled using one-class classifiers.In the algorithm demonstrated below, two networks are trained to compete with each other, where one network acts as a novelty detector and the other enhances the inliers and distorts the outliers. We use images of digit "1" from the MNIST dataset for training and images of other digits as outliers.
###Code
import tempfile
import fastestimator as fe
import numpy as np
import tensorflow as tf
from fastestimator.backend import binary_crossentropy
from fastestimator.op.numpyop import LambdaOp
from fastestimator.op.numpyop.univariate import ExpandDims, Normalize
from fastestimator.op.tensorop import TensorOp
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
from fastestimator.trace import Trace
from fastestimator.trace.io import BestModelSaver
from fastestimator.util import to_number
from sklearn.metrics import auc, f1_score, roc_curve
from tensorflow.keras import layers
# Parameters
epochs=20
batch_size=128
train_steps_per_epoch=None
save_dir=tempfile.mkdtemp()
###Output
_____no_output_____
###Markdown
Building Components Downloading the dataFirst, we will download training images using the TensorFlow API. We will use images of digit `1` for training and test images of `1` as inliers and images of other digits as outliers. Outliers comprise 50% of our validation dataset.
###Code
(x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.mnist.load_data()
# Create Training Dataset
x_train, y_train = x_train[np.where((y_train == 1))], np.zeros(y_train[np.where((y_train == 1))].shape)
train_data = fe.dataset.NumpyDataset({"x": x_train, "y": y_train})
# Create Validation Dataset
x_eval0, y_eval0 = x_eval[np.where((y_eval == 1))], np.ones(y_eval[np.where((y_eval == 1))].shape)
x_eval1, y_eval1 = x_eval[np.where((y_eval != 1))], y_eval[np.where((y_eval != 1))]
# Ensuring outliers comprise 50% of the dataset
index = np.random.choice(x_eval1.shape[0], int(x_eval0.shape[0]), replace=False)
x_eval1, y_eval1 = x_eval1[index], np.zeros(y_eval1[index].shape)
x_eval, y_eval = np.concatenate([x_eval0, x_eval1]), np.concatenate([y_eval0, y_eval1])
eval_data = fe.dataset.NumpyDataset({"x": x_eval, "y": y_eval})
###Output
_____no_output_____
###Markdown
Step 1: Create `Pipeline`We will use the `LambdaOp` to add noise to the images during training.
###Code
pipeline = fe.Pipeline(
train_data=train_data,
eval_data=eval_data,
batch_size=batch_size,
ops=[
ExpandDims(inputs="x", outputs="x"),
Normalize(inputs="x", outputs="x", mean=1.0, std=1.0, max_pixel_value=127.5),
LambdaOp(fn=lambda x: x + np.random.normal(loc=0.0, scale=0.155, size=(28, 28, 1)),
inputs="x",
outputs="x_w_noise",
mode="train")
])
###Output
_____no_output_____
###Markdown
We can visualize sample images from our `Pipeline` using the 'get_results' method.
###Code
sample_batch = pipeline.get_results()
img = fe.util.ImgData(Image=sample_batch["x"][0].numpy().reshape(1, 28, 28, 1),
Noisy_Image=sample_batch["x_w_noise"][0].numpy().reshape(1, 28, 28, 1))
fig = img.paint_figure()
###Output
_____no_output_____
###Markdown
Step 2: Create `Network`The architecture of our model consists of an Autoencoder (encoder-decoder) network and a Discriminator network. [Credit: https://arxiv.org/pdf/1802.09088.pdf]
###Code
def reconstructor(input_shape=(28, 28, 1)):
model = tf.keras.Sequential()
# Encoder Block
model.add(
layers.Conv2D(32, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
input_shape=input_shape))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(0.2))
model.add(
layers.Conv2D(64, (5, 5),
strides=(2, 2),
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(0.2))
model.add(
layers.Conv2D(128, (5, 5),
strides=(2, 2),
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(0.2))
# Decoder Block
model.add(
layers.Conv2DTranspose(32, (5, 5),
strides=(2, 2),
output_padding=(0, 0),
padding='same',
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.02)))
model.add(layers.BatchNormalization())
model.add(layers.ReLU())
model.add(
layers.Conv2DTranspose(16, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.02)))
model.add(layers.BatchNormalization())
model.add(layers.ReLU())
model.add(
layers.Conv2DTranspose(1, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.02),
activation='tanh'))
return model
def discriminator(input_shape=(28, 28, 1)):
model = tf.keras.Sequential()
model.add(
layers.Conv2D(16, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
input_shape=input_shape))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(0.2))
model.add(
layers.Conv2D(32, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(0.2))
model.add(
layers.Conv2D(64, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(0.2))
model.add(
layers.Conv2D(128, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)))
model.add(layers.LeakyReLU(0.2))
model.add(layers.Flatten())
model.add(layers.Dense(1, activation="sigmoid"))
return model
recon_model = fe.build(model_fn=reconstructor, optimizer_fn=lambda: tf.optimizers.RMSprop(2e-4), model_name="reconstructor")
disc_model = fe.build(model_fn=discriminator,
optimizer_fn=lambda: tf.optimizers.RMSprop(1e-4),
model_name="discriminator")
###Output
2022-02-22 16:37:30.450543: I tensorflow/core/common_runtime/pluggable_device/pluggable_device_factory.cc:305] Could not identify NUMA node of platform GPU ID 0, defaulting to 0. Your kernel may not have been built with NUMA support.
2022-02-22 16:37:30.450670: I tensorflow/core/common_runtime/pluggable_device/pluggable_device_factory.cc:271] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 0 MB memory) -> physical PluggableDevice (device: 0, name: METAL, pci bus id: <undefined>)
###Markdown
Defining LossThe losses of both networks are similar to those of a standard GAN, with the exception that the autoencoder has an additional *reconstruction* loss term to enforce similarity between the input and the reconstructed image.We first define custom `TensorOp`s to calculate the losses of both networks.
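Concretely, writing $R$ for the reconstructor, $D$ for the discriminator, $\tilde{x}$ for the noisy input and $\mathrm{BCE}(p, t)$ for the binary cross-entropy between prediction $p$ and target $t$, the two training objectives implemented below are (a sketch, with $\alpha$ the `alpha` weight of `RLoss`): $$\mathcal{L}_R = \mathrm{BCE}\big(D(R(\tilde{x})), 1\big) + \alpha\,\mathrm{BCE}\big(R(\tilde{x}), x\big), \qquad \mathcal{L}_D = \mathrm{BCE}\big(D(x), 1\big) + \mathrm{BCE}\big(D(R(\tilde{x})), 0\big)$$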
###Code
class RLoss(TensorOp):
def __init__(self, alpha=0.2, inputs=None, outputs=None, mode=None):
super().__init__(inputs, outputs, mode)
self.alpha = alpha
def forward(self, data, state):
fake_score, x_fake, x = data
recon_loss = binary_crossentropy(y_true=x, y_pred=x_fake, from_logits=True)
adv_loss = binary_crossentropy(y_pred=fake_score, y_true=tf.ones_like(fake_score), from_logits=True)
return adv_loss + self.alpha * recon_loss
class DLoss(TensorOp):
def forward(self, data, state):
true_score, fake_score = data
real_loss = binary_crossentropy(y_pred=true_score, y_true=tf.ones_like(true_score), from_logits=True)
fake_loss = binary_crossentropy(y_pred=fake_score, y_true=tf.zeros_like(fake_score), from_logits=True)
total_loss = real_loss + fake_loss
return total_loss
###Output
_____no_output_____
###Markdown
We now define the `Network` object:
###Code
network = fe.Network(ops=[
ModelOp(model=recon_model, inputs="x_w_noise", outputs="x_fake", mode="train"),
ModelOp(model=recon_model, inputs="x", outputs="x_fake", mode="eval"),
ModelOp(model=disc_model, inputs="x_fake", outputs="fake_score"),
ModelOp(model=disc_model, inputs="x", outputs="true_score"),
RLoss(inputs=("fake_score", "x_fake", "x"), outputs="rloss"),
UpdateOp(model=recon_model, loss_name="rloss"),
DLoss(inputs=("true_score", "fake_score"), outputs="dloss"),
UpdateOp(model=disc_model, loss_name="dloss")
])
###Output
2022-02-22 16:37:34.773678: W tensorflow/python/util/util.cc:368] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.
###Markdown
In this example we will also use the following traces:1. `BestModelSaver` for saving the best model. For illustration purposes, we will save these models in a temporary directory.2. A custom trace to calculate the Area Under the Curve and the F1-Score.
###Code
class F1AUCScores(Trace):
"""Computes F1-Score and AUC Score for a classification task and reports it back to the logger.
"""
def __init__(self, true_key, pred_key, mode=("eval", "test"), output_name=["auc_score", "f1_score"]):
super().__init__(inputs=(true_key, pred_key), outputs=output_name, mode=mode)
self.y_true = []
self.y_pred = []
@property
def true_key(self):
return self.inputs[0]
@property
def pred_key(self):
return self.inputs[1]
def on_epoch_begin(self, data):
self.y_true = []
self.y_pred = []
def on_batch_end(self, data):
y_true, y_pred = to_number(data[self.true_key]), to_number(data[self.pred_key])
assert y_pred.size == y_true.size
self.y_pred.extend(y_pred.ravel())
self.y_true.extend(y_true.ravel())
def on_epoch_end(self, data):
fpr, tpr, thresholds = roc_curve(self.y_true, self.y_pred, pos_label=1)
roc_auc = auc(fpr, tpr)
eer_threshold = thresholds[np.nanargmin(np.absolute((1 - tpr - fpr)))]
y_pred_class = np.copy(self.y_pred)
y_pred_class[y_pred_class >= eer_threshold] = 1
y_pred_class[y_pred_class < eer_threshold] = 0
f_score = f1_score(self.y_true, y_pred_class)
data.write_with_log(self.outputs[0], roc_auc)
data.write_with_log(self.outputs[1], f_score)
traces = [
F1AUCScores(true_key="y", pred_key="fake_score", mode="eval", output_name=["auc_score", "f1_score"]),
BestModelSaver(model=recon_model, save_dir=save_dir, metric='f1_score', save_best_mode='max', load_best_final=True),
BestModelSaver(model=disc_model, save_dir=save_dir, metric='f1_score', save_best_mode='max', load_best_final=True)
]
###Output
_____no_output_____
###Markdown
Step 3: Create `Estimator`
###Code
estimator = fe.Estimator(pipeline=pipeline,
network=network,
epochs=epochs,
traces=traces,
train_steps_per_epoch=train_steps_per_epoch)
###Output
_____no_output_____
###Markdown
Training
###Code
estimator.fit()
###Output
______ __ ______ __ _ __
/ ____/___ ______/ /_/ ____/____/ /_(_)___ ___ ____ _/ /_____ _____
/ /_ / __ `/ ___/ __/ __/ / ___/ __/ / __ `__ \/ __ `/ __/ __ \/ ___/
/ __/ / /_/ (__ ) /_/ /___(__ ) /_/ / / / / / / /_/ / /_/ /_/ / /
/_/ \__,_/____/\__/_____/____/\__/_/_/ /_/ /_/\__,_/\__/\____/_/
###Markdown
InferencingOnce the training is finished, we will apply the model to visualize the reconstructed images of an inlier and an outlier.
###Code
idx0 = np.random.randint(len(x_eval0))
idx1 = np.random.randint(len(x_eval1))
data = [{"x": x_eval0[idx0]}, {"x": x_eval1[idx1]}]
result = [pipeline.transform(data[i], mode="infer") for i in range(len(data))]
network = fe.Network(ops=[
ModelOp(model=recon_model, inputs="x", outputs="x_fake"),
ModelOp(model=disc_model, inputs="x_fake", outputs="fake_score")
])
output_imgs = [network.transform(result[i], mode="infer") for i in range(len(result))]
base_image = output_imgs[0]["x"].numpy()
anomaly_image = output_imgs[1]["x"].numpy()
recon_base_image = output_imgs[0]["x_fake"].numpy()
recon_anomaly_image = output_imgs[1]["x_fake"].numpy()
img1 = fe.util.ImgData(Input_Image=base_image, Reconstructed_Image=recon_base_image)
fig1 = img1.paint_figure()
img2 = fe.util.ImgData(Input_Image=anomaly_image, Reconstructed_Image=recon_anomaly_image)
fig2 = img2.paint_figure()
###Output
_____no_output_____
###Markdown
Anomaly Detection with FastestimatorIn this notebook we will demonstrate how to do anomaly detection using a one-class classifier as described in [Adversarially Learned One-Class Classifier for Novelty Detection](https://arxiv.org/pdf/1802.09088.pdf). In the real world, the outlier or novelty class is often absent from the training dataset. Such problems can be modeled efficiently using one-class classifiers.In the algorithm demonstrated below, two networks are trained to compete with each other, where one network acts as a novelty detector and the other enhances the inliers and distorts the outliers. We use images of the digit "1" from the MNIST dataset for training and images of other digits as outliers.
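At evaluation time the inlier score for an image $x$ is simply the discriminator's output on its reconstruction, $s(x) = D(R(x))$: as wired up in the `Network` and the custom trace below, this is the `fake_score` key that gets compared against the label `y` for the ROC/F1 metrics.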
###Code
import tempfile
import fastestimator as fe
import numpy as np
import tensorflow as tf
from fastestimator.backend import binary_crossentropy
from fastestimator.op.numpyop import LambdaOp
from fastestimator.op.numpyop.univariate import ExpandDims, Normalize
from fastestimator.op.tensorop import TensorOp
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
from fastestimator.trace import Trace
from fastestimator.trace.io import BestModelSaver
from fastestimator.util import to_number
from sklearn.metrics import auc, f1_score, roc_curve
from tensorflow.python.keras import layers
# Parameters
epochs=20
batch_size=128
max_train_steps_per_epoch=None
save_dir=tempfile.mkdtemp()
###Output
_____no_output_____
###Markdown
Building Components Downloading the dataFirst, we will download the training images using the TensorFlow API. We will use images of the digit `1` for training, and test images of `1` as inliers and images of other digits as outliers. Outliers comprise 50% of our validation dataset.
###Code
(x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.mnist.load_data()
# Create Training Dataset
x_train, y_train = x_train[np.where((y_train == 1))], np.zeros(y_train[np.where((y_train == 1))].shape)
train_data = fe.dataset.NumpyDataset({"x": x_train, "y": y_train})
# Create Validation Dataset
x_eval0, y_eval0 = x_eval[np.where((y_eval == 1))], np.ones(y_eval[np.where((y_eval == 1))].shape)
x_eval1, y_eval1 = x_eval[np.where((y_eval != 1))], y_eval[np.where((y_eval != 1))]
# Ensuring outliers comprise 50% of the dataset
index = np.random.choice(x_eval1.shape[0], int(x_eval0.shape[0]), replace=False)
x_eval1, y_eval1 = x_eval1[index], np.zeros(y_eval1[index].shape)
x_eval, y_eval = np.concatenate([x_eval0, x_eval1]), np.concatenate([y_eval0, y_eval1])
eval_data = fe.dataset.NumpyDataset({"x": x_eval, "y": y_eval})
###Output
_____no_output_____
###Markdown
Step 1: Create `Pipeline`We will use the `LambdaOp` to add noise to the images during training.
###Code
pipeline = fe.Pipeline(
train_data=train_data,
eval_data=eval_data,
batch_size=batch_size,
ops=[
ExpandDims(inputs="x", outputs="x"),
Normalize(inputs="x", outputs="x", mean=1.0, std=1.0, max_pixel_value=127.5),
LambdaOp(fn=lambda x: x + np.random.normal(loc=0.0, scale=0.155, size=(28, 28, 1)),
inputs="x",
outputs="x_w_noise",
mode="train")
])
###Output
_____no_output_____
###Markdown
We can visualize sample images from our `Pipeline` using the `get_results` method.
###Code
sample_batch = pipeline.get_results()
img = fe.util.ImgData(Image=sample_batch["x"][0].numpy().reshape(1, 28, 28, 1),
Noisy_Image=sample_batch["x_w_noise"][0].numpy().reshape(1, 28, 28, 1))
fig = img.paint_figure()
###Output
_____no_output_____
###Markdown
Step 2: Create `Network`The architecture of our model consists of an Autoencoder (encoder-decoder) network and a Discriminator network.[Credit: https://arxiv.org/pdf/1802.09088.pdf]
###Code
def reconstructor(input_shape=(28, 28, 1)):
model = tf.keras.Sequential()
# Encoder Block
model.add(
layers.Conv2D(32, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
input_shape=input_shape))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(0.2))
model.add(
layers.Conv2D(64, (5, 5),
strides=(2, 2),
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(0.2))
model.add(
layers.Conv2D(128, (5, 5),
strides=(2, 2),
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(0.2))
# Decoder Block
model.add(
layers.Conv2DTranspose(32, (5, 5),
strides=(2, 2),
output_padding=(0, 0),
padding='same',
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.02)))
model.add(layers.BatchNormalization())
model.add(layers.ReLU())
model.add(
layers.Conv2DTranspose(16, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.02)))
model.add(layers.BatchNormalization())
model.add(layers.ReLU())
model.add(
layers.Conv2DTranspose(1, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.02),
activation='tanh'))
return model
def discriminator(input_shape=(28, 28, 1)):
model = tf.keras.Sequential()
model.add(
layers.Conv2D(16, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
input_shape=input_shape))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(0.2))
model.add(
layers.Conv2D(32, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(0.2))
model.add(
layers.Conv2D(64, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(0.2))
model.add(
layers.Conv2D(128, (5, 5),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)))
model.add(layers.LeakyReLU(0.2))
model.add(layers.Flatten())
model.add(layers.Dense(1, activation="sigmoid"))
return model
recon_model = fe.build(model_fn=reconstructor, optimizer_fn=lambda: tf.optimizers.RMSprop(2e-4), model_name="reconstructor")
disc_model = fe.build(model_fn=discriminator,
optimizer_fn=lambda: tf.optimizers.RMSprop(1e-4),
model_name="discriminator")
###Output
_____no_output_____
###Markdown
Defining LossThe losses of both networks are similar to those of a standard GAN, with the exception that the autoencoder has an additional *reconstruction* loss term to enforce similarity between the input and the reconstructed image.We first define custom `TensorOp`s to calculate the losses of both networks.
###Code
class RLoss(TensorOp):
def __init__(self, alpha=0.2, inputs=None, outputs=None, mode=None):
super().__init__(inputs, outputs, mode)
self.alpha = alpha
def forward(self, data, state):
fake_score, x_fake, x = data
recon_loss = binary_crossentropy(y_true=x, y_pred=x_fake, from_logits=True)
adv_loss = binary_crossentropy(y_pred=fake_score, y_true=tf.ones_like(fake_score), from_logits=True)
return adv_loss + self.alpha * recon_loss
class DLoss(TensorOp):
def forward(self, data, state):
true_score, fake_score = data
real_loss = binary_crossentropy(y_pred=true_score, y_true=tf.ones_like(true_score), from_logits=True)
fake_loss = binary_crossentropy(y_pred=fake_score, y_true=tf.zeros_like(fake_score), from_logits=True)
total_loss = real_loss + fake_loss
return total_loss
###Output
_____no_output_____
###Markdown
We now define the `Network` object:
###Code
network = fe.Network(ops=[
ModelOp(model=recon_model, inputs="x_w_noise", outputs="x_fake", mode="train"),
ModelOp(model=recon_model, inputs="x", outputs="x_fake", mode="eval"),
ModelOp(model=disc_model, inputs="x_fake", outputs="fake_score"),
ModelOp(model=disc_model, inputs="x", outputs="true_score"),
RLoss(inputs=("fake_score", "x_fake", "x"), outputs="rloss"),
UpdateOp(model=recon_model, loss_name="rloss"),
DLoss(inputs=("true_score", "fake_score"), outputs="dloss"),
UpdateOp(model=disc_model, loss_name="dloss")
])
###Output
_____no_output_____
###Markdown
In this example we will also use the following traces:1. `BestModelSaver` for saving the best model. For illustration purposes, we will save these models in a temporary directory.2. A custom trace to calculate the Area Under the Curve and the F1-Score.
###Code
class F1AUCScores(Trace):
"""Computes F1-Score and AUC Score for a classification task and reports it back to the logger.
"""
def __init__(self, true_key, pred_key, mode=("eval", "test"), output_name=["auc_score", "f1_score"]):
super().__init__(inputs=(true_key, pred_key), outputs=output_name, mode=mode)
self.y_true = []
self.y_pred = []
@property
def true_key(self):
return self.inputs[0]
@property
def pred_key(self):
return self.inputs[1]
def on_epoch_begin(self, data):
self.y_true = []
self.y_pred = []
def on_batch_end(self, data):
y_true, y_pred = to_number(data[self.true_key]), to_number(data[self.pred_key])
assert y_pred.size == y_true.size
self.y_pred.extend(y_pred.ravel())
self.y_true.extend(y_true.ravel())
def on_epoch_end(self, data):
fpr, tpr, thresholds = roc_curve(self.y_true, self.y_pred, pos_label=1)
roc_auc = auc(fpr, tpr)
eer_threshold = thresholds[np.nanargmin(np.absolute((1 - tpr - fpr)))]
y_pred_class = np.copy(self.y_pred)
y_pred_class[y_pred_class >= eer_threshold] = 1
y_pred_class[y_pred_class < eer_threshold] = 0
f_score = f1_score(self.y_true, y_pred_class)
data.write_with_log(self.outputs[0], roc_auc)
data.write_with_log(self.outputs[1], f_score)
traces = [
F1AUCScores(true_key="y", pred_key="fake_score", mode="eval", output_name=["auc_score", "f1_score"]),
BestModelSaver(model=recon_model, save_dir=save_dir, metric='f1_score', save_best_mode='max', load_best_final=True),
BestModelSaver(model=disc_model, save_dir=save_dir, metric='f1_score', save_best_mode='max', load_best_final=True)
]
###Output
_____no_output_____
###Markdown
Step 3: Create `Estimator`
###Code
estimator = fe.Estimator(pipeline=pipeline,
network=network,
epochs=epochs,
traces=traces,
max_train_steps_per_epoch=max_train_steps_per_epoch)
###Output
_____no_output_____
###Markdown
Training
###Code
estimator.fit()
###Output
______ __ ______ __ _ __
/ ____/___ ______/ /_/ ____/____/ /_(_)___ ___ ____ _/ /_____ _____
/ /_ / __ `/ ___/ __/ __/ / ___/ __/ / __ `__ \/ __ `/ __/ __ \/ ___/
/ __/ / /_/ (__ ) /_/ /___(__ ) /_/ / / / / / / /_/ / /_/ /_/ / /
/_/ \__,_/____/\__/_____/____/\__/_/_/ /_/ /_/\__,_/\__/\____/_/
FastEstimator-Start: step: 1; num_device: 1; logging_interval: 100;
FastEstimator-Train: step: 1; dloss: 1.4547124; rloss: 0.6044176;
FastEstimator-Train: step: 53; epoch: 1; epoch_time: 6.41 sec;
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/reconstructor_best_f1_score.h5
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/discriminator_best_f1_score.h5
FastEstimator-Eval: step: 53; epoch: 1; dloss: 1.4323395; rloss: 0.6304608; auc_score: 0.758243707426886; f1_score: 0.6554770318021201; since_best_f1_score: 0; max_f1_score: 0.6554770318021201;
FastEstimator-Train: step: 100; dloss: 1.0820444; rloss: 0.72240007; steps/sec: 17.02;
FastEstimator-Train: step: 106; epoch: 2; epoch_time: 2.1 sec;
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/reconstructor_best_f1_score.h5
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/discriminator_best_f1_score.h5
FastEstimator-Eval: step: 106; epoch: 2; dloss: 1.418492; rloss: 0.66902; auc_score: 0.8146631993634652; f1_score: 0.7268722466960352; since_best_f1_score: 0; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 159; epoch: 3; epoch_time: 2.1 sec;
FastEstimator-Eval: step: 159; epoch: 3; dloss: 1.4147544; rloss: 0.7020666; auc_score: 0.15153680451784432; f1_score: 0.2431718061674009; since_best_f1_score: 1; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 200; dloss: 1.0067211; rloss: 0.660446; steps/sec: 25.29;
FastEstimator-Train: step: 212; epoch: 4; epoch_time: 2.1 sec;
FastEstimator-Eval: step: 212; epoch: 4; dloss: 1.3572774; rloss: 0.7195197; auc_score: 0.019186089386559024; f1_score: 0.06696035242290749; since_best_f1_score: 2; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 265; epoch: 5; epoch_time: 2.1 sec;
FastEstimator-Eval: step: 265; epoch: 5; dloss: 1.1044953; rloss: 0.6936346; auc_score: 0.013531409497564477; f1_score: 0.0599647266313933; since_best_f1_score: 3; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 300; dloss: 1.0064102; rloss: 0.59952056; steps/sec: 25.26;
FastEstimator-Train: step: 318; epoch: 6; epoch_time: 2.1 sec;
FastEstimator-Eval: step: 318; epoch: 6; dloss: 1.0178115; rloss: 0.64911795; auc_score: 0.4008981350307594; f1_score: 0.4140969162995595; since_best_f1_score: 4; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 371; epoch: 7; epoch_time: 2.09 sec;
FastEstimator-Eval: step: 371; epoch: 7; dloss: 1.0237744; rloss: 0.6199698; auc_score: 0.37563857245434606; f1_score: 0.3857331571994716; since_best_f1_score: 5; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 400; dloss: 1.0064098; rloss: 0.5864141; steps/sec: 25.25;
FastEstimator-Train: step: 424; epoch: 8; epoch_time: 2.11 sec;
FastEstimator-Eval: step: 424; epoch: 8; dloss: 1.010116; rloss: 0.6061601; auc_score: 0.7393793786023405; f1_score: 0.6822183098591549; since_best_f1_score: 6; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 477; epoch: 9; epoch_time: 2.1 sec;
FastEstimator-Eval: step: 477; epoch: 9; dloss: 1.0113664; rloss: 0.6000464; auc_score: 0.790935589667954; f1_score: 0.7227112676056338; since_best_f1_score: 7; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 500; dloss: 1.0064089; rloss: 0.58218443; steps/sec: 25.19;
FastEstimator-Train: step: 530; epoch: 10; epoch_time: 2.11 sec;
FastEstimator-Eval: step: 530; epoch: 10; dloss: 1.0653309; rloss: 0.59754586; auc_score: 0.37388422053600884; f1_score: 0.39136183340678715; since_best_f1_score: 8; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 583; epoch: 11; epoch_time: 2.11 sec;
FastEstimator-Eval: step: 583; epoch: 11; dloss: 1.0164112; rloss: 0.5950321; auc_score: 0.5598245648081662; f1_score: 0.5395842547545334; since_best_f1_score: 9; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 600; dloss: 1.006409; rloss: 0.5809828; steps/sec: 25.18;
FastEstimator-Train: step: 636; epoch: 12; epoch_time: 2.11 sec;
FastEstimator-Eval: step: 636; epoch: 12; dloss: 1.0633637; rloss: 0.5527591; auc_score: 0.7476892623571193; f1_score: 0.678996036988111; since_best_f1_score: 10; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 689; epoch: 13; epoch_time: 2.1 sec;
FastEstimator-Eval: step: 689; epoch: 13; dloss: 1.2391297; rloss: 0.6055295; auc_score: 0.2452661608026548; f1_score: 0.2874779541446208; since_best_f1_score: 11; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 700; dloss: 1.0498809; rloss: 0.57184714; steps/sec: 25.21;
FastEstimator-Train: step: 742; epoch: 14; epoch_time: 2.1 sec;
FastEstimator-Eval: step: 742; epoch: 14; dloss: 1.1737943; rloss: 0.60413545; auc_score: 0.4565204059849794; f1_score: 0.4269960299955889; since_best_f1_score: 12; max_f1_score: 0.7268722466960352;
FastEstimator-Train: step: 795; epoch: 15; epoch_time: 2.11 sec;
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/reconstructor_best_f1_score.h5
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/discriminator_best_f1_score.h5
FastEstimator-Eval: step: 795; epoch: 15; dloss: 1.0669132; rloss: 0.60187024; auc_score: 0.9049614780026781; f1_score: 0.8089788732394366; since_best_f1_score: 0; max_f1_score: 0.8089788732394366;
FastEstimator-Train: step: 800; dloss: 1.08307; rloss: 0.5877172; steps/sec: 25.13;
FastEstimator-Train: step: 848; epoch: 16; epoch_time: 2.11 sec;
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/reconstructor_best_f1_score.h5
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/discriminator_best_f1_score.h5
FastEstimator-Eval: step: 848; epoch: 16; dloss: 1.3530512; rloss: 0.6081449; auc_score: 0.941642958334142; f1_score: 0.8656979304271246; since_best_f1_score: 0; max_f1_score: 0.8656979304271246;
FastEstimator-Train: step: 900; dloss: 1.3662006; rloss: 0.58985895; steps/sec: 25.06;
FastEstimator-Train: step: 901; epoch: 17; epoch_time: 2.14 sec;
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/reconstructor_best_f1_score.h5
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/discriminator_best_f1_score.h5
FastEstimator-Eval: step: 901; epoch: 17; dloss: 1.227813; rloss: 0.5782926; auc_score: 0.9882008189563158; f1_score: 0.948526176858777; since_best_f1_score: 0; max_f1_score: 0.948526176858777;
FastEstimator-Train: step: 954; epoch: 18; epoch_time: 2.11 sec;
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/reconstructor_best_f1_score.h5
FastEstimator-BestModelSaver: Saved model to /tmp/tmpf8gmdf9j/discriminator_best_f1_score.h5
FastEstimator-Eval: step: 954; epoch: 18; dloss: 1.328288; rloss: 0.5912411; auc_score: 0.9875805856896116; f1_score: 0.9559471365638766; since_best_f1_score: 0; max_f1_score: 0.9559471365638766;
FastEstimator-Train: step: 1000; dloss: 1.2895771; rloss: 0.53317624; steps/sec: 24.93;
FastEstimator-Train: step: 1007; epoch: 19; epoch_time: 2.11 sec;
FastEstimator-Eval: step: 1007; epoch: 19; dloss: 1.3832934; rloss: 0.6033389; auc_score: 0.9015963826194958; f1_score: 0.813215859030837; since_best_f1_score: 1; max_f1_score: 0.9559471365638766;
FastEstimator-Train: step: 1060; epoch: 20; epoch_time: 2.11 sec;
FastEstimator-Eval: step: 1060; epoch: 20; dloss: 1.3230872; rloss: 0.5129401; auc_score: 0.4634978361699238; f1_score: 0.48722466960352423; since_best_f1_score: 2; max_f1_score: 0.9559471365638766;
FastEstimator-BestModelSaver: Restoring model from /tmp/tmpf8gmdf9j/reconstructor_best_f1_score.h5
FastEstimator-BestModelSaver: Restoring model from /tmp/tmpf8gmdf9j/discriminator_best_f1_score.h5
FastEstimator-Finish: step: 1060; total_time: 62.06 sec; discriminator_lr: 1e-04; reconstructor_lr: 0.0002;
###Markdown
InferencingOnce the training is finished, we will apply the model to visualize the reconstructed images of an inlier and an outlier.
###Code
idx0 = np.random.randint(len(x_eval0))
idx1 = np.random.randint(len(x_eval1))
data = [{"x": x_eval0[idx0]}, {"x": x_eval1[idx1]}]
result = [pipeline.transform(data[i], mode="infer") for i in range(len(data))]
network = fe.Network(ops=[
ModelOp(model=recon_model, inputs="x", outputs="x_fake"),
ModelOp(model=disc_model, inputs="x_fake", outputs="fake_score")
])
output_imgs = [network.transform(result[i], mode="infer") for i in range(len(result))]
base_image = output_imgs[0]["x"].numpy()
anomaly_image = output_imgs[1]["x"].numpy()
recon_base_image = output_imgs[0]["x_fake"].numpy()
recon_anomaly_image = output_imgs[1]["x_fake"].numpy()
img1 = fe.util.ImgData(Input_Image=base_image, Reconstructed_Image=recon_base_image)
fig1 = img1.paint_figure()
img2 = fe.util.ImgData(Input_Image=anomaly_image, Reconstructed_Image=recon_anomaly_image)
fig2 = img2.paint_figure()
###Output
_____no_output_____ |
jupyter/historical.ipynb | ###Markdown
[accounts](./accounts.ipynb) | [orders](./orders.ipynb) | [trades](./trades.ipynb) | [positions](./positions.ipynb) | [historical](./historical.ipynb) | [streams](./streams.ipynb) | [errors](./exceptions.ipynb) Historical dataOANDA provides access to historical data. The *oandapyV20* library has a class to access this data: *oandapyV20.endpoints.instruments.InstrumentsCandles*.Let's give it a try and download some data for: + instrument: EUR_USD + granularity: H1 + from: 2017-01-01T00:00:00
###Code
import json
import oandapyV20
import oandapyV20.endpoints.instruments as instruments
from exampleauth import exampleauth
accountID, access_token = exampleauth.exampleAuth()
client = oandapyV20.API(access_token=access_token)
instrument = "EUR_USD"
params = {
"from": "2017-01-01T00:00:00Z",
"granularity": "H1",
"count": 10,
}
r = instruments.InstrumentsCandles(instrument=instrument, params=params)
response = client.request(r)
print("Request: {} #candles received: {}".format(r, len(r.response.get('candles'))))
print(json.dumps(response, indent=2))
###Output
Request: v3/instruments/EUR_USD/candles #candles received: 9
{
"instrument": "EUR_USD",
"candles": [
{
"volume": 481,
"mid": {
"h": "1.04712",
"c": "1.04662",
"l": "1.04572",
"o": "1.04577"
},
"complete": true,
"time": "2017-01-03T00:00:00.000000000Z"
},
{
"volume": 664,
"mid": {
"h": "1.04808",
"c": "1.04758",
"l": "1.04646",
"o": "1.04665"
},
"complete": true,
"time": "2017-01-03T01:00:00.000000000Z"
},
{
"volume": 392,
"mid": {
"h": "1.04780",
"c": "1.04721",
"l": "1.04709",
"o": "1.04761"
},
"complete": true,
"time": "2017-01-03T02:00:00.000000000Z"
},
{
"volume": 394,
"mid": {
"h": "1.04848",
"c": "1.04848",
"l": "1.04715",
"o": "1.04718"
},
"complete": true,
"time": "2017-01-03T03:00:00.000000000Z"
},
{
"volume": 285,
"mid": {
"h": "1.04898",
"c": "1.04884",
"l": "1.04820",
"o": "1.04852"
},
"complete": true,
"time": "2017-01-03T04:00:00.000000000Z"
},
{
"volume": 250,
"mid": {
"h": "1.04902",
"c": "1.04824",
"l": "1.04816",
"o": "1.04886"
},
"complete": true,
"time": "2017-01-03T05:00:00.000000000Z"
},
{
"volume": 368,
"mid": {
"h": "1.04892",
"c": "1.04882",
"l": "1.04813",
"o": "1.04821"
},
"complete": true,
"time": "2017-01-03T06:00:00.000000000Z"
},
{
"volume": 1639,
"mid": {
"h": "1.04888",
"c": "1.04602",
"l": "1.04536",
"o": "1.04885"
},
"complete": true,
"time": "2017-01-03T07:00:00.000000000Z"
},
{
"volume": 2830,
"mid": {
"h": "1.04658",
"c": "1.04353",
"l": "1.04207",
"o": "1.04606"
},
"complete": true,
"time": "2017-01-03T08:00:00.000000000Z"
}
],
"granularity": "H1"
}
###Markdown
So, that is only 9 records? That can be fixed by including the parameter *includeFirst*; see the OANDA documentation for details.
###Code
instrument = "EUR_USD"
params = {
"from": "2017-01-01T00:00:00Z",
"granularity": "H1",
"includeFirst": True,
"count": 10,
}
r = instruments.InstrumentsCandles(instrument=instrument, params=params)
response = client.request(r)
print("Request: {} #candles received: {}".format(r, len(r.response.get('candles'))))
print(json.dumps(response, indent=2))
###Output
Request: v3/instruments/EUR_USD/candles #candles received: 10
{
"instrument": "EUR_USD",
"candles": [
{
"volume": 974,
"mid": {
"h": "1.04711",
"c": "1.04575",
"l": "1.04567",
"o": "1.04684"
},
"complete": true,
"time": "2017-01-02T23:00:00.000000000Z"
},
{
"volume": 481,
"mid": {
"h": "1.04712",
"c": "1.04662",
"l": "1.04572",
"o": "1.04577"
},
"complete": true,
"time": "2017-01-03T00:00:00.000000000Z"
},
{
"volume": 664,
"mid": {
"h": "1.04808",
"c": "1.04758",
"l": "1.04646",
"o": "1.04665"
},
"complete": true,
"time": "2017-01-03T01:00:00.000000000Z"
},
{
"volume": 392,
"mid": {
"h": "1.04780",
"c": "1.04721",
"l": "1.04709",
"o": "1.04761"
},
"complete": true,
"time": "2017-01-03T02:00:00.000000000Z"
},
{
"volume": 394,
"mid": {
"h": "1.04848",
"c": "1.04848",
"l": "1.04715",
"o": "1.04718"
},
"complete": true,
"time": "2017-01-03T03:00:00.000000000Z"
},
{
"volume": 285,
"mid": {
"h": "1.04898",
"c": "1.04884",
"l": "1.04820",
"o": "1.04852"
},
"complete": true,
"time": "2017-01-03T04:00:00.000000000Z"
},
{
"volume": 250,
"mid": {
"h": "1.04902",
"c": "1.04824",
"l": "1.04816",
"o": "1.04886"
},
"complete": true,
"time": "2017-01-03T05:00:00.000000000Z"
},
{
"volume": 368,
"mid": {
"h": "1.04892",
"c": "1.04882",
"l": "1.04813",
"o": "1.04821"
},
"complete": true,
"time": "2017-01-03T06:00:00.000000000Z"
},
{
"volume": 1639,
"mid": {
"h": "1.04888",
"c": "1.04602",
"l": "1.04536",
"o": "1.04885"
},
"complete": true,
"time": "2017-01-03T07:00:00.000000000Z"
},
{
"volume": 2830,
"mid": {
"h": "1.04658",
"c": "1.04353",
"l": "1.04207",
"o": "1.04606"
},
"complete": true,
"time": "2017-01-03T08:00:00.000000000Z"
}
],
"granularity": "H1"
}
###Markdown
Bulk history InstrumentsCandles classIt is likely that you want to retrieve more than 10 records. The OANDA docs say that the default number of records is 500 if you do not specify it. You can specify the number of records to retrieve by using *count*, with a maximum of 5000. The *InstrumentsCandles* class enables you to retrieve the records. InstrumentsCandlesFactoryNow if you would like to retrieve a lot of history, you have to make consecutive requests. To make this an easy process the *oandapyV20* library comes with a so-called *factory* named *InstrumentsCandlesFactory*.Using this class you can retrieve all history of an instrument from a certain date. The *InstrumentsCandlesFactory* acts as a generator, generating *InstrumentsCandles* requests until all data is retrieved. The number of requests can be influenced by specifying *count*: setting *count* to 5000 would generate a tenth of the requests vs. the default of 500 (see the short sketch below).Back to our example: let's make sure we request a lot of data, so we set the *granularity* to *M5* and leave the date at 2017-01-01T00:00:00. This will retrieve all records from that date up to today, because we did not specify the *to* parameter.
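As a minimal sketch of the *count* option (not executed here; the full example below simply keeps the default of 500 records per request), you would just add *count* to the params dict:
###Code
# Sketch only: the same request parameters as the example below, but asking the
# factory to generate requests of up to 5000 candles each instead of the default 500.
params = {
    "from": "2017-01-01T00:00:00Z",
    "granularity": "M5",
    "count": 5000,
}
###Output
_____no_output_____
###Markdown
Now the full example, using the default of 500: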
###Code
import json
import oandapyV20
import oandapyV20.endpoints.instruments as instruments
from oandapyV20.contrib.factories import InstrumentsCandlesFactory
from exampleauth import exampleauth
accountID, access_token = exampleauth.exampleAuth()
client = oandapyV20.API(access_token=access_token)
instrument = "EUR_USD"
params = {
"from": "2017-01-01T00:00:00Z",
"granularity": "M5",
}
def cnv(r, h):
# get all candles from the response and write them as a record to the filehandle h
for candle in r.get('candles'):
ctime = candle.get('time')[0:19]
try:
rec = "{time},{complete},{o},{h},{l},{c},{v}".format(
time=ctime,
complete=candle['complete'],
o=candle['mid']['o'],
h=candle['mid']['h'],
l=candle['mid']['l'],
c=candle['mid']['c'],
v=candle['volume'],
)
except Exception as e:
print(e, r)
else:
h.write(rec+"\n")
datafile = "/tmp/{}.{}.out".format(instrument, params['granularity'])
with open(datafile, "w") as O:
n = 0
for r in InstrumentsCandlesFactory(instrument=instrument, params=params):
rv = client.request(r)
cnt = len(r.response.get('candles'))
print("REQUEST: {} {} {}, received: {}".format(r, r.__class__.__name__, r.params, cnt))
n += cnt
cnv(r.response, O)
print("Check the datafile: {} under /tmp!, it contains {} records".format(datafile, n))
###Output
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-02T17:40:00Z', 'from': '2017-01-01T00:00:00Z', 'granularity': 'M5'}, received: 0
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-04T11:25:00Z', 'from': '2017-01-02T17:45:00Z', 'granularity': 'M5'}, received: 436
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-06T05:10:00Z', 'from': '2017-01-04T11:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-07T22:55:00Z', 'from': '2017-01-06T05:15:00Z', 'granularity': 'M5'}, received: 200
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-09T16:40:00Z', 'from': '2017-01-07T23:00:00Z', 'granularity': 'M5'}, received: 222
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-11T10:25:00Z', 'from': '2017-01-09T16:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-13T04:10:00Z', 'from': '2017-01-11T10:30:00Z', 'granularity': 'M5'}, received: 498
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-14T21:55:00Z', 'from': '2017-01-13T04:15:00Z', 'granularity': 'M5'}, received: 212
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-16T15:40:00Z', 'from': '2017-01-14T22:00:00Z', 'granularity': 'M5'}, received: 211
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-18T09:25:00Z', 'from': '2017-01-16T15:45:00Z', 'granularity': 'M5'}, received: 497
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-20T03:10:00Z', 'from': '2017-01-18T09:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-21T20:55:00Z', 'from': '2017-01-20T03:15:00Z', 'granularity': 'M5'}, received: 224
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-23T14:40:00Z', 'from': '2017-01-21T21:00:00Z', 'granularity': 'M5'}, received: 193
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-25T08:25:00Z', 'from': '2017-01-23T14:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-27T02:10:00Z', 'from': '2017-01-25T08:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-28T19:55:00Z', 'from': '2017-01-27T02:15:00Z', 'granularity': 'M5'}, received: 236
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-30T13:40:00Z', 'from': '2017-01-28T20:00:00Z', 'granularity': 'M5'}, received: 187
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-01T07:25:00Z', 'from': '2017-01-30T13:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-03T01:10:00Z', 'from': '2017-02-01T07:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-04T18:55:00Z', 'from': '2017-02-03T01:15:00Z', 'granularity': 'M5'}, received: 248
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-06T12:40:00Z', 'from': '2017-02-04T19:00:00Z', 'granularity': 'M5'}, received: 175
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-08T06:25:00Z', 'from': '2017-02-06T12:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-10T00:10:00Z', 'from': '2017-02-08T06:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-11T17:55:00Z', 'from': '2017-02-10T00:15:00Z', 'granularity': 'M5'}, received: 260
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-13T11:40:00Z', 'from': '2017-02-11T18:00:00Z', 'granularity': 'M5'}, received: 163
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-15T05:25:00Z', 'from': '2017-02-13T11:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-16T23:10:00Z', 'from': '2017-02-15T05:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-18T16:55:00Z', 'from': '2017-02-16T23:15:00Z', 'granularity': 'M5'}, received: 272
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-20T10:40:00Z', 'from': '2017-02-18T17:00:00Z', 'granularity': 'M5'}, received: 151
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-22T04:25:00Z', 'from': '2017-02-20T10:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-23T22:10:00Z', 'from': '2017-02-22T04:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-25T15:55:00Z', 'from': '2017-02-23T22:15:00Z', 'granularity': 'M5'}, received: 284
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-27T09:40:00Z', 'from': '2017-02-25T16:00:00Z', 'granularity': 'M5'}, received: 139
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-01T03:25:00Z', 'from': '2017-02-27T09:45:00Z', 'granularity': 'M5'}, received: 498
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-02T21:10:00Z', 'from': '2017-03-01T03:30:00Z', 'granularity': 'M5'}, received: 498
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-04T14:55:00Z', 'from': '2017-03-02T21:15:00Z', 'granularity': 'M5'}, received: 296
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-06T08:40:00Z', 'from': '2017-03-04T15:00:00Z', 'granularity': 'M5'}, received: 127
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-08T02:25:00Z', 'from': '2017-03-06T08:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-09T20:10:00Z', 'from': '2017-03-08T02:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-11T13:55:00Z', 'from': '2017-03-09T20:15:00Z', 'granularity': 'M5'}, received: 308
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-13T07:40:00Z', 'from': '2017-03-11T14:00:00Z', 'granularity': 'M5'}, received: 126
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-15T01:25:00Z', 'from': '2017-03-13T07:45:00Z', 'granularity': 'M5'}, received: 497
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-16T19:10:00Z', 'from': '2017-03-15T01:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-18T12:55:00Z', 'from': '2017-03-16T19:15:00Z', 'granularity': 'M5'}, received: 308
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-20T06:40:00Z', 'from': '2017-03-18T13:00:00Z', 'granularity': 'M5'}, received: 115
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-22T00:25:00Z', 'from': '2017-03-20T06:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-23T18:10:00Z', 'from': '2017-03-22T00:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-25T11:55:00Z', 'from': '2017-03-23T18:15:00Z', 'granularity': 'M5'}, received: 319
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-27T05:40:00Z', 'from': '2017-03-25T12:00:00Z', 'granularity': 'M5'}, received: 103
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-28T23:25:00Z', 'from': '2017-03-27T05:45:00Z', 'granularity': 'M5'}, received: 497
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-30T17:10:00Z', 'from': '2017-03-28T23:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-01T10:55:00Z', 'from': '2017-03-30T17:15:00Z', 'granularity': 'M5'}, received: 331
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-03T04:40:00Z', 'from': '2017-04-01T11:00:00Z', 'granularity': 'M5'}, received: 91
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-04T22:25:00Z', 'from': '2017-04-03T04:45:00Z', 'granularity': 'M5'}, received: 497
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-06T16:10:00Z', 'from': '2017-04-04T22:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-08T09:55:00Z', 'from': '2017-04-06T16:15:00Z', 'granularity': 'M5'}, received: 343
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-10T03:40:00Z', 'from': '2017-04-08T10:00:00Z', 'granularity': 'M5'}, received: 79
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-11T21:25:00Z', 'from': '2017-04-10T03:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-13T15:10:00Z', 'from': '2017-04-11T21:30:00Z', 'granularity': 'M5'}, received: 497
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-15T08:55:00Z', 'from': '2017-04-13T15:15:00Z', 'granularity': 'M5'}, received: 352
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-17T02:40:00Z', 'from': '2017-04-15T09:00:00Z', 'granularity': 'M5'}, received: 67
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-18T20:25:00Z', 'from': '2017-04-17T02:45:00Z', 'granularity': 'M5'}, received: 496
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-20T14:10:00Z', 'from': '2017-04-18T20:30:00Z', 'granularity': 'M5'}, received: 497
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-22T07:55:00Z', 'from': '2017-04-20T14:15:00Z', 'granularity': 'M5'}, received: 366
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-24T01:40:00Z', 'from': '2017-04-22T08:00:00Z', 'granularity': 'M5'}, received: 55
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-25T19:25:00Z', 'from': '2017-04-24T01:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-27T13:10:00Z', 'from': '2017-04-25T19:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-29T06:55:00Z', 'from': '2017-04-27T13:15:00Z', 'granularity': 'M5'}, received: 379
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-01T00:40:00Z', 'from': '2017-04-29T07:00:00Z', 'granularity': 'M5'}, received: 43
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-02T18:25:00Z', 'from': '2017-05-01T00:45:00Z', 'granularity': 'M5'}, received: 497
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-04T12:10:00Z', 'from': '2017-05-02T18:30:00Z', 'granularity': 'M5'}, received: 496
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-06T05:55:00Z', 'from': '2017-05-04T12:15:00Z', 'granularity': 'M5'}, received: 392
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-07T23:40:00Z', 'from': '2017-05-06T06:00:00Z', 'granularity': 'M5'}, received: 31
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-09T17:25:00Z', 'from': '2017-05-07T23:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-11T11:10:00Z', 'from': '2017-05-09T17:30:00Z', 'granularity': 'M5'}, received: 498
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-13T04:55:00Z', 'from': '2017-05-11T11:15:00Z', 'granularity': 'M5'}, received: 402
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-14T22:40:00Z', 'from': '2017-05-13T05:00:00Z', 'granularity': 'M5'}, received: 19
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-16T16:25:00Z', 'from': '2017-05-14T22:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-18T10:10:00Z', 'from': '2017-05-16T16:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-20T03:55:00Z', 'from': '2017-05-18T10:15:00Z', 'granularity': 'M5'}, received: 416
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-21T21:40:00Z', 'from': '2017-05-20T04:00:00Z', 'granularity': 'M5'}, received: 7
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-23T15:25:00Z', 'from': '2017-05-21T21:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-25T09:10:00Z', 'from': '2017-05-23T15:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-27T02:55:00Z', 'from': '2017-05-25T09:15:00Z', 'granularity': 'M5'}, received: 428
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-28T20:40:00Z', 'from': '2017-05-27T03:00:00Z', 'granularity': 'M5'}, received: 0
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-30T14:25:00Z', 'from': '2017-05-28T20:45:00Z', 'granularity': 'M5'}, received: 491
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-01T08:10:00Z', 'from': '2017-05-30T14:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-03T01:55:00Z', 'from': '2017-06-01T08:15:00Z', 'granularity': 'M5'}, received: 440
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-04T19:40:00Z', 'from': '2017-06-03T02:00:00Z', 'granularity': 'M5'}, received: 0
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-06T13:25:00Z', 'from': '2017-06-04T19:45:00Z', 'granularity': 'M5'}, received: 483
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-08T07:10:00Z', 'from': '2017-06-06T13:30:00Z', 'granularity': 'M5'}, received: 498
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-10T00:55:00Z', 'from': '2017-06-08T07:15:00Z', 'granularity': 'M5'}, received: 452
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-11T18:40:00Z', 'from': '2017-06-10T01:00:00Z', 'granularity': 'M5'}, received: 0
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-13T12:25:00Z', 'from': '2017-06-11T18:45:00Z', 'granularity': 'M5'}, received: 471
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-15T06:10:00Z', 'from': '2017-06-13T12:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-16T23:55:00Z', 'from': '2017-06-15T06:15:00Z', 'granularity': 'M5'}, received: 464
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-18T17:40:00Z', 'from': '2017-06-17T00:00:00Z', 'granularity': 'M5'}, received: 0
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-20T11:25:00Z', 'from': '2017-06-18T17:45:00Z', 'granularity': 'M5'}, received: 458
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-22T05:10:00Z', 'from': '2017-06-20T11:30:00Z', 'granularity': 'M5'}, received: 495
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-23T22:55:00Z', 'from': '2017-06-22T05:15:00Z', 'granularity': 'M5'}, received: 474
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-25T16:40:00Z', 'from': '2017-06-23T23:00:00Z', 'granularity': 'M5'}, received: 0
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-27T10:25:00Z', 'from': '2017-06-25T16:45:00Z', 'granularity': 'M5'}, received: 444
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-29T04:10:00Z', 'from': '2017-06-27T10:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-30T21:55:00Z', 'from': '2017-06-29T04:15:00Z', 'granularity': 'M5'}, received: 488
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-02T15:40:00Z', 'from': '2017-06-30T22:00:00Z', 'granularity': 'M5'}, received: 0
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-04T09:25:00Z', 'from': '2017-07-02T15:45:00Z', 'granularity': 'M5'}, received: 433
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-06T03:10:00Z', 'from': '2017-07-04T09:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-07T20:55:00Z', 'from': '2017-07-06T03:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-09T14:40:00Z', 'from': '2017-07-07T21:00:00Z', 'granularity': 'M5'}, received: 0
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-11T08:25:00Z', 'from': '2017-07-09T14:45:00Z', 'granularity': 'M5'}, received: 422
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-13T02:10:00Z', 'from': '2017-07-11T08:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-14T19:55:00Z', 'from': '2017-07-13T02:15:00Z', 'granularity': 'M5'}, received: 498
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-16T13:40:00Z', 'from': '2017-07-14T20:00:00Z', 'granularity': 'M5'}, received: 11
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-18T07:25:00Z', 'from': '2017-07-16T13:45:00Z', 'granularity': 'M5'}, received: 412
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-20T01:10:00Z', 'from': '2017-07-18T07:30:00Z', 'granularity': 'M5'}, received: 498
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-21T18:55:00Z', 'from': '2017-07-20T01:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-23T12:40:00Z', 'from': '2017-07-21T19:00:00Z', 'granularity': 'M5'}, received: 23
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-25T06:25:00Z', 'from': '2017-07-23T12:45:00Z', 'granularity': 'M5'}, received: 400
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-27T00:10:00Z', 'from': '2017-07-25T06:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-28T17:55:00Z', 'from': '2017-07-27T00:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-30T11:40:00Z', 'from': '2017-07-28T18:00:00Z', 'granularity': 'M5'}, received: 35
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-01T05:25:00Z', 'from': '2017-07-30T11:45:00Z', 'granularity': 'M5'}, received: 388
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-02T23:10:00Z', 'from': '2017-08-01T05:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-04T16:55:00Z', 'from': '2017-08-02T23:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-06T10:40:00Z', 'from': '2017-08-04T17:00:00Z', 'granularity': 'M5'}, received: 47
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-08T04:25:00Z', 'from': '2017-08-06T10:45:00Z', 'granularity': 'M5'}, received: 376
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-09T22:10:00Z', 'from': '2017-08-08T04:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-11T15:55:00Z', 'from': '2017-08-09T22:15:00Z', 'granularity': 'M5'}, received: 498
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-13T09:40:00Z', 'from': '2017-08-11T16:00:00Z', 'granularity': 'M5'}, received: 59
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-15T03:25:00Z', 'from': '2017-08-13T09:45:00Z', 'granularity': 'M5'}, received: 364
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-16T21:10:00Z', 'from': '2017-08-15T03:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-18T14:55:00Z', 'from': '2017-08-16T21:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-20T08:40:00Z', 'from': '2017-08-18T15:00:00Z', 'granularity': 'M5'}, received: 71
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-22T02:25:00Z', 'from': '2017-08-20T08:45:00Z', 'granularity': 'M5'}, received: 352
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-23T20:10:00Z', 'from': '2017-08-22T02:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-25T13:55:00Z', 'from': '2017-08-23T20:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-27T07:40:00Z', 'from': '2017-08-25T14:00:00Z', 'granularity': 'M5'}, received: 83
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-29T01:25:00Z', 'from': '2017-08-27T07:45:00Z', 'granularity': 'M5'}, received: 340
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-30T19:10:00Z', 'from': '2017-08-29T01:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-01T12:55:00Z', 'from': '2017-08-30T19:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-03T06:40:00Z', 'from': '2017-09-01T13:00:00Z', 'granularity': 'M5'}, received: 95
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-05T00:25:00Z', 'from': '2017-09-03T06:45:00Z', 'granularity': 'M5'}, received: 325
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-06T18:10:00Z', 'from': '2017-09-05T00:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-08T11:55:00Z', 'from': '2017-09-06T18:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-10T05:40:00Z', 'from': '2017-09-08T12:00:00Z', 'granularity': 'M5'}, received: 107
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-11T23:25:00Z', 'from': '2017-09-10T05:45:00Z', 'granularity': 'M5'}, received: 315
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-13T17:10:00Z', 'from': '2017-09-11T23:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-15T10:55:00Z', 'from': '2017-09-13T17:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-17T04:40:00Z', 'from': '2017-09-15T11:00:00Z', 'granularity': 'M5'}, received: 119
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-18T22:25:00Z', 'from': '2017-09-17T04:45:00Z', 'granularity': 'M5'}, received: 303
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-20T16:10:00Z', 'from': '2017-09-18T22:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-22T09:55:00Z', 'from': '2017-09-20T16:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-24T03:40:00Z', 'from': '2017-09-22T10:00:00Z', 'granularity': 'M5'}, received: 131
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-24T17:54:30Z', 'from': '2017-09-24T03:45:00Z', 'granularity': 'M5'}, received: 0
Check the datafile: /tmp/EUR_USD.M5.out under /tmp!, it contains 54090 records
###Markdown
... that was easy ... All requests were made with the default of max. 500 records per request. With a granularity of *M5* this means we have 288 records per day. The algorithm of the factory does not check for weekends or holidays, so some requests return only a part of the 500 requested records because there simply are no more records within the specified timespan. If you want to decrease the number of requests and increase the number of records returned per request, just specify *count* as a number higher than 500, up to a maximum of 5000. Create Pandas DataFrame
###Code
import json
import oandapyV20
import oandapyV20.endpoints.instruments as instruments
from exampleauth import exampleauth
import pandas as pd
accountID, access_token = exampleauth.exampleAuth()
client = oandapyV20.API(access_token=access_token)
instrument = "EUR_USD"
params = {
"from": "2017-01-01T00:00:00Z",
"granularity": "H1",
# "count": 10,
}
r = instruments.InstrumentsCandles(instrument=instrument, params=params)
response = client.request(r)
print("Request: {} #candles received: {}".format(r, len(r.response.get('candles'))))
candles = {}
for candle in r.response.get('candles'):
try:
ohlcv = candle['mid']
ohlcv['v'] = candle['volume']
candles[candle['time']] = ohlcv
except Exception as e:
print(e)
df = pd.DataFrame.from_dict(candles, orient='index')
# the prices arrive as strings: convert them to numeric values
df["o"] = pd.to_numeric(df.o, errors='coerce')
df["h"] = pd.to_numeric(df.h, errors='coerce')
df["l"] = pd.to_numeric(df.l, errors='coerce')
df["c"] = pd.to_numeric(df.c, errors='coerce')
# rename by label rather than by position, so the result does not depend on the column order
df = df.rename(columns={'o': 'open', 'h': 'high', 'l': 'low', 'c': 'close', 'v': 'volume'})
df = df[['open', 'high', 'low', 'close', 'volume']]
df.index = pd.DatetimeIndex(df.index)
df.index.names = ['Date']
df.info()
df
###Output
Request: v3/instruments/EUR_USD/candles #candles received: 500
<class 'pandas.core.frame.DataFrame'>
DatetimeIndex: 500 entries, 2017-01-02 23:00:00+00:00 to 2017-01-31 18:00:00+00:00
Data columns (total 5 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 open 500 non-null float64
1 high 500 non-null float64
2 low 500 non-null float64
3 close 500 non-null float64
4 volume 500 non-null int64
dtypes: float64(4), int64(1)
memory usage: 23.4 KB
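###Markdown
The factory example above wrote its candles to a plain CSV-style file (one `time,complete,o,h,l,c,v` record per line, no header row). As a minimal sketch, assuming that datafile is still present under /tmp, it can be loaded into a DataFrame as well; the variable name *hist* is just illustrative:
###Code
import pandas as pd

# column names in the order written by the cnv() helper of the factory example; the file has no header row
cols = ['time', 'complete', 'open', 'high', 'low', 'close', 'volume']
hist = pd.read_csv("/tmp/EUR_USD.M5.out", names=cols,
                   parse_dates=['time'], index_col='time')
hist.info()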
###Markdown
Add a cufflinks plot
###Code
%pip install cufflinks
%matplotlib inline
import warnings
import matplotlib.pyplot as plt
plt.style.use('seaborn')
plt.rcParams['figure.figsize'] = [16, 9]
plt.rcParams['figure.dpi'] = 300
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings('ignore')
import cufflinks as cf
import plotly.offline
cf.go_offline()
cf.set_config_file(offline=False, world_readable=True)
qf = cf.QuantFig(df, title="EUR_USD", legend='top', name='EUR_USD')
qf.add_sma(periods=20, column='close', color='red')
qf.add_ema(periods=20, color='green')
qf.add_volume()
qf.iplot()
###Output
_____no_output_____
###Markdown
[accounts](./accounts.ipynb) | [orders](./orders.ipynb) | [trades](./trades.ipynb) | [positions](./positions.ipynb) | [historical](./historical.ipynb) | [streams](./streams.ipynb) | [errors](./exceptions.ipynb) Historical data OANDA provides access to historical data. The *oandapyV20* library has a class to access this data: *oandapyV20.endpoints.instruments.InstrumentsCandles*. Let's give it a try and download some data for: + instrument: EUR_USD + granularity: H1 + from: 2017-01-01T00:00:00
###Code
import json
import oandapyV20
import oandapyV20.endpoints.instruments as instruments
from exampleauth import exampleauth
accountID, access_token = exampleauth.exampleAuth()
client = oandapyV20.API(access_token=access_token)
instrument = "EUR_USD"
params = {
"from": "2017-01-01T00:00:00Z",
"granularity": "H1",
"count": 10,
}
r = instruments.InstrumentsCandles(instrument=instrument, params=params)
response = client.request(r)
print("Request: {} #candles received: {}".format(r, len(r.response.get('candles'))))
print(json.dumps(response, indent=2))
###Output
Request: v3/instruments/EUR_USD/candles #candles received: 9
{
"instrument": "EUR_USD",
"candles": [
{
"volume": 481,
"mid": {
"h": "1.04712",
"c": "1.04662",
"l": "1.04572",
"o": "1.04577"
},
"complete": true,
"time": "2017-01-03T00:00:00.000000000Z"
},
{
"volume": 664,
"mid": {
"h": "1.04808",
"c": "1.04758",
"l": "1.04646",
"o": "1.04665"
},
"complete": true,
"time": "2017-01-03T01:00:00.000000000Z"
},
{
"volume": 392,
"mid": {
"h": "1.04780",
"c": "1.04721",
"l": "1.04709",
"o": "1.04761"
},
"complete": true,
"time": "2017-01-03T02:00:00.000000000Z"
},
{
"volume": 394,
"mid": {
"h": "1.04848",
"c": "1.04848",
"l": "1.04715",
"o": "1.04718"
},
"complete": true,
"time": "2017-01-03T03:00:00.000000000Z"
},
{
"volume": 285,
"mid": {
"h": "1.04898",
"c": "1.04884",
"l": "1.04820",
"o": "1.04852"
},
"complete": true,
"time": "2017-01-03T04:00:00.000000000Z"
},
{
"volume": 250,
"mid": {
"h": "1.04902",
"c": "1.04824",
"l": "1.04816",
"o": "1.04886"
},
"complete": true,
"time": "2017-01-03T05:00:00.000000000Z"
},
{
"volume": 368,
"mid": {
"h": "1.04892",
"c": "1.04882",
"l": "1.04813",
"o": "1.04821"
},
"complete": true,
"time": "2017-01-03T06:00:00.000000000Z"
},
{
"volume": 1639,
"mid": {
"h": "1.04888",
"c": "1.04602",
"l": "1.04536",
"o": "1.04885"
},
"complete": true,
"time": "2017-01-03T07:00:00.000000000Z"
},
{
"volume": 2830,
"mid": {
"h": "1.04658",
"c": "1.04353",
"l": "1.04207",
"o": "1.04606"
},
"complete": true,
"time": "2017-01-03T08:00:00.000000000Z"
}
],
"granularity": "H1"
}
###Markdown
So, that is only 9 records instead of the requested 10? That can be fixed by including the parameter *includeFirst*; see the OANDA documentation for details.
###Code
instrument = "EUR_USD"
params = {
"from": "2017-01-01T00:00:00Z",
"granularity": "H1",
"includeFirst": True,
"count": 10,
}
r = instruments.InstrumentsCandles(instrument=instrument, params=params)
response = client.request(r)
print("Request: {} #candles received: {}".format(r, len(r.response.get('candles'))))
print(json.dumps(response, indent=2))
###Output
Request: v3/instruments/EUR_USD/candles #candles received: 10
{
"instrument": "EUR_USD",
"candles": [
{
"volume": 974,
"mid": {
"h": "1.04711",
"c": "1.04575",
"l": "1.04567",
"o": "1.04684"
},
"complete": true,
"time": "2017-01-02T23:00:00.000000000Z"
},
{
"volume": 481,
"mid": {
"h": "1.04712",
"c": "1.04662",
"l": "1.04572",
"o": "1.04577"
},
"complete": true,
"time": "2017-01-03T00:00:00.000000000Z"
},
{
"volume": 664,
"mid": {
"h": "1.04808",
"c": "1.04758",
"l": "1.04646",
"o": "1.04665"
},
"complete": true,
"time": "2017-01-03T01:00:00.000000000Z"
},
{
"volume": 392,
"mid": {
"h": "1.04780",
"c": "1.04721",
"l": "1.04709",
"o": "1.04761"
},
"complete": true,
"time": "2017-01-03T02:00:00.000000000Z"
},
{
"volume": 394,
"mid": {
"h": "1.04848",
"c": "1.04848",
"l": "1.04715",
"o": "1.04718"
},
"complete": true,
"time": "2017-01-03T03:00:00.000000000Z"
},
{
"volume": 285,
"mid": {
"h": "1.04898",
"c": "1.04884",
"l": "1.04820",
"o": "1.04852"
},
"complete": true,
"time": "2017-01-03T04:00:00.000000000Z"
},
{
"volume": 250,
"mid": {
"h": "1.04902",
"c": "1.04824",
"l": "1.04816",
"o": "1.04886"
},
"complete": true,
"time": "2017-01-03T05:00:00.000000000Z"
},
{
"volume": 368,
"mid": {
"h": "1.04892",
"c": "1.04882",
"l": "1.04813",
"o": "1.04821"
},
"complete": true,
"time": "2017-01-03T06:00:00.000000000Z"
},
{
"volume": 1639,
"mid": {
"h": "1.04888",
"c": "1.04602",
"l": "1.04536",
"o": "1.04885"
},
"complete": true,
"time": "2017-01-03T07:00:00.000000000Z"
},
{
"volume": 2830,
"mid": {
"h": "1.04658",
"c": "1.04353",
"l": "1.04207",
"o": "1.04606"
},
"complete": true,
"time": "2017-01-03T08:00:00.000000000Z"
}
],
"granularity": "H1"
}
###Markdown
Bulk history InstrumentsCandles class It is likely that you want to retrieve more than 10 records. The OANDA docs say that the default number of records is 500, in case you do not specify it. You can specify the number of records to retrieve by using *count*, with a maximum of 5000. The *InstrumentsCandles* class enables you to retrieve the records. InstrumentsCandlesFactory Now if you would like to retrieve a lot of history, you have to make consecutive requests. To make this an easy process the *oandapyV20* library comes with a so-called *factory* named *InstrumentsCandlesFactory*. Using this class you can retrieve all history of an instrument from a certain date. The *InstrumentsCandlesFactory* acts as a generator, generating *InstrumentsCandles* requests until all data is retrieved. The number of requests can be influenced by specifying *count*: setting *count* to 5000 would generate a tenth of the requests compared with the default of 500 (a short sketch of this follows below). Back to our example: let's make sure we request a lot of data, so we set the *granularity* to *M5* and leave the start date at 2017-01-01T00:00:00. This will retrieve all records from that date up to today, because we did not specify the *to* parameter.
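###Markdown
Before running the full M5 download below, here is a minimal sketch (not a cell from the original notebook) of how *count* could be raised to cut the number of requests, as described above. It reuses the *client* created earlier; *params_bulk* and *total* are just illustrative names:
###Code
from oandapyV20.contrib.factories import InstrumentsCandlesFactory

# sketch: same request as the example below, but each generated request
# may now return up to 5000 candles instead of the default 500
params_bulk = {
    "from": "2017-01-01T00:00:00Z",
    "granularity": "M5",
    "count": 5000,
}
total = 0
for r in InstrumentsCandlesFactory(instrument="EUR_USD", params=params_bulk):
    client.request(r)
    total += len(r.response.get('candles'))
print("received {} candles in far fewer requests".format(total))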
###Code
import json
import oandapyV20
import oandapyV20.endpoints.instruments as instruments
from oandapyV20.contrib.factories import InstrumentsCandlesFactory
from exampleauth import exampleauth
accountID, access_token = exampleauth.exampleAuth()
client = oandapyV20.API(access_token=access_token)
instrument = "EUR_USD"
params = {
"from": "2017-01-01T00:00:00Z",
"granularity": "M5",
}
def cnv(r, h):
# get all candles from the response and write them as a record to the filehandle h
for candle in r.get('candles'):
ctime = candle.get('time')[0:19]
try:
rec = "{time},{complete},{o},{h},{l},{c},{v}".format(
time=ctime,
complete=candle['complete'],
o=candle['mid']['o'],
h=candle['mid']['h'],
l=candle['mid']['l'],
c=candle['mid']['c'],
v=candle['volume'],
)
except Exception as e:
print(e, r)
else:
h.write(rec+"\n")
datafile = "/tmp/{}.{}.out".format(instrument, params['granularity'])
with open(datafile, "w") as O:
n = 0
for r in InstrumentsCandlesFactory(instrument=instrument, params=params):
rv = client.request(r)
cnt = len(r.response.get('candles'))
print("REQUEST: {} {} {}, received: {}".format(r, r.__class__.__name__, r.params, cnt))
n += cnt
cnv(r.response, O)
print("Check the datafile: {} under /tmp!, it contains {} records".format(datafile, n))
###Output
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-02T17:40:00Z', 'from': '2017-01-01T00:00:00Z', 'granularity': 'M5'}, received: 0
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-04T11:25:00Z', 'from': '2017-01-02T17:45:00Z', 'granularity': 'M5'}, received: 436
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-06T05:10:00Z', 'from': '2017-01-04T11:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-07T22:55:00Z', 'from': '2017-01-06T05:15:00Z', 'granularity': 'M5'}, received: 200
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-09T16:40:00Z', 'from': '2017-01-07T23:00:00Z', 'granularity': 'M5'}, received: 222
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-11T10:25:00Z', 'from': '2017-01-09T16:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-13T04:10:00Z', 'from': '2017-01-11T10:30:00Z', 'granularity': 'M5'}, received: 498
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-14T21:55:00Z', 'from': '2017-01-13T04:15:00Z', 'granularity': 'M5'}, received: 212
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-16T15:40:00Z', 'from': '2017-01-14T22:00:00Z', 'granularity': 'M5'}, received: 211
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-18T09:25:00Z', 'from': '2017-01-16T15:45:00Z', 'granularity': 'M5'}, received: 497
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-20T03:10:00Z', 'from': '2017-01-18T09:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-21T20:55:00Z', 'from': '2017-01-20T03:15:00Z', 'granularity': 'M5'}, received: 224
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-23T14:40:00Z', 'from': '2017-01-21T21:00:00Z', 'granularity': 'M5'}, received: 193
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-25T08:25:00Z', 'from': '2017-01-23T14:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-27T02:10:00Z', 'from': '2017-01-25T08:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-28T19:55:00Z', 'from': '2017-01-27T02:15:00Z', 'granularity': 'M5'}, received: 236
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-01-30T13:40:00Z', 'from': '2017-01-28T20:00:00Z', 'granularity': 'M5'}, received: 187
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-01T07:25:00Z', 'from': '2017-01-30T13:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-03T01:10:00Z', 'from': '2017-02-01T07:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-04T18:55:00Z', 'from': '2017-02-03T01:15:00Z', 'granularity': 'M5'}, received: 248
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-06T12:40:00Z', 'from': '2017-02-04T19:00:00Z', 'granularity': 'M5'}, received: 175
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-08T06:25:00Z', 'from': '2017-02-06T12:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-10T00:10:00Z', 'from': '2017-02-08T06:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-11T17:55:00Z', 'from': '2017-02-10T00:15:00Z', 'granularity': 'M5'}, received: 260
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-13T11:40:00Z', 'from': '2017-02-11T18:00:00Z', 'granularity': 'M5'}, received: 163
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-15T05:25:00Z', 'from': '2017-02-13T11:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-16T23:10:00Z', 'from': '2017-02-15T05:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-18T16:55:00Z', 'from': '2017-02-16T23:15:00Z', 'granularity': 'M5'}, received: 272
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-20T10:40:00Z', 'from': '2017-02-18T17:00:00Z', 'granularity': 'M5'}, received: 151
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-22T04:25:00Z', 'from': '2017-02-20T10:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-23T22:10:00Z', 'from': '2017-02-22T04:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-25T15:55:00Z', 'from': '2017-02-23T22:15:00Z', 'granularity': 'M5'}, received: 284
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-02-27T09:40:00Z', 'from': '2017-02-25T16:00:00Z', 'granularity': 'M5'}, received: 139
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-01T03:25:00Z', 'from': '2017-02-27T09:45:00Z', 'granularity': 'M5'}, received: 498
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-02T21:10:00Z', 'from': '2017-03-01T03:30:00Z', 'granularity': 'M5'}, received: 498
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-04T14:55:00Z', 'from': '2017-03-02T21:15:00Z', 'granularity': 'M5'}, received: 296
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-06T08:40:00Z', 'from': '2017-03-04T15:00:00Z', 'granularity': 'M5'}, received: 127
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-08T02:25:00Z', 'from': '2017-03-06T08:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-09T20:10:00Z', 'from': '2017-03-08T02:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-11T13:55:00Z', 'from': '2017-03-09T20:15:00Z', 'granularity': 'M5'}, received: 308
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-13T07:40:00Z', 'from': '2017-03-11T14:00:00Z', 'granularity': 'M5'}, received: 126
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-15T01:25:00Z', 'from': '2017-03-13T07:45:00Z', 'granularity': 'M5'}, received: 497
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-16T19:10:00Z', 'from': '2017-03-15T01:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-18T12:55:00Z', 'from': '2017-03-16T19:15:00Z', 'granularity': 'M5'}, received: 308
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-20T06:40:00Z', 'from': '2017-03-18T13:00:00Z', 'granularity': 'M5'}, received: 115
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-22T00:25:00Z', 'from': '2017-03-20T06:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-23T18:10:00Z', 'from': '2017-03-22T00:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-25T11:55:00Z', 'from': '2017-03-23T18:15:00Z', 'granularity': 'M5'}, received: 319
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-27T05:40:00Z', 'from': '2017-03-25T12:00:00Z', 'granularity': 'M5'}, received: 103
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-28T23:25:00Z', 'from': '2017-03-27T05:45:00Z', 'granularity': 'M5'}, received: 497
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-03-30T17:10:00Z', 'from': '2017-03-28T23:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-01T10:55:00Z', 'from': '2017-03-30T17:15:00Z', 'granularity': 'M5'}, received: 331
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-03T04:40:00Z', 'from': '2017-04-01T11:00:00Z', 'granularity': 'M5'}, received: 91
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-04T22:25:00Z', 'from': '2017-04-03T04:45:00Z', 'granularity': 'M5'}, received: 497
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-06T16:10:00Z', 'from': '2017-04-04T22:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-08T09:55:00Z', 'from': '2017-04-06T16:15:00Z', 'granularity': 'M5'}, received: 343
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-10T03:40:00Z', 'from': '2017-04-08T10:00:00Z', 'granularity': 'M5'}, received: 79
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-11T21:25:00Z', 'from': '2017-04-10T03:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-13T15:10:00Z', 'from': '2017-04-11T21:30:00Z', 'granularity': 'M5'}, received: 497
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-15T08:55:00Z', 'from': '2017-04-13T15:15:00Z', 'granularity': 'M5'}, received: 352
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-17T02:40:00Z', 'from': '2017-04-15T09:00:00Z', 'granularity': 'M5'}, received: 67
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-18T20:25:00Z', 'from': '2017-04-17T02:45:00Z', 'granularity': 'M5'}, received: 496
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-20T14:10:00Z', 'from': '2017-04-18T20:30:00Z', 'granularity': 'M5'}, received: 497
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-22T07:55:00Z', 'from': '2017-04-20T14:15:00Z', 'granularity': 'M5'}, received: 366
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-24T01:40:00Z', 'from': '2017-04-22T08:00:00Z', 'granularity': 'M5'}, received: 55
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-25T19:25:00Z', 'from': '2017-04-24T01:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-27T13:10:00Z', 'from': '2017-04-25T19:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-04-29T06:55:00Z', 'from': '2017-04-27T13:15:00Z', 'granularity': 'M5'}, received: 379
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-01T00:40:00Z', 'from': '2017-04-29T07:00:00Z', 'granularity': 'M5'}, received: 43
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-02T18:25:00Z', 'from': '2017-05-01T00:45:00Z', 'granularity': 'M5'}, received: 497
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-04T12:10:00Z', 'from': '2017-05-02T18:30:00Z', 'granularity': 'M5'}, received: 496
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-06T05:55:00Z', 'from': '2017-05-04T12:15:00Z', 'granularity': 'M5'}, received: 392
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-07T23:40:00Z', 'from': '2017-05-06T06:00:00Z', 'granularity': 'M5'}, received: 31
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-09T17:25:00Z', 'from': '2017-05-07T23:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-11T11:10:00Z', 'from': '2017-05-09T17:30:00Z', 'granularity': 'M5'}, received: 498
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-13T04:55:00Z', 'from': '2017-05-11T11:15:00Z', 'granularity': 'M5'}, received: 402
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-14T22:40:00Z', 'from': '2017-05-13T05:00:00Z', 'granularity': 'M5'}, received: 19
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-16T16:25:00Z', 'from': '2017-05-14T22:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-18T10:10:00Z', 'from': '2017-05-16T16:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-20T03:55:00Z', 'from': '2017-05-18T10:15:00Z', 'granularity': 'M5'}, received: 416
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-21T21:40:00Z', 'from': '2017-05-20T04:00:00Z', 'granularity': 'M5'}, received: 7
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-23T15:25:00Z', 'from': '2017-05-21T21:45:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-25T09:10:00Z', 'from': '2017-05-23T15:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-27T02:55:00Z', 'from': '2017-05-25T09:15:00Z', 'granularity': 'M5'}, received: 428
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-28T20:40:00Z', 'from': '2017-05-27T03:00:00Z', 'granularity': 'M5'}, received: 0
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-05-30T14:25:00Z', 'from': '2017-05-28T20:45:00Z', 'granularity': 'M5'}, received: 491
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-01T08:10:00Z', 'from': '2017-05-30T14:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-03T01:55:00Z', 'from': '2017-06-01T08:15:00Z', 'granularity': 'M5'}, received: 440
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-04T19:40:00Z', 'from': '2017-06-03T02:00:00Z', 'granularity': 'M5'}, received: 0
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-06T13:25:00Z', 'from': '2017-06-04T19:45:00Z', 'granularity': 'M5'}, received: 483
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-08T07:10:00Z', 'from': '2017-06-06T13:30:00Z', 'granularity': 'M5'}, received: 498
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-10T00:55:00Z', 'from': '2017-06-08T07:15:00Z', 'granularity': 'M5'}, received: 452
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-11T18:40:00Z', 'from': '2017-06-10T01:00:00Z', 'granularity': 'M5'}, received: 0
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-13T12:25:00Z', 'from': '2017-06-11T18:45:00Z', 'granularity': 'M5'}, received: 471
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-15T06:10:00Z', 'from': '2017-06-13T12:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-16T23:55:00Z', 'from': '2017-06-15T06:15:00Z', 'granularity': 'M5'}, received: 464
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-18T17:40:00Z', 'from': '2017-06-17T00:00:00Z', 'granularity': 'M5'}, received: 0
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-20T11:25:00Z', 'from': '2017-06-18T17:45:00Z', 'granularity': 'M5'}, received: 458
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-22T05:10:00Z', 'from': '2017-06-20T11:30:00Z', 'granularity': 'M5'}, received: 495
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-23T22:55:00Z', 'from': '2017-06-22T05:15:00Z', 'granularity': 'M5'}, received: 474
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-25T16:40:00Z', 'from': '2017-06-23T23:00:00Z', 'granularity': 'M5'}, received: 0
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-27T10:25:00Z', 'from': '2017-06-25T16:45:00Z', 'granularity': 'M5'}, received: 444
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-29T04:10:00Z', 'from': '2017-06-27T10:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-06-30T21:55:00Z', 'from': '2017-06-29T04:15:00Z', 'granularity': 'M5'}, received: 488
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-02T15:40:00Z', 'from': '2017-06-30T22:00:00Z', 'granularity': 'M5'}, received: 0
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-04T09:25:00Z', 'from': '2017-07-02T15:45:00Z', 'granularity': 'M5'}, received: 433
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-06T03:10:00Z', 'from': '2017-07-04T09:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-07T20:55:00Z', 'from': '2017-07-06T03:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-09T14:40:00Z', 'from': '2017-07-07T21:00:00Z', 'granularity': 'M5'}, received: 0
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-11T08:25:00Z', 'from': '2017-07-09T14:45:00Z', 'granularity': 'M5'}, received: 422
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-13T02:10:00Z', 'from': '2017-07-11T08:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-14T19:55:00Z', 'from': '2017-07-13T02:15:00Z', 'granularity': 'M5'}, received: 498
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-16T13:40:00Z', 'from': '2017-07-14T20:00:00Z', 'granularity': 'M5'}, received: 11
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-18T07:25:00Z', 'from': '2017-07-16T13:45:00Z', 'granularity': 'M5'}, received: 412
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-20T01:10:00Z', 'from': '2017-07-18T07:30:00Z', 'granularity': 'M5'}, received: 498
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-21T18:55:00Z', 'from': '2017-07-20T01:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-23T12:40:00Z', 'from': '2017-07-21T19:00:00Z', 'granularity': 'M5'}, received: 23
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-25T06:25:00Z', 'from': '2017-07-23T12:45:00Z', 'granularity': 'M5'}, received: 400
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-27T00:10:00Z', 'from': '2017-07-25T06:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-28T17:55:00Z', 'from': '2017-07-27T00:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-07-30T11:40:00Z', 'from': '2017-07-28T18:00:00Z', 'granularity': 'M5'}, received: 35
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-01T05:25:00Z', 'from': '2017-07-30T11:45:00Z', 'granularity': 'M5'}, received: 388
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-02T23:10:00Z', 'from': '2017-08-01T05:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-04T16:55:00Z', 'from': '2017-08-02T23:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-06T10:40:00Z', 'from': '2017-08-04T17:00:00Z', 'granularity': 'M5'}, received: 47
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-08T04:25:00Z', 'from': '2017-08-06T10:45:00Z', 'granularity': 'M5'}, received: 376
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-09T22:10:00Z', 'from': '2017-08-08T04:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-11T15:55:00Z', 'from': '2017-08-09T22:15:00Z', 'granularity': 'M5'}, received: 498
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-13T09:40:00Z', 'from': '2017-08-11T16:00:00Z', 'granularity': 'M5'}, received: 59
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-15T03:25:00Z', 'from': '2017-08-13T09:45:00Z', 'granularity': 'M5'}, received: 364
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-16T21:10:00Z', 'from': '2017-08-15T03:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-18T14:55:00Z', 'from': '2017-08-16T21:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-20T08:40:00Z', 'from': '2017-08-18T15:00:00Z', 'granularity': 'M5'}, received: 71
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-22T02:25:00Z', 'from': '2017-08-20T08:45:00Z', 'granularity': 'M5'}, received: 352
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-23T20:10:00Z', 'from': '2017-08-22T02:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-25T13:55:00Z', 'from': '2017-08-23T20:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-27T07:40:00Z', 'from': '2017-08-25T14:00:00Z', 'granularity': 'M5'}, received: 83
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-29T01:25:00Z', 'from': '2017-08-27T07:45:00Z', 'granularity': 'M5'}, received: 340
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-08-30T19:10:00Z', 'from': '2017-08-29T01:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-01T12:55:00Z', 'from': '2017-08-30T19:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-03T06:40:00Z', 'from': '2017-09-01T13:00:00Z', 'granularity': 'M5'}, received: 95
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-05T00:25:00Z', 'from': '2017-09-03T06:45:00Z', 'granularity': 'M5'}, received: 325
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-06T18:10:00Z', 'from': '2017-09-05T00:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-08T11:55:00Z', 'from': '2017-09-06T18:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-10T05:40:00Z', 'from': '2017-09-08T12:00:00Z', 'granularity': 'M5'}, received: 107
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-11T23:25:00Z', 'from': '2017-09-10T05:45:00Z', 'granularity': 'M5'}, received: 315
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-13T17:10:00Z', 'from': '2017-09-11T23:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-15T10:55:00Z', 'from': '2017-09-13T17:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-17T04:40:00Z', 'from': '2017-09-15T11:00:00Z', 'granularity': 'M5'}, received: 119
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-18T22:25:00Z', 'from': '2017-09-17T04:45:00Z', 'granularity': 'M5'}, received: 303
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-20T16:10:00Z', 'from': '2017-09-18T22:30:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-22T09:55:00Z', 'from': '2017-09-20T16:15:00Z', 'granularity': 'M5'}, received: 499
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-24T03:40:00Z', 'from': '2017-09-22T10:00:00Z', 'granularity': 'M5'}, received: 131
REQUEST: v3/instruments/EUR_USD/candles InstrumentsCandles {'to': '2017-09-24T17:54:30Z', 'from': '2017-09-24T03:45:00Z', 'granularity': 'M5'}, received: 0
Check the datafile: /tmp/EUR_USD.M5.out under /tmp!, it contains 54090 records
|
Tranformer_ourdataset.ipynb | ###Markdown
###Code
import IPython.display as ipd
# % pylab inline
import os
import pandas as pd
import librosa
import glob
import librosa.display
import random
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import Adam
from keras.utils import np_utils
from sklearn import metrics
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split, GridSearchCV
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor
from keras.callbacks import EarlyStopping
from keras import regularizers
from sklearn.preprocessing import LabelEncoder
from datetime import datetime
import numpy
from keras.models import Sequential
from keras.layers import LSTM
from keras.datasets import imdb
from keras.layers import Dense
import tensorflow as tf
import matplotlib.pyplot as plt
import gc
from tqdm import tqdm, tqdm_notebook
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.preprocessing import MultiLabelBinarizer
import zipfile
tqdm.pandas()
from google.colab import drive
drive.mount('/content/gdrive')
Directory = 'gdrive/MyDrive/Voice Inputs'
Dataset = os.listdir(Directory)
audio_list = []
speakers = []
for speaker in Dataset:
audios = os.listdir(Directory+'/'+speaker)
for audio in audios:
if(audio.endswith('.wav')):
audio_list.append(Directory+'/'+speaker+'/'+audio)
speakers.append(audio.split('_')[0])
audio_list = pd.DataFrame(audio_list)
audio_list = audio_list.rename(columns={0:'file'})
#len(audio_list)
len(speakers)
audio_list['speaker'] = speakers
df = audio_list.sample(frac=1, random_state=42).reset_index(drop=True)
df = df[:12000]
df_train = df[:8000] #19984:
df_validation = df[8000:11000] #19984:25694
df_test = df[11000:12000] #25694:
labels = df['speaker']
Counter = 1
df
def scaled_dot_product_attention(query, key, value, mask): #Query and Key are dot-multiplied to produce dynamically attended weights
matmul_qk = tf.matmul(query, key, transpose_b=True)
#"Scaled" is just a normalisation of the dot-product scores; the softmax below maps them to [0,1]
depth = tf.cast(tf.shape(key)[-1], tf.float32)
logits = matmul_qk / tf.math.sqrt(depth) #To avoid a large magnitude of numbers with large dimensions
# add the mask to zero out padding tokens.
if mask is not None:
logits += (mask * -1e9)
attention_weights = tf.nn.softmax(logits, axis=-1)
return tf.matmul(attention_weights, value)# mul with value to get context vector
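# A quick, illustrative shape check of the function above; the _q/_k/_v tensors and
# their (batch=2, seq_len=3, depth=4) sizes are made up purely for demonstration.
_q = tf.random.normal((2, 3, 4))
_k = tf.random.normal((2, 3, 4))
_v = tf.random.normal((2, 3, 4))
# the attended context keeps the query's shape: (batch, seq_len, depth)
assert scaled_dot_product_attention(_q, _k, _v, None).shape == (2, 3, 4)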
class MultiHeadAttention(tf.keras.layers.Layer):
#Just as a CNN has several kernels, a transformer can have several self-attention heads
def __init__(self, d_model, num_heads, name="multi_head_attention"):
super(MultiHeadAttention, self).__init__(name=name)
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.query_dense = tf.keras.layers.Dense(units=d_model)
self.key_dense = tf.keras.layers.Dense(units=d_model)
self.value_dense = tf.keras.layers.Dense(units=d_model)
self.dense = tf.keras.layers.Dense(units=d_model)
def split_heads(self, inputs, batch_size):
inputs = tf.reshape(
inputs, shape=(batch_size, -1, self.num_heads, self.depth))
return tf.transpose(inputs, perm=[0, 2, 1, 3])
def call(self, inputs):
query, key, value, mask = inputs['query'], inputs['key'], inputs[
'value'], inputs['mask']
batch_size = tf.shape(query)[0]
# linear layers
query = self.query_dense(query)
key = self.key_dense(key)
value = self.value_dense(value)
# split heads
query = self.split_heads(query, batch_size)
key = self.split_heads(key, batch_size)
value = self.split_heads(value, batch_size)
scaled_attention = scaled_dot_product_attention(query, key, value, mask)
scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])
#Concat each of the encodings and reduce their dimension to keep a track of less/more important features
#Like a projection layer
concat_attention = tf.reshape(scaled_attention,
(batch_size, -1, self.d_model))
outputs = self.dense(concat_attention)
return outputs
class PositionalEncoding(tf.keras.layers.Layer):
#How do we keep a track of the sequence -> dogs chase cat, cat chase dog
def __init__(self, position, d_model):
super(PositionalEncoding, self).__init__()
self.pos_encoding = self.positional_encoding(position, d_model)
def get_angles(self, position, i, d_model):
angles = 1 / tf.pow(10000, (2 * (i // 2)) / tf.cast(d_model, tf.float32))
return position * angles
def positional_encoding(self, position, d_model):
angle_rads = self.get_angles(
position=tf.range(position, dtype=tf.float32)[:, tf.newaxis],
i=tf.range(d_model, dtype=tf.float32)[tf.newaxis, :],
d_model=d_model)
#Positional encodings encode the position of the tokens and can be summed with the same-dimensional token embeddings,
#effectively injecting the positional information
# apply sin to even index in the array
sines = tf.math.sin(angle_rads[:, 0::2])
# apply cos to odd index in the array
cosines = tf.math.cos(angle_rads[:, 1::2])
pos_encoding = tf.concat([sines, cosines], axis=-1)
pos_encoding = pos_encoding[tf.newaxis, ...]
return tf.cast(pos_encoding, tf.float32)
def call(self, inputs):
return inputs + self.pos_encoding[:, :tf.shape(inputs)[1], :]
# This allows the transformer to know where there is real data and where it is padded
def create_padding_mask(seq):
seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
# add extra dimensions to add the padding
# to the attention logits.
return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)
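# For example, create_padding_mask(tf.constant([[5, 3, 0]])) returns
# [[[[0., 0., 1.]]]] with shape (1, 1, 1, 3): the trailing 1 flags the padded token.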
def encoder_layer(units, d_model, num_heads, dropout,name="encoder_layer"):
inputs = tf.keras.Input(shape=(None,d_model ), name="inputs")
padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")
attention = MultiHeadAttention(
d_model, num_heads, name="attention")({
'query': inputs,
'key': inputs,
'value': inputs,
'mask': padding_mask
})
attention = tf.keras.layers.Dropout(rate=dropout)(attention)
attention = tf.keras.layers.LayerNormalization(
epsilon=1e-6)(inputs + attention)
outputs = tf.keras.layers.Dense(units=units, activation='relu')(attention)
outputs = tf.keras.layers.Dense(units=d_model)(outputs)
outputs = tf.keras.layers.Dropout(rate=dropout)(outputs)
outputs = tf.keras.layers.LayerNormalization(
epsilon=1e-6)(attention + outputs)
return tf.keras.Model(
inputs=[inputs, padding_mask], outputs=outputs, name=name)
def encoder(time_steps,
num_layers,
units,
d_model,
num_heads,
dropout,
projection,
name="encoder"):
inputs = tf.keras.Input(shape=(None,d_model), name="inputs")
padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")
if projection=='linear':
## We implement a linear projection based on Very Deep Self-Attention Networks for End-to-End Speech Recognition. Retrieved from https://arxiv.org/abs/1904.13377
projection=tf.keras.layers.Dense( d_model,use_bias=True, activation='linear')(inputs)
print('linear')
else:
projection=tf.identity(inputs)
print('none')
projection *= tf.math.sqrt(tf.cast(d_model, tf.float32))
projection = PositionalEncoding(time_steps, d_model)(projection)
outputs = tf.keras.layers.Dropout(rate=dropout)(projection)
for i in range(num_layers):
outputs = encoder_layer(
units=units,
d_model=d_model,
num_heads=num_heads,
dropout=dropout,
name="encoder_layer_{}".format(i),
)([outputs, padding_mask])
return tf.keras.Model(
inputs=[inputs, padding_mask], outputs=outputs, name=name)
def transformer(time_steps,
num_layers,
units,
d_model,
num_heads,
dropout,
output_size,
projection,
name="transformer"):
inputs = tf.keras.Input(shape=(None,d_model), name="inputs")
enc_padding_mask = tf.keras.layers.Lambda(
create_padding_mask, output_shape=(1, 1, None),
name='enc_padding_mask')(tf.dtypes.cast(
#Our input has shape (length x d_model) but the masking is applied to a vector,
# so we sum each row to get a vector; if an entry is 0, that position was padded
tf.math.reduce_sum(
inputs,
axis=2,
keepdims=False,
name=None
), tf.int32))
enc_outputs = encoder(
time_steps=time_steps,
num_layers=num_layers,
units=units,
d_model=d_model,
num_heads=num_heads,
dropout=dropout,
projection=projection,
name='encoder'
)(inputs=[inputs, enc_padding_mask])
#We reshape for feeding our FC in the next step
outputs=tf.reshape(enc_outputs,(-1,time_steps*d_model))
#We predict our class
outputs = tf.keras.layers.Dense(units=output_size,use_bias=True,activation='softmax', name="outputs")(outputs)
return tf.keras.Model(inputs=[inputs], outputs=outputs, name='audio_class')
def extract_features(files):
# Sets the name to be the path to where the file is in my computer
file_name = os.path.join(str(files.file))
global Counter
if(Counter%10==0):
print(Counter)
Counter+=1
# Loads the audio file as a floating point time series and assigns the default sample rate
# Sample rate is set to 22050 by default
X, sample_rate = librosa.load(file_name, res_type='kaiser_fast')
# Generate Mel-frequency cepstral coefficients (MFCCs) from a time series
#mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T,axis=0)
# Generates a Short-time Fourier transform (STFT) to use in the chroma_stft
#stft = np.abs(librosa.stft(X))
# Computes a chromagram from a waveform or power spectrogram.
#chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0)
# Computes a mel-scaled spectrogram.
mel = np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T,axis=0)
# Computes spectral contrast
#contrast = np.mean(librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T,axis=0)
# Computes the tonal centroid features (tonnetz)
#tonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X),
#sr=sample_rate).T,axis=0)
# We add also the classes of each file as a label at the end
#label = files.label
return mel
startTime = datetime.now()
# Applying the function to the train data by accessing each row of the dataframe
features_label = df.apply(extract_features, axis=1)
print(datetime.now() - startTime)
# Saving the numpy array because it takes a long time to extract the features
np.save('features_label', features_label)
# loading the features
features_label = np.load('features_label.npy', allow_pickle=True)
features_label.shape
trial_features=[]
for i in range(0,len(features_label)):
a=[]
a.append(features_label[i])
#a.append(features_label[i][1])
trial_features.append(a)
xxx = np.array(trial_features)
xxx.shape
X = xxx
y = np.array(labels)
lb = LabelEncoder()
y = to_categorical(lb.fit_transform(y))
X.shape
y.shape
limit_1 = int(X.shape[0]*0.5)
limit_2 = int(X.shape[0]*0.85)
X_train = X[:limit_1]
Y_train = y[:limit_1]
X_val = X[limit_1:limit_2]
Y_val = y[limit_1:limit_2]
X_test = X[limit_2:]
Y_test = y[limit_2:]
# #We get our train and test set
# X_train,X_test, Y_train, Y_test =train_test_split(X,y, test_size=0.2, random_state=27)
projection=['linear','none']
accuracy=[]
proj_implemented=[]
for i in projection:
NUM_LAYERS = 2
D_MODEL = X.shape[2]
NUM_HEADS = 4
UNITS = 1024
DROPOUT = 0.1
TIME_STEPS= X.shape[1]
OUTPUT_SIZE=4
EPOCHS = 100
EXPERIMENTS=1
for j in range(EXPERIMENTS):
model = transformer(time_steps=TIME_STEPS,
num_layers=NUM_LAYERS,
units=UNITS,
d_model=D_MODEL,
num_heads=NUM_HEADS,
dropout=DROPOUT,
output_size=OUTPUT_SIZE,
projection=i)
#model.compile(optimizer=tf.keras.optimizers.Adam(0.000001), loss='categorical_crossentropy', metrics=['accuracy'])
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=100, verbose=1, mode='auto')
#history=model.fit(X_train,Y_train, epochs=EPOCHS, validation_data=(X_test, Y_test))
history = model.fit(X_train, Y_train, batch_size=64, epochs=100, validation_data=(X_val, Y_val),callbacks=[early_stop])
accuracy.append(sum(history.history['val_accuracy'])/len(history.history['val_accuracy']))
proj_implemented.append(i)
# Check out our train accuracy and validation accuracy over epochs.
train_accuracy = history.history['accuracy']
val_accuracy = history.history['val_accuracy']
import matplotlib.pyplot as plt
# Set figure size.
plt.figure(figsize=(12, 8))
# Generate line plot of training, testing loss over epochs.
plt.plot(train_accuracy, label='Training Accuracy', color='#185fad')
plt.plot(val_accuracy, label='Validation Accuracy', color='orange')
# Set title
plt.title('Training and Validation Accuracy by Epoch', fontsize = 25)
plt.xlabel('Epoch', fontsize = 18)
plt.ylabel('Accuracy', fontsize = 18)
plt.xticks(range(0,100,5), range(0,100,5))
plt.legend(fontsize = 18);
accuracy=pd.DataFrame(accuracy, columns=['accuracy'])
proj_implemented=pd.DataFrame(proj_implemented, columns=['projection'])
results=pd.concat([accuracy,proj_implemented],axis=1)
results.groupby('projection').mean()
import keras
y_prob = model.predict(X_test)
y_classes = y_prob.argmax(axis=-1)
res_list = y_classes.tolist()
label_mapping = {0:'Aayush',1:'Kanishk',2:'Kayan',3:'Rohit'}#clarify
for i in range(len(res_list)):
print("prediction ",i," ",label_mapping[res_list[i]])
model.evaluate(X_test,Y_test)
###Output
2/2 [==============================] - 0s 7ms/step - loss: 0.3979 - accuracy: 0.9459
|
Machine-Learning-2021/project9/Assignment-9-solution.ipynb | ###Markdown
Mustererkennung/Machine Learning - Assignment 9
###Code
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import accuracy_score
%matplotlib inline
###Output
_____no_output_____
###Markdown
Exercise Solution: Multilayer-Perceptron (MLP) with Backpropagation Splitting the data into training/test sets according to their class memberships
###Code
training_data = np.array(pd.read_csv('zip.train', sep=' ', header=None),dtype=np.float128)
test_data = np.array(pd.read_csv('zip.test', sep =' ',header=None),dtype=np.float128)
X_train, y_train = training_data[:,1:-1], training_data[:,0]
X_test, y_test = test_data[:,1:], test_data[:,0]
def show_numbers(X):
num_samples = 90
indices = np.random.choice(range(len(X)), num_samples)
sample_digits = X[indices]
fig = plt.figure(figsize=(20, 6))
for i in range(num_samples):
ax = plt.subplot(6, 15, i + 1)
img = 255 - sample_digits[i].reshape((16, 16))
plt.imshow(img, cmap='gray')
plt.axis('off')
show_numbers(X_train)
#y_labels in form of vector for digits for multiclass
enc = OneHotEncoder()
# 0 -> (1, 0, 0, 0), 1 -> (0, 1, 0, 0), 2 -> (0, 0, 1, 0), 3 -> (0, 0, 0, 1)
y_OH_train = enc.fit_transform(np.expand_dims(y_train,1)).toarray()
y_OH_test = enc.fit_transform(np.expand_dims(y_test,1)).toarray()
#print( X_train)
#Data Normalization/standardization
scaler = StandardScaler()
scaler.fit(X_train)
X_train=scaler.transform(X_train)
X_test=scaler.transform(X_test)
class Sigmoid:
def activation(z):
return 1 / (1 + np.exp(-z))
def gradient(z):
return Sigmoid.activation(z) * (1 - Sigmoid.activation(z))
class Relu:
def activation(z):
z[z < 0] = 0
return z
def gradient(x):
x[x<=0] = 0
x[x>0] = 1
return x
class softmax:
def activation(Z):
expZ = np.exp(Z - np.max(Z))
return expZ / expZ.sum(axis=0, keepdims=True)
class Multilayer_perceptron:
def __init__(self, total_layer=2,dimensions=None, activations=None, learning_rate=0.1):
"""
parameters
1. total_layer: no of layers including input layer, hidden layers and output layer
2. dimensions: Dimensions of the neural net. (no of input, no of nodes in hidden layer, no of neuron in output)
3. activations:Activations functions for each layer.
4. learning_rate: learning rate for the gradient descent updates
"""
self.n_layers = total_layer
self.loss = None
self.learning_rate = learning_rate
self.sizes=dimensions
# Weights and biases are initiated by index. For a one hidden layer net you will have a w[1] and w[2]
self.w = {}
self.b = {}
# Activations are also initiated by index. For the example we will have activations[2] and activations[3]
self.activations = {}
for i in range(len(dimensions) - 1):
#self.w[i + 1] = np.ones((dimensions[i+1], dimensions[i]))
limit = 1 / np.sqrt(dimensions[i])
self.w[i + 1] = np.random.uniform(-limit, limit,(dimensions[i+1], dimensions[i]) )
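# uniform initialisation in [-1/sqrt(fan_in), 1/sqrt(fan_in)], a common scaling
# that keeps the initial pre-activations at a moderate magnitude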
self.b[i + 1] = np.zeros((dimensions[i + 1],1))
self.activations[i + 2] = activations[i]
def _feed_forward(self, x):
"""
Execute a forward feed through the network.
x:input data vectors.
return: Node outputs and activations per layer.
The numbering of the output is equivalent to the layer numbers.
"""
# w(x) + b
z = {}
# activations: f(z)
a = {1: x.T} # First layer has no activations as input. The input x is the input.
for i in range(1, self.n_layers):
# current layer = i
# activation layer = i + 1
#print(np.dot( self.w[i],a[i]))
z[i + 1] = np.dot( self.w[i],a[i]) + self.b[i]
#print(z[i+1])
a[i + 1] = self.activations[i + 1].activation(z[i + 1])
#print(a[i+1])
#print(z,a)
return z, a
#backpropagation function
def back_propogation(self, x, y):
self.Z,self.A=self._feed_forward(x)
self.dW = {}
self.dB = {}
self.dZ = {}
self.dA = {}
L = self.n_layers
#print(self.A[L],self.A[L].shape)
#print(y,y.shape)
#gradient of the error with respect to z, where z = wx + b
self.dZ[L] = (self.A[L] - y.T)
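#(a softmax output layer combined with cross-entropy loss gives this compact gradient: dL/dz = a - y)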
#print(self.A[L],y,self.dZ[L])
for k in range(L, 1, -1):
#previous value in chain rule
delta=np.dot(self.w[k-1].T,self.dZ[k])
#print('iter',k)
#self.A[k-1]=np.array(self.A[k-1]).reshape(self.A[k-1].shape[0],1)
self.dW[k-1] = np.dot(self.dZ[k],self.A[k-1].T)*(1/self.total_samples)
#print('dw',self.dW[k-1])
self.dB[k-1] = np.sum(self.dZ[k], axis=1, keepdims=True)*(1/self.total_samples)
#print('dB',self.dB[k-1])
# activations start between the 1st and 2nd layer, so a1 = X, a2 ... a(n-1) = hidden activations, a(n) = output activation
if(k>2):
self.dZ[k-1] = delta*self.activations[k-1].gradient(self.A[k-1])
def fit(self, X, Y, epochs=100, display_loss=True,test_data=None):
if display_loss:
loss = {}
train_acc={}
test_acc={}
self.total_samples = X.shape[0]
for epoch in range(epochs):
dW = {}
dB = {}
for i in range(self.n_layers - 1):
dW[i+1] = np.zeros((self.sizes[i+1],self.sizes[i]))
dB[i+1] = np.zeros((self.sizes[i+1],1))
#for x, y in zip(X, Y):
X,Y=self.shuffle_data(X, Y)
self.back_propogation(X, Y)
for i in range(self.n_layers-1):
dW[i+1] += self.dW[i+1]
dB[i+1] += self.dB[i+1]
#print(total_samples)
for i in range(self.n_layers-1):
#self.b[i+1]=self.b[i+1].reshape(1,-1)
#print(dB[i+1].shape)
self.w[i+1] -= self.learning_rate * dW[i+1]
self.b[i+1] -= self.learning_rate * dB[i+1]
if display_loss:
#Y_pred = self.predict(X)
loss[epoch] = self.cross_entropy(Y)
train_acc[epoch]=self.accuracy(X,Y)
test_acc[epoch]=self.accuracy(test_data[0],test_data[1])
if epoch%500==0:
print("epoch",epoch,'loss:',loss[epoch],' Training Accuracy:',train_acc[epoch]*100,' Test Accuracy:',test_acc[epoch]*100)
#loss plot
if display_loss:
self.plot_loss_acc([loss,(train_acc,test_acc)])
#prediction method
def predict(self, X):
lin_sum,output = self._feed_forward(X)
return output[self.n_layers]
#cross entropy calculation function
def cross_entropy(self,Y):
return -np.mean(Y * np.log(self.A[self.n_layers].T))
def accuracy(self,X,Y):
Y_pred_test = self.predict(X)
Y_pred_test = np.argmax(Y_pred_test.T,1)
Y = np.argmax(Y, axis=1)
return accuracy_score(Y_pred_test, Y)
def plot_loss_acc(self,data):
fig, axs = plt.subplots(1,2,figsize=(15, 10))
y_label=['loss','Training Accuracy','Test Accuracy']
for kk, (ax,yy) in enumerate(zip(axs.reshape(-1),data)):
if(kk==1):
ax.plot(yy[0].values(),label=y_label[kk])
ax.plot(yy[1].values(), label=y_label[kk+1])
ax.legend(loc='upper left')
ax.set(xlabel='Epochs', ylabel='accuracy')
else:
ax.plot(yy.values())
ax.set(xlabel='Epochs', ylabel=y_label[kk])
#fig.delaxes(axs[1][1])
plt.show()
def shuffle_data(self,X, y, seed=None):
""" Random shuffle of the samples in X and y """
if seed:
np.random.seed(seed)
idx = np.arange(X.shape[0])
np.random.shuffle(idx)
return X[idx], y[idx]
###Output
_____no_output_____
###Markdown
(a) Optimize width (the number of neurons in a hidden layer; it is usually the same for all of them) and depth of the network. Try to find a setting that trains in a reasonable time. Plot the loss. Answer: We tried different neural network architectures, varying the number of layers and the number of neurons per layer; the best results were obtained with 1 hidden layer of 16 neurons, using a sigmoid activation function (hidden layer) and a softmax activation function (output layer). Neural Network Architecture 1: Hidden Layer=1, 16 neurons, Sigmoid (hidden layer), Softmax (output layer)
###Code
#dimenstions=(nodes_inputlayer,nodes_hidden_layer1,nodes_hidden_layer2, ....., nodes_hidden_layern,nodes_output_layer)
dimenstions=(X_train.shape[1], 16, len(np.unique(y_train)))
#activations=(activation_function_of_hidden_layer1,activation_function_of_hidden_layer2, ...., activation_function_of_output_layer )
activations_funct_list= ( Sigmoid, softmax)
mlp = Multilayer_perceptron(total_layer=len(dimenstions),dimensions=dimenstions, activations= activations_funct_list,learning_rate=0.1)
mlp.fit( X_train,y_OH_train, epochs=10000, display_loss=True,test_data=(X_test,y_OH_test))
import sklearn
Y_pred_train = mlp.predict(X_train)
print(Y_pred_train.T.shape)
Y_pred_train = np.argmax(Y_pred_train.T,1)
Y_pred_test = mlp.predict(X_test)
Y_pred_test = np.argmax(Y_pred_test.T,1)
print(Y_pred_train.shape,y_train.shape)
accuracy_train = accuracy_score(Y_pred_train, y_train)
accuracy_test = accuracy_score(Y_pred_test, y_test)
print("Training accuracy", round(accuracy_train, 2))
print("Test Dataset accuracy", round(accuracy_test, 2))
print(sklearn.metrics.classification_report(y_test, Y_pred_test))
###Output
(7291, 10)
(7291,) (7291,)
Training accuracy 0.94
Test Dataset accuracy 0.89
precision recall f1-score support
0.0 0.93 0.97 0.95 359
1.0 0.97 0.94 0.96 264
2.0 0.84 0.85 0.85 198
3.0 0.86 0.88 0.87 166
4.0 0.79 0.86 0.82 200
5.0 0.86 0.80 0.83 160
6.0 0.91 0.89 0.90 170
7.0 0.88 0.89 0.89 147
8.0 0.89 0.79 0.84 166
9.0 0.88 0.89 0.89 177
accuracy 0.89 2007
macro avg 0.88 0.88 0.88 2007
weighted avg 0.89 0.89 0.89 2007
###Markdown
Neural Network Architecture 2: Hidden Layer=2, neurons in each hidden layer=16, Sigmoid Activation (hidden layer), Softmax(output layer)
###Code
#dimenstions=(nodes_inputlayer,nodes_hidden_layer1,nodes_hidden_layer2, ....., nodes_hidden_layern,nodes_output_layer)
dimenstions=(X_train.shape[1], 16,16, len(np.unique(y_train)))
#activations=(activation_function_of_hidden_layer1,activation_function_of_hidden_layer2, ...., activation_function_of_output_layer )
activations_funct_list= ( Sigmoid, Sigmoid,softmax)
mlp1 = Multilayer_perceptron(total_layer=len(dimenstions),dimensions=dimenstions, activations= activations_funct_list,learning_rate=0.3)
mlp1.fit( X_train,y_OH_train, epochs=10000, display_loss=True,test_data=(X_test,y_OH_test))
###Output
epoch 0 loss: 0.23234757232659572 Training Accuracy: 8.846523110684405 Test Accuracy: 7.324364723467862
epoch 500 loss: 0.14044484565345697 Training Accuracy: 54.99931422301467 Test Accuracy: 51.569506726457405
###Markdown
(b) Show some digits that are classified incorrectly.
###Code
Incorrectly_classfied_images=X_test[Y_pred_test!=y_test]
Incorrectly_classfied_images=scaler.inverse_transform(Incorrectly_classfied_images)
print('Incorrectly classfied Digits')
show_numbers(Incorrectly_classfied_images)
###Output
Incorrectly classfied Digits
###Markdown
(c) Plot your first weight layer as a grayscale image. To scale the weights into the grayscale pixel range (0, 255), m' = (m - rmin)/(rmax - rmin) * (tmax - tmin) + tmin will scale m linearly into [tmin, tmax] as desired. Here tmax = 255 and tmin = 0, rmin denotes the minimum of the range of your weights, and rmax denotes the maximum of the range of your weights.
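For example (illustrative numbers only): a weight m = 0.1 with rmin = -0.4 and rmax = 0.6 maps to (0.1 - (-0.4)) / (0.6 - (-0.4)) * (255 - 0) + 0 = 127.5, i.e. roughly mid-gray.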
###Code
def show_numbers(X):
num_samples = X.shape[0]
print(num_samples)
indices = np.random.choice(range(len(X)), num_samples)
sample_digits = X[indices]
fig = plt.figure(figsize=(20, 6))
nu_images=int(np.ceil(num_samples/2))
for i in range(num_samples):
ax = plt.subplot(2,nu_images, i + 1)
img = 255 - sample_digits[i].reshape((16, 16))
plt.imshow(img, cmap='gray')
plt.axis('off')
#rescaling values into the grayscale range 0 to 255
min_value=np.min(mlp.w[1])
max_value=np.max(mlp.w[1])
scaled_value=(mlp.w[1]-min_value)/(max_value-min_value)*255
#plt.imshow(scaled_value.T, cmap="gray")
#plt.show()
show_numbers(scaled_value)
###Output
16
|
Chapter7b.ipynb | ###Markdown
Seq2Seq Translation with Attention
###Code
from fastai.text.all import *
from utils import *
path = Config.config_path/"giga-fren"
df = pd.read_csv(path/"questions_easy.csv")
df["en"] = df["en"].apply(lambda x: x.lower())
df["fr"] = df["fr"].apply(lambda x: x.lower())
df.tail()
sl = 30
dls = DataBlock(
blocks=(TextBlock.from_df("fr", tok=SpacyTokenizer("fr"), seq_len=sl),
TextBlock.from_df("en", tok=SpacyTokenizer("en"), seq_len=sl)),
get_x=ColReader("text"), get_y=ColReader("text"),
splitter=RandomSplitter(0.1)
).dataloaders(df, bs=64, num_workers=os.cpu_count(), seq_len=sl)
dls.show_batch(max_n=3)
m = np.array([len(st.split(" ")) for st in df.to_numpy()[:, 0]])
np.where(m > 50)
model_path = Config.config_path/"models"
emb_enc = torch.load(model_path/"fr_emb.pth")
emb_dec = torch.load(model_path/"en_emb.pth")
emb_dec
def seq2seq_loss(out, targ, pad_idx=1):
bs, targ_len = targ.size()
_, out_len, vs = out.size()
if targ_len > out_len: out = F.pad(out, (0, 0, 0, targ_len - out_len, 0, 0), value=pad_idx)
if out_len > targ_len: targ = F.pad(targ, (0, out_len - targ_len, 0, 0), value=pad_idx)
return CrossEntropyLossFlat()(out, targ)
def seq2seq_acc(out, targ, pad_idx=1):
bs, targ_len = targ.size()
_, out_len, vs = out.size()
if targ_len > out_len: out = F.pad(out, (0, 0, 0, targ_len - out_len, 0, 0), value=pad_idx)
if out_len > targ_len: targ = F.pad(targ, (0, out_len - targ_len, 0, 0), value=pad_idx)
out = out.argmax(2)
return (out == targ).float().mean()
class NGram():
def __init__(self, ngram, max_n=5000): self.ngram,self.max_n = ngram,max_n
def __eq__(self, other):
if len(self.ngram) != len(other.ngram): return False
return np.all(np.array(self.ngram) == np.array(other.ngram))
def __hash__(self): return int(sum([o * self.max_n**i for i,o in enumerate(self.ngram)]))
def get_grams(x, n, max_n=5000):
return x if n==1 else [NGram(x[i:i+n], max_n=max_n) for i in range(len(x)-n+1)]
def get_correct_ngrams(pred, targ, n, max_n=5000):
pred_grams,targ_grams = get_grams(pred, n, max_n=max_n),get_grams(targ, n, max_n=max_n)
pred_cnt,targ_cnt = Counter(pred_grams),Counter(targ_grams)
return sum([min(c, targ_cnt[g]) for g,c in pred_cnt.items()]),len(pred_grams)
class GetPreds:
def __init__(self, inputs, preds, targs):
self.inputs, self.preds, self.targs = inputs, preds, targs
def get_predictions(self, num, ignore_pad=False):
""":ignore_pad: Whether to ignore pad for predictions. Default: False"""
return (
itos(dls.vocab[0], self.inputs[num], join=True, ignore_pad=True),
itos(dls.vocab[1], self.targs[num], join=True, ignore_pad=True),
itos(dls.vocab[1], self.preds[num].argmax(1), join=True, ignore_pad=ignore_pad)
)
class TeacherForcing(Callback):
def __init__(self, end_epoch, full_force_for=0):
self.fff = full_force_for - 1 # start counting from zero.
self.end_epoch = end_epoch
def before_batch(self):
self.learn.xb = (self.x, self.y)
def before_epoch(self):
self.learn.model.pr_force = 1 - ((self.learn.epoch - self.fff) / (self.end_epoch - self.fff))
if self.learn.epoch <= self.fff: self.learn.model.pr_force = 1
###Output
_____no_output_____
###Markdown
Implementing Attention
###Code
class Seq2SeqRNN_attn(Module):
def __init__(self, emb_enc, emb_dec, nh, out_sl, nl=2, bos_idx=0, pad_idx=1):
self.nl, self.nh, self.out_sl, self.pr_force = nl, nh, out_sl, 1
self.bos_idx, self.pad_idx = bos_idx, pad_idx
self.emb_enc, self.emb_dec = emb_enc, emb_dec
self.emb_sz_enc, self.emb_sz_dec = emb_enc.embedding_dim, emb_dec.embedding_dim
self.voc_sz_dec = emb_dec.num_embeddings
self.emb_enc_drop = nn.Dropout(0.15)
self.gru_enc = nn.GRU(self.emb_sz_enc, nh, num_layers=nl, dropout=0.25,
batch_first=True, bidirectional=True)
self.out_enc = nn.Linear(2 * nh, self.emb_sz_dec, bias=False)
self.gru_dec = nn.GRU(self.emb_sz_dec + 2 * nh, self.emb_sz_dec, num_layers=nl,
dropout=0.1, batch_first=True)
self.out_drop = nn.Dropout(0.35)
self.out = nn.Linear(self.emb_sz_dec, self.voc_sz_dec)
self.out.weight.data = self.emb_dec.weight.data
self.enc_att = nn.Linear(2 * nh, self.emb_sz_dec, bias=False)
self.hid_att = nn.Linear(self.emb_sz_dec, self.emb_sz_dec)
self.V = self.init_param(self.emb_sz_dec)
def encoder(self, bs, inp):
h = self.initHidden(bs)
emb = self.emb_enc_drop(self.emb_enc(inp))
enc_out, hid = self.gru_enc(emb, 2 * h)
pre_hid = hid.view(2, self.nl, bs, self.nh).permute(1, 2, 0, 3).contiguous()
pre_hid = pre_hid.view(self.nl, bs, 2 * self.nh)
hid = self.out_enc(pre_hid)
return hid, enc_out
def decoder(self, dec_inp, hid, enc_att, enc_out):
hid_att = self.hid_att(hid[-1])
# enc_out and hid through linear layers
u = torch.tanh(enc_att + hid_att[:, None])
# Learn importance each time step
attn_wgts = F.softmax(u @ self.V, 1)
# weighted average of enc_out (output at every time step)
ctx = (attn_wgts[..., None] * enc_out).sum(1)
emb = self.emb_dec(dec_inp)
# Concat decoder embed with context
outp, hid = self.gru_dec(torch.cat([emb, ctx], 1)[:, None], hid)
outp = self.out(self.out_drop(outp[:, 0]))
return hid, outp
def show(self, nm, v):
if False: print(f"{nm}={v[nm].shape}")
def forward(self, inp, targ=None):
bs, sl = inp.size()
hid, enc_out = self.encoder(bs, inp)
# self.show("hid", vars())
dec_inp = inp.new_zeros(bs).long() + self.bos_idx
enc_att = self.enc_att(enc_out)
res = []
for i in range(self.out_sl):
hid, outp = self.decoder(dec_inp, hid, enc_att, enc_out)
res.append(outp)
dec_inp = outp.max(1)[1]
if (dec_inp==self.pad_idx).all(): break
if (targ is not None) and (random.random() < self.pr_force):
if i >= targ.shape[1]: continue
assert dec_inp.shape == targ[:, i].shape
dec_inp = targ[:, i]
return torch.stack(res, dim=1)
def initHidden(self, bs): return one_param(self).new_zeros(2 * self.nl, bs, self.nh)
def init_param(self, *sz): return nn.Parameter(torch.randn(sz) / math.sqrt(sz[0]))
model = Seq2SeqRNN_attn(emb_enc, emb_dec, 256, 30)
model
torch.cuda.empty_cache()
learn = Learner(dls, model, loss_func=seq2seq_loss,
metrics=[seq2seq_acc, CorpusBLEUMetric(len(dls.vocab[1]))],
cbs=[TeacherForcing(30, 3)])
learn.lr_find()
learn.fit_one_cycle(15, 3e-3)
inputs, preds, targs = learn.get_preds(with_input=True)
p = GetPreds(inputs[0], preds, targs)
###Output
_____no_output_____
###Markdown
It seems like this has 2 input features, and `inputs[0]` matches their English equivalent. One isn't sure what `inputs[1]` represents here.
###Code
p.get_predictions(700)
p.get_predictions(701)
p.get_predictions(4002) # this is very funny
p.get_predictions(4010)
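# Sketch of how one might check this (assumption: the TeacherForcing callback sets
# learn.xb = (x, y), so inputs[1] would simply mirror the targets); uncomment to try:
# itos(dls.vocab[1], inputs[1][700], join=True, ignore_pad=True)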
###Output
_____no_output_____ |
Python/open_satellite_he5.ipynb | ###Markdown
Example Map Plotting - MOPITT CO
###Code
# By line: RRB 2020-07-26
# Script aims to:
# - Load a MOPITT HDF5 file
# - Extract variables: CO column, latitude, longitude
# - Create contour plot of variable as world map with coastlines
# - Customize contours and colorbar
# - Add axes labels
# - Add grid lines
###Output
_____no_output_____
###Markdown
At the start of a Jupyter notebook you need to import all modules that you will use.
###Code
import matplotlib.pyplot as plt
import cartopy.crs as ccrs # For plotting maps
import cartopy.feature as cfeature # For plotting maps
from cartopy.util import add_cyclic_point # For plotting maps
from pathlib import Path # System agnostic paths
import xarray as xr # For loading the data arrays
import numpy as np # For array creation and calculations
import h5py # For loading he5 files
###Output
_____no_output_____
###Markdown
Define a filedump function for he5 files.
###Code
def h5filedump(filename):
import subprocess
cmd = 'h5dump -n ' + filename
# returns output as byte string
shell_process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
# convert to string
subprocess_return = shell_process.stdout.read().decode('utf8').strip()
# Human readable
mystr = subprocess_return.split(sep='\n')
return(mystr)
###Output
_____no_output_____
###Markdown
Define the directories and file of interest for your results.
###Code
result_dir = Path("../../data")
file = "MOP03JM-201801-L3V95.6.3.he5"
file_to_open = result_dir / file
###Output
_____no_output_____
###Markdown
Load file
###Code
# Currently does not work
#he5_load = xr.open_dataset(file,engine='h5netcdf')
he5_load = h5py.File(file_to_open, mode='r')
###Output
_____no_output_____
###Markdown
Extract dataset of choice
###Code
# Uncomment below to see location of variables
#h5filedump(file)
# load the data
dataset = he5_load["/HDFEOS/GRIDS/MOP03/Data Fields/RetrievedCOTotalColumnDay"][:]
lat = he5_load["/HDFEOS/GRIDS/MOP03/Data Fields/Latitude"][:]
lon = he5_load["/HDFEOS/GRIDS/MOP03/Data Fields/Longitude"][:]
# create xarray DataArray
dataset_new = xr.DataArray(dataset, dims=["lon","lat"], coords=[lon,lat])
# missing value -> nan
ds_masked = dataset_new.where(dataset_new != -9999.)
print(ds_masked)
###Output
_____no_output_____
###Markdown
Plot the value over the globe.
###Code
plt.figure(figsize=(20,8))
#Define projection
ax = plt.axes(projection=ccrs.PlateCarree())
#define contour levels
clev = np.arange(0.5, 3.2, 0.1)
#plot the data
plt.contourf(lon, lat, ds_masked.transpose()/1e18,clev,cmap='Spectral_r',extend='both')
# add coastlines
ax.add_feature(cfeature.COASTLINE)
#add lat lon grids
gl = ax.gridlines(draw_labels=True, color='grey', alpha=0.8, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
# Titles
# Main
plt.title("Global map of MOPITT column CO, January 2018",fontsize=18)
# y-axis
ax.text(-0.04, 0.5, 'Latitude', va='bottom', ha='center',
rotation='vertical', rotation_mode='anchor',
transform=ax.transAxes)
# x-axis
ax.text(0.5, -0.08, 'Longitude', va='bottom', ha='center',
rotation='horizontal', rotation_mode='anchor',
transform=ax.transAxes)
# legend
ax.text(1.15, 0.5, 'CO (x 10$^{18}$ molec/cm$^{2}$)', va='bottom', ha='center',
rotation='vertical', rotation_mode='anchor',
transform=ax.transAxes)
plt.colorbar()
plt.show()
###Output
_____no_output_____ |
Giacomos_UnbinnedDataset/test_UnbinnedDataset_class_BKG.ipynb | ###Markdown
SPECTRAL MODEL - POWER LAW WITH SOME WIGGLING
###Code
class MyCustomSpectralModel(SpectralModel):
"""My custom spectral model, parametrising a power law with some wiggling.
Parameters
----------
amplitude : `astropy.units.Quantity`
Amplitude of the spectra model.
index : `astropy.units.Quantity`
Spectral index of the model.
reference : `astropy.units.Quantity`
Reference energy of the power law.
a : `scalar`
Amplitude of the wiggling.
"""
tag = "MyCustomSpectralModel"
amplitude = Parameter("amplitude", "1e-12 cm-2 s-1 TeV-1", min=0,frozen=False)
index = Parameter("index", 2, min=0,frozen=False)
reference = Parameter("reference", "1 TeV", frozen=True)
a = Parameter("a", "4", min=0,max=30, frozen=False)
@staticmethod
def evaluate(energy, index, amplitude, reference, a):
pwl = PowerLawSpectralModel.evaluate(
energy=energy,
index=index,
amplitude=amplitude,
reference=reference,
)
sinus = 3 + a/10*np.sin( (10+ 10*np.log10(energy.value) ))
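# note: with a = 0 the modulation term is the constant 3, so the model reduces to 3x the plain power law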
return pwl*sinus
my_custom_model = MyCustomSpectralModel(a=0,amplitude= "1e-11 cm-2 s-1 TeV-1",index=2.5)
my_custom_model.parameters['index'].frozen = False
my_custom_model.parameters['amplitude'].frozen = True
my_custom_model.parameters['a'].frozen = True
my_custom_model2 = MyCustomSpectralModel(a=20,amplitude= "1e-12 cm-2 s-1 TeV-1",index=0.8)
my_custom_model3 = MyCustomSpectralModel(a=10,amplitude= "1e-12 cm-2 s-1 TeV-1",index=0.8)
fig, ax = plt.subplots(figsize=(12,8))
my_custom_model.plot(energy_bounds=[1, 100] * u.TeV,label="a = 0")
###Output
_____no_output_____
###Markdown
SPATIAL MODEL - POINT LIKE
###Code
spatial_model_point = PointSpatialModel(lon_0="150.58 deg", lat_0="-13.26 deg", frame="icrs")
spatial_model_point.parameters['lon_0'].frozen = True
spatial_model_point.parameters['lat_0'].frozen = True
sky_model = SkyModel(spectral_model=my_custom_model,spatial_model=spatial_model_point, name="point-exp-pwl")
#models = Models( sky_model )
bkg_model = FoVBackgroundModel(dataset_name="my-dataset")
models = Models([sky_model , bkg_model] )
###Output
_____no_output_____
###Markdown
IRF
###Code
18000/3600  # the 18000 s quoted in the IRF filename expressed in hours (= 5 h)
pointing = SkyCoord(150.58 ,-13.26 , frame="icrs", unit="deg")
livetime = 1 * u.hr
irfs = load_cta_irfs("cta-prod5-zenodo-fitsonly-v0.1/fits/CTA-Performance-prod5-v0.1-South-20deg.FITS/Prod5-South-20deg-AverageAz-14MSTs37SSTs.18000s-v0.1.fits.gz")
observation = Observation.create( pointing=pointing, livetime=livetime, irfs=irfs )
observation.peek()
###Output
_____no_output_____
###Markdown
DATASET - BOTH UNBINNED (UnbinnedDataset) AND BINNED ( MapDataset )
###Code
energy_axis = MapAxis.from_energy_bounds( "0.01 TeV", "100 TeV", nbin=15, per_decade=True, name="energy" )
energy_axis_true = MapAxis.from_energy_bounds( "0.01 TeV", "100 TeV", nbin=25, per_decade=True, name="energy_true")
migra_axis = MapAxis.from_bounds(0.5, 2, nbin=15, node_type="edges", name="migra")
# WcsGeom
geom = WcsGeom.create(frame="icrs", skydir=pointing, width=(2, 2), binsz=0.02, axes=[energy_axis])
# Or RegionGeom
# but does not work for simulating events
#on_region_radius = Angle("1 deg")
#on_region = CircleSkyRegion(center=pointing, radius=on_region_radius)
#geom = RegionGeom.create(region=on_region, axes=[energy_axis])
maker = MapDatasetMaker(selection=["exposure","edisp", "background", ])# no "psf"
# UnbinnedDatase
d_empty = UnbinnedDataset.create( geom, energy_axis_true=energy_axis_true, migra_axis=migra_axis, name="my-dataset")
unbinned_dataset = maker.run(d_empty, observation)
unbinned_dataset.models = models
print(unbinned_dataset)
energy_axis = MapAxis.from_energy_bounds( "0.01 TeV", "100 TeV", nbin=15, per_decade=True, name="energy" )
energy_axis_true = MapAxis.from_energy_bounds( "0.01 TeV", "100 TeV", nbin=25, per_decade=True, name="energy_true")
migra_axis = MapAxis.from_bounds(0.5, 2, nbin=15, node_type="edges", name="migra")
# WcsGeom
geom = WcsGeom.create(frame="icrs", skydir=pointing, width=(2, 2), binsz=0.02, axes=[energy_axis])
# MapDatase
d_empty = MapDataset.create( geom, energy_axis_true=energy_axis_true, migra_axis=migra_axis, name="my-dataset")
dataset = maker.run(d_empty, observation)
dataset.models = models
print(dataset)
###Output
UnbinnedDataset
---------------
Name : my-dataset
Event list : None
Total counts : 0
Total background counts : 49791.05
Total excess counts : -49791.05
Predicted counts : 61208.99
Predicted background counts : 49791.05
Predicted excess counts : 11417.94
Exposure min : 2.41e+03 m2 s
Exposure max : 1.27e+10 m2 s
Number of total bins : 600000
Number of fit bins : 600000
Fit statistic type : unbinned
Fit statistic value (-2 log(L)) : -inf
Number of models : 2
Number of parameters : 9
Number of free parameters : 2
Component 0: SkyModel
Name : point-exp-pwl
Datasets names : None
Spectral model type : MyCustomSpectralModel
Spatial model type : PointSpatialModel
Temporal model type :
Parameters:
amplitude (frozen) : 1.00e-11 1 / (cm2 s TeV)
index : 2.500 +/- 0.00
reference (frozen) : 1.000 TeV
a (frozen) : 0.000
lon_0 (frozen) : 150.580 deg
lat_0 (frozen) : -13.260 deg
Component 1: FoVBackgroundModel
Name : my-dataset-bkg
Datasets names : ['my-dataset']
Spectral model type : PowerLawNormSpectralModel
Parameters:
norm : 1.000 +/- 0.00
tilt (frozen) : 0.000
reference (frozen) : 1.000 TeV
MapDataset
----------
Name : my-dataset
Total counts : 0
Total background counts : 49791.05
Total excess counts : -49791.05
Predicted counts : 61208.99
Predicted background counts : 49791.05
Predicted excess counts : 11417.94
Exposure min : 2.41e+03 m2 s
Exposure max : 1.27e+10 m2 s
Number of total bins : 600000
Number of fit bins : 600000
Fit statistic type : cash
Fit statistic value (-2 log(L)) : 122417.98
Number of models : 2
Number of parameters : 9
Number of free parameters : 2
Component 0: SkyModel
Name : point-exp-pwl
Datasets names : None
Spectral model type : MyCustomSpectralModel
Spatial model type : PointSpatialModel
Temporal model type :
Parameters:
amplitude (frozen) : 1.00e-11 1 / (cm2 s TeV)
index : 2.500 +/- 0.00
reference (frozen) : 1.000 TeV
a (frozen) : 0.000
lon_0 (frozen) : 150.580 deg
lat_0 (frozen) : -13.260 deg
Component 1: FoVBackgroundModel
Name : my-dataset-bkg
Datasets names : ['my-dataset']
Spectral model type : PowerLawNormSpectralModel
Parameters:
norm : 1.000 +/- 0.00
tilt (frozen) : 0.000
reference (frozen) : 1.000 TeV
###Markdown
EVENTS SIMULATION
###Code
###################################
# SIMULATING EVENTS
###################################
n_obs = 100
unbinned_datasets = Datasets()
datasets = Datasets()
for idx in range(n_obs):
#UNBINNED
unbinned_dataset_fake = unbinned_dataset.copy(name=f"obs-{idx}")
unbinned_dataset_fake.meta_table["OBS_ID"] = [idx]
unbinned_dataset_fake.models = models.copy()
unbinned_dataset_fake.fake(random_state=idx)
unbinned_datasets.append(unbinned_dataset_fake)
#BINNED
dataset_fake = dataset.copy(name=f"obs-{idx}")
dataset_fake.meta_table["OBS_ID"] = [idx]
dataset_fake.models = models.copy()
# counts from the simulated events
fake_counts = Map.from_geom(dataset_fake.geoms["geom"])
fake_counts.fill_events(unbinned_dataset_fake.events)
dataset_fake.counts = fake_counts
datasets.append(dataset_fake)
###################################
# SHOW SOME SIMULATIONS OF THE UNBINNED DATASET
###################################
fig, axs = plt.subplots(figsize=(22,14),nrows=2, ncols=2)
for i in [0,1]:
for j in [0,1]:
example_dataset = unbinned_datasets[np.random.choice(n_obs )]
#Plot Model
plot_dict = dict(color='black',linewidth=2,alpha=0.8,label="Source dN/dE")
ax, _ = example_dataset.plot_predicted_dnde(ax=axs[i,j],fig=fig,**plot_dict,line=True,bkg=True)
#Plot events
plot_dict = dict(color='black',label="Simulated source events")
ax, _ = example_dataset.plot_observed_dnde(ax=axs[i,j],fig=fig,en_bins=28,**plot_dict)
ax.set_ylim(bottom=2e2,top=5e3)
ax.set_xlim([5e-2,1e2])
###Output
_____no_output_____
###Markdown
FITTING WITH Fit class
###Code
###################################
# PERFORM THE FIT
###################################
TS_nullH_list = []
TS_bestH_list = []
unbinned_TS_nullH_list = []
unbinned_TS_bestH_list = []
unbinned_ampl_fitted_list = []
ampl_fitted_list = []
unbinned_index_fitted_list = []
index_fitted_list = []
unbinned_a_fitted_list = []
a_fitted_list = []
i = 0
for unbinned_idataset, idataset in zip(unbinned_datasets, datasets):
# Unbinned
unbinned_TS_nullH_list.append(unbinned_idataset.stat_sum())
unbinned_fit = Fit()
result = unbinned_fit.optimize(unbinned_idataset)
unbinned_TS_bestH_list.append(unbinned_idataset.stat_sum())
print("Fit number :", i)
a_fitted = result.parameters["a"].value
index_fitted = result.parameters["index"].value
ampl_fitted = result.parameters["amplitude"].value
print("Unbinned Result : a =", a_fitted)
print("Unbinned Result : index =", index_fitted)
print("Unbinned Result : ampl. =", ampl_fitted)
unbinned_a_fitted_list.append(a_fitted)
unbinned_index_fitted_list.append(index_fitted)
unbinned_ampl_fitted_list.append(ampl_fitted)
# Binned
TS_nullH_list.append(idataset.stat_sum())
fit = Fit()
result = fit.optimize(idataset)
TS_bestH_list.append(idataset.stat_sum())
a_fitted = result.parameters["a"].value
index_fitted = result.parameters["index"].value
ampl_fitted = result.parameters["amplitude"].value
print("Binned Result : a =", a_fitted)
print("Binned Result : index =", index_fitted)
print("Binned Result : ampl. =", ampl_fitted,"\n")
a_fitted_list.append(a_fitted)
index_fitted_list.append(index_fitted)
ampl_fitted_list.append(ampl_fitted)
i += 1
unbinned_a_fitted_list = np.array(unbinned_a_fitted_list)
a_fitted_list = np.array(a_fitted_list)
unbinned_index_fitted_list = np.array(unbinned_index_fitted_list)
index_fitted_list = np.array(index_fitted_list)
unbinned_ampl_fitted_list = np.array(unbinned_ampl_fitted_list)
ampl_fitted_list = np.array(ampl_fitted_list)
TS_nullH_list = np.array(TS_nullH_list)
TS_bestH_list = np.array(TS_bestH_list)
unbinned_TS_nullH_list = np.array(unbinned_TS_nullH_list)
unbinned_TS_bestH_list = np.array(unbinned_TS_bestH_list)
###Output
Fit number : 0
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4978104127781338
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4963186988907884
Binned Result : ampl. = 1e-11
Fit number : 1
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5003909308140595
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4990972389562356
Binned Result : ampl. = 1e-11
Fit number : 2
Unbinned Result : a = 0.0
Unbinned Result : index = 2.496375865645256
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.495039543115904
Binned Result : ampl. = 1e-11
Fit number : 3
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5003680151135663
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.498991088246135
Binned Result : ampl. = 1e-11
Fit number : 4
Unbinned Result : a = 0.0
Unbinned Result : index = 2.501428394556427
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.499921906986852
Binned Result : ampl. = 1e-11
Fit number : 5
Unbinned Result : a = 0.0
Unbinned Result : index = 2.50061281568051
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.499403055570424
Binned Result : ampl. = 1e-11
Fit number : 6
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5073269429270284
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.5057906017091143
Binned Result : ampl. = 1e-11
Fit number : 7
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4904376878610273
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.488960976430999
Binned Result : ampl. = 1e-11
Fit number : 8
Unbinned Result : a = 0.0
Unbinned Result : index = 2.504630629985323
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.5030344269475315
Binned Result : ampl. = 1e-11
Fit number : 9
Unbinned Result : a = 0.0
Unbinned Result : index = 2.500508832151957
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.499048212930052
Binned Result : ampl. = 1e-11
Fit number : 10
Unbinned Result : a = 0.0
Unbinned Result : index = 2.496139594497718
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.494719972343968
Binned Result : ampl. = 1e-11
Fit number : 11
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4975513824078535
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4961551021095936
Binned Result : ampl. = 1e-11
Fit number : 12
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4988789310827286
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4974566197106465
Binned Result : ampl. = 1e-11
Fit number : 13
Unbinned Result : a = 0.0
Unbinned Result : index = 2.508146334500514
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.5066192978245954
Binned Result : ampl. = 1e-11
Fit number : 14
Unbinned Result : a = 0.0
Unbinned Result : index = 2.504162083371151
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.502828083206523
Binned Result : ampl. = 1e-11
Fit number : 15
Unbinned Result : a = 0.0
Unbinned Result : index = 2.508154294715987
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.506836073854764
Binned Result : ampl. = 1e-11
Fit number : 16
Unbinned Result : a = 0.0
Unbinned Result : index = 2.50063889836186
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4992380773674654
Binned Result : ampl. = 1e-11
Fit number : 17
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4983997422989885
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.496873811342433
Binned Result : ampl. = 1e-11
Fit number : 18
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4954258977030768
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.493900674666685
Binned Result : ampl. = 1e-11
Fit number : 19
Unbinned Result : a = 0.0
Unbinned Result : index = 2.49459989630945
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.493048164043145
Binned Result : ampl. = 1e-11
Fit number : 20
Unbinned Result : a = 0.0
Unbinned Result : index = 2.507585670644119
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.506122712809962
Binned Result : ampl. = 1e-11
Fit number : 21
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4948492383913763
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4934758853765024
Binned Result : ampl. = 1e-11
Fit number : 22
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4962872973194967
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4949804243617657
Binned Result : ampl. = 1e-11
Fit number : 23
Unbinned Result : a = 0.0
Unbinned Result : index = 2.499793628697306
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.498482922012444
Binned Result : ampl. = 1e-11
Fit number : 24
Unbinned Result : a = 0.0
Unbinned Result : index = 2.498844540118475
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4973503543812923
Binned Result : ampl. = 1e-11
Fit number : 25
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5120942774227277
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.5106496568393215
Binned Result : ampl. = 1e-11
Fit number : 26
Unbinned Result : a = 0.0
Unbinned Result : index = 2.497539406680584
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4961995539756296
Binned Result : ampl. = 1e-11
Fit number : 27
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4992750377525796
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4980335136940637
Binned Result : ampl. = 1e-11
Fit number : 28
Unbinned Result : a = 0.0
Unbinned Result : index = 2.500360476425023
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.49890335986189
Binned Result : ampl. = 1e-11
Fit number : 29
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5061183243890373
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.5047492998320737
Binned Result : ampl. = 1e-11
Fit number : 30
Unbinned Result : a = 0.0
Unbinned Result : index = 2.502926960496609
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.501628780157326
Binned Result : ampl. = 1e-11
Fit number : 31
Unbinned Result : a = 0.0
Unbinned Result : index = 2.49984513966961
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4986365023293624
Binned Result : ampl. = 1e-11
Fit number : 32
Unbinned Result : a = 0.0
Unbinned Result : index = 2.509552829161084
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.508264918162454
Binned Result : ampl. = 1e-11
Fit number : 33
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4942077597190218
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.492716101915375
Binned Result : ampl. = 1e-11
Fit number : 34
Unbinned Result : a = 0.0
Unbinned Result : index = 2.494346534345484
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.492862618913001
Binned Result : ampl. = 1e-11
Fit number : 35
Unbinned Result : a = 0.0
Unbinned Result : index = 2.500952342400879
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4995723905839093
Binned Result : ampl. = 1e-11
Fit number : 36
Unbinned Result : a = 0.0
Unbinned Result : index = 2.504193869770363
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.5028253971430807
Binned Result : ampl. = 1e-11
Fit number : 37
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5091413207909876
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.507593567107369
Binned Result : ampl. = 1e-11
Fit number : 38
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5010059546520798
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.499808203487913
Binned Result : ampl. = 1e-11
Fit number : 39
Unbinned Result : a = 0.0
Unbinned Result : index = 2.505072462697355
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.5036675164371895
Binned Result : ampl. = 1e-11
Fit number : 40
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5012454943057407
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4997992068307893
Binned Result : ampl. = 1e-11
Fit number : 41
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4958118974242076
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.494165104939351
Binned Result : ampl. = 1e-11
Fit number : 42
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4995338619160767
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.498351897001521
Binned Result : ampl. = 1e-11
Fit number : 43
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4998759651666798
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4984879723166227
Binned Result : ampl. = 1e-11
Fit number : 44
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5071480784012987
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.5057020743898692
Binned Result : ampl. = 1e-11
Fit number : 45
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4990465316886974
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.497702611850697
Binned Result : ampl. = 1e-11
Fit number : 46
Unbinned Result : a = 0.0
Unbinned Result : index = 2.50082890172023
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4993858367423787
Binned Result : ampl. = 1e-11
Fit number : 47
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4945889826437386
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.493021372423741
Binned Result : ampl. = 1e-11
Fit number : 48
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5015058483606576
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.500160829123487
Binned Result : ampl. = 1e-11
Fit number : 49
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4964514473176864
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.494930143818173
Binned Result : ampl. = 1e-11
Fit number : 50
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5048390998081542
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.5033611747344655
Binned Result : ampl. = 1e-11
Fit number : 51
Unbinned Result : a = 0.0
Unbinned Result : index = 2.502477675324557
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.5010590468023146
Binned Result : ampl. = 1e-11
Fit number : 52
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5022213018148944
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.5007443474921107
Binned Result : ampl. = 1e-11
Fit number : 53
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5041353949028133
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.5026644564986427
Binned Result : ampl. = 1e-11
Fit number : 54
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4999494854734623
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4985073892219063
Binned Result : ampl. = 1e-11
Fit number : 55
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4970549873597334
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4955911758811173
Binned Result : ampl. = 1e-11
Fit number : 56
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5033413230142614
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.5019290618141197
Binned Result : ampl. = 1e-11
Fit number : 57
Unbinned Result : a = 0.0
Unbinned Result : index = 2.494479008193802
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4931102180924376
Binned Result : ampl. = 1e-11
Fit number : 58
Unbinned Result : a = 0.0
Unbinned Result : index = 2.50026949869222
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.498934424981225
Binned Result : ampl. = 1e-11
Fit number : 59
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5091404868467624
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.5077082153757573
Binned Result : ampl. = 1e-11
Fit number : 60
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4987122593606457
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.497362668542928
Binned Result : ampl. = 1e-11
Fit number : 61
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4972606543782367
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.495892093157715
Binned Result : ampl. = 1e-11
Fit number : 62
Unbinned Result : a = 0.0
Unbinned Result : index = 2.504437520094006
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.502925784793221
Binned Result : ampl. = 1e-11
Fit number : 63
Unbinned Result : a = 0.0
Unbinned Result : index = 2.500806028038603
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4991673821351412
Binned Result : ampl. = 1e-11
Fit number : 64
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4955547537300293
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.494044910335157
Binned Result : ampl. = 1e-11
Fit number : 65
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4962488461353964
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4949183002501196
Binned Result : ampl. = 1e-11
Fit number : 66
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4977141460341
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.496287809809846
Binned Result : ampl. = 1e-11
Fit number : 67
Unbinned Result : a = 0.0
Unbinned Result : index = 2.506853454699976
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.505471630029601
Binned Result : ampl. = 1e-11
Fit number : 68
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4965593680357396
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4951230667892634
Binned Result : ampl. = 1e-11
Fit number : 69
Unbinned Result : a = 0.0
Unbinned Result : index = 2.497871329625232
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4964507687254884
Binned Result : ampl. = 1e-11
Fit number : 70
Unbinned Result : a = 0.0
Unbinned Result : index = 2.508109050417455
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.50673920277009
Binned Result : ampl. = 1e-11
Fit number : 71
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4962068652859606
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.495043797766035
Binned Result : ampl. = 1e-11
Fit number : 72
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4972392969788197
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4960260125782168
Binned Result : ampl. = 1e-11
Fit number : 73
Unbinned Result : a = 0.0
Unbinned Result : index = 2.502143946078717
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.500698703076981
Binned Result : ampl. = 1e-11
Fit number : 74
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5010527102044575
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.499694438774238
Binned Result : ampl. = 1e-11
Fit number : 75
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4992141423665992
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4978830310397915
Binned Result : ampl. = 1e-11
Fit number : 76
Unbinned Result : a = 0.0
Unbinned Result : index = 2.495177047734762
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.493800526322983
Binned Result : ampl. = 1e-11
Fit number : 77
Unbinned Result : a = 0.0
Unbinned Result : index = 2.50763978711161
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.506207896421485
Binned Result : ampl. = 1e-11
Fit number : 78
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4914769033528876
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4899607250359947
Binned Result : ampl. = 1e-11
Fit number : 79
Unbinned Result : a = 0.0
Unbinned Result : index = 2.504071564558397
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.5026688904339944
Binned Result : ampl. = 1e-11
Fit number : 80
Unbinned Result : a = 0.0
Unbinned Result : index = 2.499957379172257
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4983827672956846
Binned Result : ampl. = 1e-11
Fit number : 81
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4977167353398912
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4960921496824975
Binned Result : ampl. = 1e-11
Fit number : 82
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4998723317889513
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.498404367852182
Binned Result : ampl. = 1e-11
Fit number : 83
Unbinned Result : a = 0.0
Unbinned Result : index = 2.498744860443645
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.497343392812511
Binned Result : ampl. = 1e-11
Fit number : 84
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4889168594949336
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.487582838382494
Binned Result : ampl. = 1e-11
Fit number : 85
Unbinned Result : a = 0.0
Unbinned Result : index = 2.500709885989481
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4992805590174183
Binned Result : ampl. = 1e-11
Fit number : 86
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4933473251960963
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4918419726045475
Binned Result : ampl. = 1e-11
Fit number : 87
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5046516164528416
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.5032356823610185
Binned Result : ampl. = 1e-11
Fit number : 88
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5033548970115063
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.502010079995326
Binned Result : ampl. = 1e-11
Fit number : 89
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4996617209361642
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4983573090503715
Binned Result : ampl. = 1e-11
Fit number : 90
Unbinned Result : a = 0.0
Unbinned Result : index = 2.499765387256433
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.498388543400687
Binned Result : ampl. = 1e-11
Fit number : 91
Unbinned Result : a = 0.0
Unbinned Result : index = 2.497009681795155
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4955896943585176
Binned Result : ampl. = 1e-11
Fit number : 92
Unbinned Result : a = 0.0
Unbinned Result : index = 2.508183300907704
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.506750172898637
Binned Result : ampl. = 1e-11
Fit number : 93
Unbinned Result : a = 0.0
Unbinned Result : index = 2.499653003219341
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.49807991850697
Binned Result : ampl. = 1e-11
Fit number : 94
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4990322053769383
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4977111209707767
Binned Result : ampl. = 1e-11
Fit number : 95
Unbinned Result : a = 0.0
Unbinned Result : index = 2.4951870903025615
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.493745484170349
Binned Result : ampl. = 1e-11
Fit number : 96
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5011889754221026
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4995300489696874
Binned Result : ampl. = 1e-11
Fit number : 97
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5004900200578803
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.499068820707492
Binned Result : ampl. = 1e-11
Fit number : 98
Unbinned Result : a = 0.0
Unbinned Result : index = 2.5013993994310737
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.4999647387069794
Binned Result : ampl. = 1e-11
Fit number : 99
Unbinned Result : a = 0.0
Unbinned Result : index = 2.507584868085439
Unbinned Result : ampl. = 1e-11
Binned Result : a = 0.0
Binned Result : index = 2.5061725821244574
Binned Result : ampl. = 1e-11
###Markdown
SHOW RESULT: Check if the Statistic is a Chi2 variable
###Code
unbinned_TS = unbinned_TS_nullH_list - unbinned_TS_bestH_list
TS = TS_nullH_list - TS_bestH_list
fig, ax = plt.subplots(figsize=(8,5),nrows=1, ncols=1)
ax.scatter(unbinned_TS, TS)
ax.set_xlabel("Unbinned TS")
ax.set_ylabel("Binned TS")
fig, ax = plt.subplots(figsize=(25,10),nrows=1, ncols=2)
DOF = 1 # Number of free parameters that have been fitted
min_TS = np.min(unbinned_TS)
max_TS = np.max(unbinned_TS)
TS_bins = np.linspace(min_TS ,max_TS,1000)
TS_CDF = np.array([np.sum( unbinned_TS < i ) for i in TS_bins])/len(unbinned_TS )
ax[0].plot(TS_bins, TS_CDF , color='black', label="Observed CDF")
Chi2_CDF = chi2.cdf(TS_bins,df=DOF )
ax[0].plot(TS_bins,Chi2_CDF, color='black',alpha=0.3, linewidth=4, label=r"CDF of $\chi^2_{df = "+str(DOF) +"}$")
ax[0].set_ylabel("CDF")
ax[0].set_xlabel("Statistic")
ax[0].set_title("CDF of the Statistic for the Unbinned case")
ax[0].legend(loc="lower right")
min_TS = np.min(TS)
max_TS = np.max(TS)
TS_bins = np.linspace(min_TS ,max_TS,1000)
TS_CDF = np.array([np.sum( TS < i ) for i in TS_bins])/len(TS )
ax[1].plot(TS_bins, TS_CDF , color='black', label="Observed CDF")
Chi2_CDF = chi2.cdf(TS_bins,df=DOF )
ax[1].plot(TS_bins,Chi2_CDF, color='black',alpha=0.3, linewidth=4, label=r"CDF of $\chi^2_{df = "+str(DOF) +"}$")
ax[1].set_ylabel("CDF")
ax[1].set_xlabel("Statistic")
ax[1].set_title("CDF of the Statistic for the Binned case")
ax[1].legend(loc="lower right")
###Output
_____no_output_____
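###Markdown
As a complementary, more quantitative check (an added sketch, not part of the original analysis), a one-sample Kolmogorov-Smirnov test can compare the empirical distributions of the two statistics computed above with a chi-squared distribution with DOF degrees of freedom; it reuses the unbinned_TS and TS arrays and assumes scipy is available.
###Code
from scipy.stats import chi2, kstest
# Compare the empirical TS distributions with chi2(DOF); a large p-value means no evidence against the chi2 hypothesis
DOF = 1
print("Unbinned TS vs chi2:", kstest(unbinned_TS, chi2(df=DOF).cdf))
print("Binned TS vs chi2:", kstest(TS, chi2(df=DOF).cdf))
###Output
_____no_output_____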
###Markdown
FOR THE PARAMETER a
###Code
fig, axs = plt.subplots(figsize=(20,8),nrows=1, ncols=2)
bins = 20
bins = np.linspace(13,17,20)
true_val = my_custom_model.parameters["a"].value
axs[0].hist(unbinned_a_fitted_list, bins=bins, alpha=0.5)
axs[0].axvline(x=true_val, color="red")
axs[0].set_xlabel("a")
axs[0].set_title("Unbinned - fitted values")
print(f"Unbinned a : {unbinned_a_fitted_list.mean()} += {unbinned_a_fitted_list.std()}")
axs[1].hist(a_fitted_list, bins=bins, alpha=0.5)
axs[1].axvline(x=true_val, color="red")
axs[1].set_xlabel("a")
axs[1].set_title("Binned - fitted values")
print(f"Binned a : {a_fitted_list.mean()} += {a_fitted_list.std()}")
fig, axs = plt.subplots(figsize=(15,7),)
axs.scatter(a_fitted_list, unbinned_a_fitted_list)
x = np.linspace( np.min( [a_fitted_list, unbinned_a_fitted_list] ) ,np.max( [a_fitted_list, unbinned_a_fitted_list]) ,100)
axs.plot(x,x)
axs.set_title("Comparison unbinned vs binned fit values")
axs.set_xlabel("a - Binned")
axs.set_ylabel("a - Unbinned")
axs.set_ylim([12,17])
axs.set_xlim([9,15])
###Output
_____no_output_____
###Markdown
FOR THE PARAMETER INDEX
###Code
fig, axs = plt.subplots(figsize=(20,8),nrows=1, ncols=2)
bins = 20
bins = np.linspace(2.425,2.60,20)
true_val = my_custom_model.parameters["index"].value
axs[0].hist(unbinned_index_fitted_list, bins=bins, alpha=0.5)
axs[0].axvline(x=true_val, color="red")
axs[0].set_xlabel("Index")
axs[0].set_title("Unbinned - fitted values")
print(f"Unbinned index : {unbinned_index_fitted_list.mean()} += {unbinned_index_fitted_list.std()}")
axs[1].hist(index_fitted_list, bins=bins, alpha=0.5)
axs[1].axvline(x=true_val, color="red")
axs[1].set_xlabel("Index")
axs[1].set_title("Binned - fitted values")
print(f"Binned index : {index_fitted_list.mean()} += {index_fitted_list.std()}")
fig, axs = plt.subplots(figsize=(15,7),)
axs.scatter(index_fitted_list, unbinned_index_fitted_list)
x = np.linspace( np.min( [index_fitted_list, unbinned_index_fitted_list] ) ,np.max( [index_fitted_list, unbinned_index_fitted_list]) ,100)
axs.plot(x,x)
axs.set_title("Comparison unbinned vs binned fit values")
axs.set_xlabel("Index - Binned")
axs.set_ylabel("Index - Unbinned")
#axs.set_ylim([2.46,2.54])
#axs.set_xlim([3.25,3.325])
###Output
_____no_output_____
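###Markdown
A quick numerical summary of the comparison above (an added sketch, assuming my_custom_model still holds the injected parameter values used for the simulations): the bias is the mean fitted index minus the true index, and the scatter is the standard deviation of the fitted values.
###Code
true_index = my_custom_model.parameters["index"].value
print(f"Unbinned index: bias = {unbinned_index_fitted_list.mean() - true_index:.5f}, scatter = {unbinned_index_fitted_list.std():.5f}")
print(f"Binned index: bias = {index_fitted_list.mean() - true_index:.5f}, scatter = {index_fitted_list.std():.5f}")
###Output
_____no_output_____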
###Markdown
FOR THE PARAMETER AMPLITUDE
###Code
fig, axs = plt.subplots(figsize=(20,8),nrows=1, ncols=2)
nbins = 20
bins = np.linspace(0.85,1.15,20)*1e-11
true_val = my_custom_model.parameters["amplitude"].value
axs[0].hist(unbinned_ampl_fitted_list, bins=bins, alpha=0.5)
axs[0].axvline(x=true_val, color="red")
axs[0].set_xlabel("Amplitude")
axs[0].set_title("Unbinned - fitted values")
print(f"Unbinned index : {unbinned_ampl_fitted_list.mean()} += {unbinned_ampl_fitted_list.std()}")
axs[1].hist(ampl_fitted_list, bins=bins, alpha=0.5)
axs[1].axvline(x=true_val, color="red")
axs[1].set_xlabel("Amplitude")
axs[1].set_title("Binned - fitted values")
print(f"Binned index : {ampl_fitted_list.mean()} += {ampl_fitted_list.std()}")
fig, axs = plt.subplots(figsize=(15,7),)
axs.scatter(ampl_fitted_list, unbinned_ampl_fitted_list)
x = np.linspace( np.min( [ampl_fitted_list, unbinned_ampl_fitted_list] ) ,np.max( [ampl_fitted_list, unbinned_ampl_fitted_list]) ,100)
axs.plot(x,x)
axs.set_title("Comparison unbinned vs binned fit values")
axs.set_xlabel("Amplitude - Binned")
axs.set_ylabel("Amplitude - Unbinned")
###Output
_____no_output_____ |
2016/tutorial_final/202/neural_network.ipynb | ###Markdown
Neural Networks Introduction
In this tutorial, I will build a classifier based on neural networks without using a Python neural-network package such as PyBrain. An artificial neural network is an interconnected group of nodes, akin to the vast network of neurons in a brain. Each circular node represents an artificial neuron and an arrow represents a connection from the output of one neuron to the input of another. The goal of a neural network is to solve problems in the same way that the human brain would. Modern artificial neural networks typically work with thousands to a few million neural units and millions of connections. Neural networks are currently used heavily in computer vision and speech recognition.
Build model for neural networks
In this tutorial, we are going to build a three-layer neural network, which is the simplest neural-network structure, but it can easily be extended to an N-layer network by adding more hidden layers to the model. In the three-layer network we have just one hidden layer, together with one input layer and one output layer. The number of nodes in the input layer is determined by the dimension of our input data; for example, with two-dimensional inputs such as 2-D points we have two nodes in the input layer. The number of nodes in the output layer is determined by the number of classes into which we want to classify the data; for example, to classify our data into two classes we have two nodes in the output layer. Choosing the size of the hidden layer is more complex. Although we can freely choose the number of nodes in the hidden layer, there are trade-offs: with too few hidden nodes the model may not fit the data accurately, while with too many hidden nodes prediction requires more computation and the model is more likely to overfit. There is no formula that gives the number of hidden nodes with the best performance, but there is a rule of thumb proposed by Jeff Heaton, author of Introduction to Neural Networks in Java, saying that 'the optimal size of the hidden layer is usually between the size of the input and size of the output layers'. We also have to choose the activation functions. Although there are many different activation functions, we use tanh for the hidden layer and softmax for the output layer, as in the formulas below.
Make prediction using neural network model
We use forward propagation in our model. With two nodes in the input layer and two nodes in the output layer, the forward pass can be written as follows.$$z_{1} = x*W_{1} + b_{1}$$$$a_{1} = tanh(z_{1})$$$$z_{2} = a_{1}*W_{2} + b_{2}$$$$a_{2} = \hat{y} = softmax(z_{2})$$We denote the input layer as layer 0 and the output layer as layer n, which is 2 in our model. $z_{i}$ is the input of layer i and the output of layer i-1, and $a_{i}$ is the output of layer i. $W_{1}$, $b_{1}$, $W_{2}$, $b_{2}$ are the parameters of the network; their sizes are determined by the number of input nodes, the number of output nodes, and the number of nodes in each hidden layer.
Learn the parameters
To determine the parameters, we minimize the error on our training data. To define the error, we use a loss function. For a softmax output, a popular choice is the cross-entropy loss function.
If we have N training examples and C classes, then the loss for our prediction $\hat{y}$ with respect to the true labels y is given by the following formula:$$L(y,\hat{y}) = -\frac{1}{N}\sum_{n=1}^{N}\sum_{i=1}^{C}y_{n,i}\log\hat{y}_{n,i}$$To find its minimum we use gradient descent; in this project we implement batch gradient descent with a fixed learning rate. Batch gradient descent computes the gradient of the cost function over the entire training dataset and updates the parameters with the following rule:$$\theta = \theta - \eta \nabla_{\theta}J(\theta)$$
###Code
from IPython.display import Image
Image("neural_network.png")
###Output
_____no_output_____
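###Markdown
A tiny numeric illustration of the cross-entropy loss defined above (with made-up probabilities, not values from the dataset used later): for a single sample whose true class is 1 and whose predicted class probabilities are [0.2, 0.8], the loss is -log(0.8).
###Code
import math
# cross-entropy loss for one sample: true class 1, predicted probabilities [0.2, 0.8]
y_hat = [0.2, 0.8]
true_class = 1
print(-math.log(y_hat[true_class])) # about 0.223; a perfect prediction would give 0
###Output
_____no_output_____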
###Markdown
Implementation
###Code
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets

# Generate a dataset and plot it
def create_dataset():
(X, y) = datasets.make_moons(n_samples=400, noise=0.20)
return (X, y)
# TEST_START
(X, y) = create_dataset()
print(len(X))
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
# TEST_STOP
###Output
400
###Markdown
Step1 : Define variables and parameters
###Code
class Config:
#the number of node in the input layer
INPUT_DIMENSION = 2
#the number of node in the output layer
OUTPUT_DIMENSION = 2
#learning rate in gradient descent, which is also the step size when doing gradient descent
EPSILON = 0.01
#regulation parameter when calculating the loss function
LAMBDA = 0.01
###Output
_____no_output_____
###Markdown
Step2 : Implement the loss function
In this part, we calculate the loss for our samples and true labels given the current model parameters. The function we use is as follows:$$L(y,\hat{y}) = -\frac{1}{N}\sum_{n=1}^{N}\sum_{i=1}^{C}y_{n,i}\log(\hat{y}_{n,i})$$In the implementation below we also add an L2 regularization term $\frac{\lambda}{2}(\lVert W_{1}\rVert^{2} + \lVert W_{2}\rVert^{2})$ to penalize large weights.
###Code
# loss fundction for sample
def loss_function(model, X, y):
# num_examples = len(X) # training set size
W1 = model['W1']
W2 = model['W2']
b1 = model['b1']
b2 = model['b2']
# forward propagation: calculate the softmax probability of each class for every sample
z1 = X.dot(W1) + b1
a1 = np.tanh(z1)
z2 = a1.dot(W2) + b2
exp_val = np.exp(z2)
softmax_val = exp_val / np.sum(exp_val, axis=1, keepdims=True)
# calculate the cross entropy loss over the samples and add the L2 regularization term
log_likelihood = -np.log(softmax_val[range(len(X)), y])
data_loss = np.sum(log_likelihood)
data_loss += Config.LAMBDA / 2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)))
return 1. / len(X) * data_loss
###Output
_____no_output_____
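###Markdown
Before moving on, a quick sanity check of the loss function (an added check, not part of the original tutorial): an untrained model with near-zero weights predicts roughly 0.5 for both classes, so its loss should be close to $-\log(0.5) \approx 0.693$ on this balanced two-class dataset.
###Code
# Sanity check: loss of a random, near-zero model on the training data should be about 0.693
random_model = {'W1': np.random.randn(2, 3) * 0.01, 'b1': np.zeros((1, 3)), 'W2': np.random.randn(3, 2) * 0.01, 'b2': np.zeros((1, 2))}
print(loss_function(random_model, X, y))
###Output
_____no_output_____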
###Markdown
Step3 : Implement the prediction function for output
In this part, we implement a method for making predictions from the network output. With the known parameters of the neural network, we can calculate the softmax values for our samples X. The output is an N * 2 matrix (N is the number of input samples and 2 is the number of output nodes). For each sample, the index of the largest value indicates the predicted class, which is what we return.
###Code
# predict for samples, which the output is 0 or 1 in our case with two nodes for output layer
def predict(model, x):
W1 = model['W1']
W2 = model['W2']
b1 = model['b1']
b2 = model['b2']
# calculate the softmax value for the output and find the max value which indicates the class for each sample
z1 = x.dot(W1) + b1
a1 = np.tanh(z1)
z2 = a1.dot(W2) + b2
exp_val = np.exp(z2)
softmax_val = exp_val / np.sum(exp_val, axis=1, keepdims=True)
# return the prediction for each sample
return np.argmax(softmax_val, axis=1)
# TEST_START
mu = 0
sigma = 0.20
test_x = np.random.normal(mu,sigma,2)
print(test_x)
predict_label = predict(model,test_x)
print(predict_label)
# TEST_STOP
###Output
[-0.41073408 0.13656888]
[1]
###Markdown
Step4 : Implement the training function for building the neural network model
In this part, we define a method to build our neural network model. There are several steps. First, we initialize the parameters of the model. W1 and W2 must not be zero matrices, otherwise the corresponding nodes stay inactive; their values are then tuned during gradient descent. b1 and b2 can start from any value, because they are adapted during the gradient descent iterations. Second, we run gradient descent to minimize the loss function and optimize the parameters of the network. The equations are as follows:
1/ Forward Propagation$$z_{1} = x*W_{1} + b_{1}$$$$a_{1} = tanh(z_{1})$$$$z_{2} = a_{1}*W_{2} + b_{2}$$$$a_{2} = \hat{y} = softmax(z_{2})$$
2/ Backward Propagation$$\delta _{3} = \hat{y} - y$$$$\delta _{2} = (1-tanh^{2}z_{1})\cdot \delta _{3}W_{2}^{T}$$$$\frac{\partial L}{\partial W_{2}} = a_{1}^{T}\delta _{3}$$$$\frac{\partial L}{\partial b_{2}} = \delta _{3}$$$$\frac{\partial L}{\partial W_{1}} = x^{T}\delta _{2}$$$$\frac{\partial L}{\partial b_{1}} = \delta _{2}$$
Then, through a number of iterations, we obtain our model parameters.
###Code
def build_model(X, y, nn_hdim, num_passes=20000, print_loss=False):
# Initialize the parameters to random values. We need to learn these.
num_examples = len(X)
# np.random.seed(0)
# initial W1 randomly, using while to make sure that W1 is not zero matrix
W1 = np.random.randn(Config.INPUT_DIMENSION, nn_hdim) / np.sqrt(Config.INPUT_DIMENSION)
while not np.any(W1):
W1 = np.random.randn(Config.INPUT_DIMENSION, nn_hdim) / np.sqrt(Config.INPUT_DIMENSION)
# initial b1 to be random value ,b1 can be zero of course
b1 = np.random.randn(1, nn_hdim)
# initial W2 randomly, using while to make sure that W2 is not zero matrix
W2 = np.random.randn(nn_hdim, Config.OUTPUT_DIMENSION) / np.sqrt(nn_hdim)
while not np.any(W2):
W2 = np.random.randn(nn_hdim, Config.OUTPUT_DIMENSION) / np.sqrt(nn_hdim)
# initial b2 to be random value, b2 can be zero of course
b2 = np.random.randn(1, Config.OUTPUT_DIMENSION)
# This is what we return at the end
model = {}
# Gradient descent. For each batch...
for i in range(0, num_passes):
# Forward propagation
z1 = X.dot(W1) + b1
a1 = np.tanh(z1)
z2 = a1.dot(W2) + b2
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
# Back propagation
delta3 = probs
delta3[range(num_examples), y] -= 1
dW2 = (a1.T).dot(delta3)
db2 = np.sum(delta3, axis=0, keepdims=True)
delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))
dW1 = np.dot(X.T, delta2)
db1 = np.sum(delta2, axis=0)
# Add regularization terms (b1 and b2 don't have regularization terms)
dW2 += Config.LAMBDA * W2
dW1 += Config.LAMBDA * W1
# Gradient descent parameter update
W1 += -Config.EPSILON * dW1
b1 += -Config.EPSILON * db1
W2 += -Config.EPSILON * dW2
b2 += -Config.EPSILON * db2
# Assign new parameters to the model
model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}
# Optionally print the loss.
# This is expensive because it uses the whole dataset, so we don't want to do it too often.
if print_loss and i % 1000 == 0:
print("Loss after iteration %i: %f" % (i, loss_function(model, X, y)))
return model
###Output
_____no_output_____
###Markdown
Test
###Code
def plot(X, y, model):
plot_scatter_and_boundary(lambda x:predict(model,x), X, y)
plt.title("Neural Network")
def plot_scatter_and_boundary(pred_func, X, y):
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid
Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
# plt.show()
# Create test data
(X, y) = create_dataset()
# Build a model with a 3-dimensional hidden layer
model = build_model(X, y, 3, print_loss=True)
# Plot the decision boundary
plot(X, y, model)
###Output
Loss after iteration 0: 0.846439
Loss after iteration 1000: 0.073920
Loss after iteration 2000: 0.073646
Loss after iteration 3000: 0.073521
Loss after iteration 4000: 0.073453
Loss after iteration 5000: 0.073410
Loss after iteration 6000: 0.073381
Loss after iteration 7000: 0.073362
Loss after iteration 8000: 0.073349
Loss after iteration 9000: 0.073340
Loss after iteration 10000: 0.073334
Loss after iteration 11000: 0.073330
Loss after iteration 12000: 0.073327
Loss after iteration 13000: 0.073325
Loss after iteration 14000: 0.073324
Loss after iteration 15000: 0.073323
Loss after iteration 16000: 0.073322
Loss after iteration 17000: 0.073322
Loss after iteration 18000: 0.073322
Loss after iteration 19000: 0.073321
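###Markdown
As an additional check that is not part of the original tutorial, we can measure how well the fitted model reproduces the training labels. Note that this is training accuracy only; no hold-out set is used here, so it says nothing about generalization.
###Code
# Fraction of training points whose predicted class matches the true label
train_predictions = predict(model, X)
print("Training accuracy:", np.mean(train_predictions == y))
###Output
_____no_output_____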
|
UKGWA_London_2012.ipynb | ###Markdown
###Code
import requests; #used for connecting to the API
import sys
from time import sleep
from math import log
import os
from urllib.request import urlopen
import re
from operator import itemgetter
from matplotlib import pyplot as plt
import time
import timeit
if 'google.colab' in str(get_ipython()):
environment = "Colab"
elif 'BINDER_SERVICE_HOST' in os.environ:
environment = "Binder"
else:
environment = "Unknown"
# For development purposes only when making changes in Github
import shutil
shutil.rmtree('ComputationalAccess')
if environment == "Colab":
!git clone https://github.com/mark-bell-tna/ComputationalAccess.git
sys.path.insert(0, 'ComputationalAccess')
github_data = "ComputationalAccess/Data/"
os.listdir(github_data)
# Connect to gdrive
from google.colab import drive
drive.mount('/content/gdrive')
data_folder = "/content/gdrive/My Drive/Data/"
else:
github_data = "Data/"
data_folder = "Data/"
from ukgwa_index import UKGWAIndex
from text_utils import SuffixTree, text_to_parts
from web_structure import UKGWAStructure
from ukgwa_query import QueryEngine
from cdx_indexer import TemporalIndexer
from ukgwa_textindex import UKGWATextIndex
from ukgwa_linkcrawler import Crawl
# Get entries from AtoZ index
refresh = False
print("Getting index...")
ATOZ = UKGWAIndex()
if refresh:
ATOZ.indexfromweb() # Read the A to Z index from the UKGWA website
else:
ATOZ.indexfromfile(data_folder + "atoz_index.txt") # Read from a saved file
print("Loaded index...")
# Updated the entries with Discovery catalogue references
ATOZ.discoveryfromfile(github_data + "discovery_ukgwa_links.txt")
# Test the last command worked - should see a catalogue reference in last position of list
# If it says 'N' then try a few other numbers.
ATOZ.index['UKGWA.100']
# Only run this if you want to save the results of "indexfromweb" for next time
ATOZ.indextofile(data_folder + "atoz_index.txt")
# Index the text of the index to make it searchable and to get common ngrams
TI = UKGWATextIndex(stop_words = set(['on','for','and','of','&','the','in','to']))
print("Loading text to index...")
for key in ATOZ:
entry_type = ATOZ.get_field(key, 'CAT')
if entry_type != '*': # Archived web sites
continue # skip twitter, youtube, etc.
w_p = text_to_parts(ATOZ.get_field(key, 'TEXT'))
i = 0
for p in w_p:
if p[3] == "web":
continue
i += 1
TI.add_tokens(p[0].split(" "), key + "." + str(i))
print("Loaded")
# Print examples of top N common ngrams
print("Getting phrases...")
ph = TI.get_phrases(min_count=30, min_length=2)
ph.sort(key=itemgetter(3), reverse=True)
# Change N to view more phrases
N = 5
topNindex = {}
for row in ph[0:N]:
print(row)
WS = UKGWAStructure()
for identifier in ATOZ:
url = ATOZ.get_field(identifier, 'URL')
WS.add_entry(url, identifier)
Q = QueryEngine()
Q.add_view('TextIdx', TI)
Q.add_view('Domain', WS)
Q.add_view('AtoZ', ATOZ)
search_terms = ['Olympic Games','London 2012']
search_terms = [s.split(' ') for s in search_terms]
search_terms
match_ids = [x for x in Q.filter_view('TextIdx', 'NGRAM', '=', *search_terms)]
print("Matched:",len(match_ids),'Ids')
# Set query to include matching ids
_ = [Q.include(x[:-2]) for x in match_ids]
# Summarise included ids by web domain
domain_summary = {}
for identifier in Q:
url_parts = WS.index[identifier]
dt = WS.domaintotree(url_parts[WS.fields['NETLOC']], strip_www=True)
dom_name = ".".join(reversed(dt[:2]))
if dom_name in domain_summary:
domain_summary[dom_name] += 1
else:
domain_summary[dom_name] = 1
sorted_domains = sorted([(k,v) for k,v in domain_summary.items()], key=itemgetter(1), reverse=True)
print(sorted_domains)
# Bar chart of the figures derived above
x,y = zip(*sorted_domains)
plt.bar(x,y)
plt.xticks(rotation='vertical')
T = TemporalIndexer()
Q.add_view("Temp", T)
# This takes quite a while to run (approx 12 seconds for London 2012 example which is only 33 pages to lookup)
# Get snapshot data from the CDX API
print("Started",time.asctime())
for identifier in Q:
T.add_entry(ATOZ.get_field(identifier, 'URL'), identifier)
print("Finished", time.asctime())
# View a sample temporal record
print("Field names:",sorted([[x[1],x[0]] for x in T.fields.items()], key=itemgetter(0)))
for t in T:
print(T.lookup(t))
break
# Draw chart of earliest and latest snapshots by site
# Red for earliest, blue for latest
first_last_snapshots = [[idx] + T.lookup(idx, ['MIN','MAX']) for idx in Q]
first_last_snapshots = [x for x in first_last_snapshots if x[1] != 90000000000000] # Hide this in Temporal class. Odd bug in snapshot crawling code causes them
first_last_snapshots = [x for x in first_last_snapshots if x[2] != 0]
first_last_snapshots.sort(key=itemgetter(1)) # Sort by date of first snapshot
x_labels,y,z = zip(*first_last_snapshots)
x = [n for n in range(len(x_labels))]
d = [h for h in zip(y,z)]
y = [i for (i,j) in d]
z = [j for (i,j) in d]
plt.figure(figsize=(10,8))
plt.plot(y, x, 'rs', markersize = 4)
plt.plot(z, x, 'bo', markersize = 4)
plt.plot(([i for (i,j) in d], [j for (i,j) in d]),(x,x),c='black')
plt.xticks(rotation=45, ticks=[int(str(y) + '0101000000') for y in range(2005,2022)],
labels = [str(y) for y in range(2005,2022)])
plt.yticks(ticks=x,labels = x_labels,fontsize=8)
plt.show()
# Same data plotted by snapshot count
snapshot_counts = [[idx] + T.lookup(idx, ['COUNT']) for idx in Q]
x,y = zip(*snapshot_counts)
plt.figure(figsize=(10,8))
plt.bar(x,y)
plt.xticks(rotation='vertical')
plt.xticks(fontsize=8)
plt.show()
# What are the top N results?
N = 20
snapshot_counts.sort(key=itemgetter(1),reverse=True)
for idx in snapshot_counts[0:N]:
print(ATOZ.lookup(idx[0]), "Snapshots:",idx[1])
import matplotlib.dates as mdates
import numpy as np
month_summary = {}
plt.figure(figsize=(10,8))
for idx in snapshot_counts[0:N]:
snapshots = T.get_field(idx[0], 'CDX')
for c in snapshots:
iso_month = str(c)[0:4] + '-' + str(c)[4:6]
month = np.datetime64(iso_month, 'M')
if month in month_summary:
month_summary[month] += 1
else:
month_summary[month] = 1
summary_data = sorted([(x,y) for x,y in month_summary.items()], key=itemgetter(0))
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter('%Y')
import seaborn as sns
fig, ax = plt.subplots(figsize=(10,8))
ax.xaxis.set_major_locator(years)
ax.xaxis.set_minor_locator(months)
ax.xaxis.set_major_formatter(years_fmt)
ax.plot([x for x,y in summary_data], [y for x,y in summary_data], color='b')
plt.xticks(rotation='vertical')
ax.grid(True)
plt.show(ax)
# Get entries where first snapshot is less than 1/1/2010
t_filt = Q.filter_view("Temp", 'MIN', '<', 20100101000000)
for t in t_filt:
print(t, ATOZ.lookup(t))
# Get entries where first snapshot is after the Games (roughly)
t_filt = Q.filter_view("Temp", 'MIN', '>', 20120901000000)
for t in t_filt:
print(t, ATOZ.lookup(t))
T.lookup('UKGWA.1957')
# Get entries where last snapshot is before the Games (roughly)
t_filt = Q.filter_view("Temp", 'MAX', '<', 20120726000000)
for t in t_filt:
print(t, ATOZ.lookup(t))
# Get entries where latest snapshot is later than 2017
t_filt = Q.filter_view("Temp", 'MAX', '>', 20171231235959)
for t in t_filt:
print(t, ATOZ.lookup(t))
# Reset the query selection
Q.clear()
_ = [Q.include(x[:-2]) for x in match_ids]
# Summarise by catalogue series
series_summary = {}
for identifier in Q:
entry = ATOZ.lookup(identifier)
series = entry[ATOZ.fields['CATREF']].split(" ")[0]
if series in series_summary:
series_summary[series] += 1
else:
series_summary[series] = 1
sorted_series = sorted([(k,v) for k,v in series_summary.items()], key=itemgetter(1), reverse=True)
print(sorted_series)
# These codes will only be meaningful with a catalogue lookup
# Now what?
# Look at website change over time
# Extract content
# Topic model over time?
# Network view
# What is crawling HTML like through Colabs?
C = Crawl(ATOZ.get_field('UKGWA.3078','URL'), 'MB TNA Research')
C.links[0:5]
# Idea for visualising graph change over time
# From: https://stackoverflow.com/questions/56577154/matplotlib-heatmap-of-complex-numbes-modulus-and-phase-as-hue-and-value
def huevalueplot(cmplxarray):
# Creating the black cover layer
black = np.full((*cmplxarray.shape, 4), 0.)
black[:,:,-1] = np.abs(cmplxarray) / np.abs(cmplxarray).max()
black[:,:,-1] = 1 - black[:,:,-1]
# Actual plot
fig, ax = plt.subplots()
# Plotting phases using 'hsv' colormap (the 'hue' part)
ax.imshow(np.angle(cmplxarray), cmap='plasma')
# Plotting the modulus array as the 'value' part
ax.imshow(black)
ax.set_axis_off()
size = 10
step = 1/size
results = np.zeros((size+1,size+1), dtype=complex)
for i in range(size+1):
for j in range(size+1):
results[i,j] = (step*i) + ((step*j) * 1j)
print(results)
huevalueplot(results)
page_dates = [[1,20], [6,14], [4,10], [2,18], [4,20]]
link_dates = [[0,1,8,12], [0,4,2,10], [3,4,15,15]]
size = 5
results = np.zeros((size,size), dtype=complex)
for ld in link_dates:
a = page_dates[ld[0]]
b = page_dates[ld[1]]
olp = [max(a[0],b[0]), min(a[1],b[1])]
olp_len = olp[1]-olp[0]+1
olp_mid = olp[1]-(olp_len/2)
a_half_1 = max(0, (olp_mid-ld[2]+1) / olp_mid)
#print(ld[0],ld[1],olp, olp_len, olp_mid, ld[2], a_half_1)
a_half_2 = max(0,(ld[3]-olp_mid) / olp_mid)
results[ld[0]][ld[1]] = a_half_1 + (a_half_2 * 1j)
# #results_half_1[ld[0]][ld[1]] += a_half_2 * 1j
# Plot
huevalueplot(results)
###Output
_____no_output_____ |
sec5exercise01.ipynb | ###Markdown
**Exercise 1**: Try to approximate $f'(1)$ where $f(x)=e^x$ and determine (using your code) the optimal step size in achieving the best accuracy.---
###Code
import math
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
$f(x) = e^x$
###Code
def f(x):
return math.e ** x
###Output
_____no_output_____
###Markdown
Consider the formula for approximating 1st derivative:$$f'(x_0) = \frac{-2f(x_0-3h)+9f(x_0-2h)-18f(x_0-h)+11f(x_0)}{6h} + O(h^3)$$
###Code
def fp(x,i):
h = 10 ** (-i)
return ( -2*f(x-3*h)+9*f(x-2*h)-18*f(x-h)+11*f(x) ) / (6*h)
###Output
_____no_output_____
###Markdown
Approximate $f'(1)$ and the error, and draw the "error graph"
###Code
x = []
y = []
for i in range(1,17):
#print("h = ",10**(-i),":")
x.append(-i)
#print("f'(-1) = ",fp(1,i))
e = abs(fp(1,i) - (math.e)**1)
#print("error = ", e) #่ชคๅทฎ
y.append(math.log10(e))
#print("----")
fig = plt.plot(x,y,label='error')
plt.xlabel('log10(h)', fontsize=20)
plt.ylabel('log10(error)', fontsize=20)
plt.legend()
plt.show()
###Output
_____no_output_____
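###Markdown
To locate the best step size programmatically instead of reading it off the graph, we can search the same range of h values for the smallest absolute error (a small added sketch reusing the f and fp defined above).
###Code
# Search h = 10**(-i) for i = 1..16 and report the step size with the smallest absolute error
errors = [(10**(-i), abs(fp(1, i) - math.e)) for i in range(1, 17)]
best_h, best_err = min(errors, key=lambda t: t[1])
print("best h =", best_h, ", error =", best_err)
###Output
_____no_output_____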
###Markdown
Conclusion: from the graph we can see that the error is smallest when h is approximately $10^{-4}$.
###Code
print(fp(1,4)-math.e)
###Output
-6.248335182590381e-13
###Markdown
The estimated value of $f'(1)$:
###Code
print(fp(1,4))
###Output
2.7182818284584203
|
s_5_DIC_inflow.ipynb | ###Markdown
DIC in the water column
###Code
import xarray as xr
import matplotlib.pyplot as plt

ds = xr.open_dataset('data/base/kz145_scale10000/water.nc')
dic_df = ds['B_C_DIC'].to_dataframe()
dic_surface = dic_df.groupby('z').get_group(1.250)
dic = dic_surface.loc['2011-01-01':'2011-12-31']
ds = xr.open_dataset('data/with_dic/means_from_data/water.nc')
dic_df = ds['B_C_DIC'].to_dataframe()
dic_surface = dic_df.groupby('z').get_group(1.250)
dic_with_flux = dic_surface.loc['2011-01-01':'2011-12-31']
dic = dic.reset_index()
dic_with_flux = dic_with_flux.reset_index()
fig = plt.figure(figsize=(10, 2))
ax = fig.add_subplot(1, 1, 1)
ax.plot(dic['time'], dic['B_C_DIC'], linewidth=2, label=r'DIC')
ax.plot(dic_with_flux['time'], dic_with_flux['B_C_DIC'],
linewidth=2, label=r'+ advective DIC flux; different means')
ax.legend(loc='best');
print("Mean is {}, min is {}, max is {}".format(
dic['B_C_DIC'].values.mean(),
dic['B_C_DIC'].values.min(),
dic['B_C_DIC'].values.max())) # no inflow
print("Mean is {}, min is {}, max is {}".format(
dic_with_flux['B_C_DIC'].values.mean(),
dic_with_flux['B_C_DIC'].values.min(),
dic_with_flux['B_C_DIC'].values.max())) # with inflow
###Output
Mean is 2146.589599609375, min is 1968.998291015625, max is 2213.5361328125
###Markdown
TA in the water column
###Code
ds = xr.open_dataset('data/base/kz145_scale10000/water.nc')
alk_df = ds['B_C_Alk'].to_dataframe()
alk_surface = alk_df.groupby('z').get_group(1.250)
alk = alk_surface.loc['2011-01-01':'2011-12-31']
ds = xr.open_dataset('data/with_dic/means_from_data/water.nc')
alk_df = ds['B_C_Alk'].to_dataframe()
alk_surface = alk_df.groupby('z').get_group(1.250)
alk_with_flux = alk_surface.loc['2011-01-01':'2011-12-31']
alk = alk.reset_index()
alk_with_flux = alk_with_flux.reset_index()
fig = plt.figure(figsize=(10, 2))
ax = fig.add_subplot(1, 1, 1)
ax.plot(alk['time'], alk['B_C_Alk'], linewidth=2, label=r'Alk')
ax.plot(alk_with_flux['time'], alk_with_flux['B_C_Alk'],
linewidth=2, label=r'+ advective TA flux; different means')
ax.legend(loc='upper left');
print("Mean is {}, min is {}, max is {}".format(
alk['B_C_Alk'].values.mean(),
alk['B_C_Alk'].values.min(),
alk['B_C_Alk'].values.max())) # no inflow
print("Mean is {}, min is {}, max is {}".format(
alk_with_flux['B_C_Alk'].values.mean(),
alk_with_flux['B_C_Alk'].values.min(),
alk_with_flux['B_C_Alk'].values.max())) # with inflow
###Output
Mean is 2292.516357421875, min is 2268.84765625, max is 2307.853271484375
###Markdown
The water column pCO$_2$
###Code
ds = xr.open_dataset('data/base/kz145_scale10000/water.nc')
co2_df = ds['B_C_pCO2'].to_dataframe()
co2_surface = co2_df.groupby('z').get_group(1.250)
co2_no = co2_surface.loc['2011-01-01':'2011-12-31']
ds = xr.open_dataset('data/with_dic/means_from_data/water.nc')
co2_df = ds['B_C_pCO2'].to_dataframe()
co2_surface = co2_df.groupby('z').get_group(1.250)
co2_with_flux = co2_surface.loc['2011-01-01':'2011-12-31']
co2_no = co2_no.reset_index()
co2_with_flux = co2_with_flux.reset_index()
co2_no['B_C_pCO2'] *= 1e6 # to convert to ppm
co2_with_flux['B_C_pCO2'] *= 1e6
atm_pCO2 = 390 # to be comparable with Thomas 2004
dco2_no = co2_no['B_C_pCO2']-atm_pCO2
dco2_with_flux = co2_with_flux['B_C_pCO2']-atm_pCO2
fig = plt.figure(figsize=(10, 3))
ax = fig.add_subplot(1, 1, 1)
ax.plot(co2_no['time'], dco2_no, linewidth=2, label=r'$\delta$pCO$_2$')
ax.plot(co2_with_flux['time'], dco2_with_flux,
linewidth=2, label=r'+ advective TA and DIC; different means')
plt.title('pCO$_2$ difference between seawater and atmosphere')
ax.legend(loc='best');
co2_no['B_C_pCO2'].mean() # no flux
co2_with_flux['B_C_pCO2'].mean() # with flux
print("Mean is {}, min is {}, max is {}".format(
dco2_no.mean(),
dco2_no.min(),
dco2_no.max()))
print("Mean is {}, min is {}, max is {}".format(
dco2_with_flux.mean(),
dco2_with_flux.min(),
dco2_with_flux.max())) # with inflow
###Output
Mean is 150.80862426757812, min is -223.962158203125, max is 545.74853515625
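###Markdown
A simple added summary of the curves above: the number of days on which the modelled surface water is supersaturated with respect to the atmosphere (positive $\delta$pCO$_2$).
###Code
# Count days with seawater pCO2 above the assumed atmospheric value of 390 ppm
print("Supersaturated days, base run:", int((dco2_no > 0).sum()), "of", len(dco2_no))
print("Supersaturated days, with advective TA and DIC:", int((dco2_with_flux > 0).sum()), "of", len(dco2_with_flux))
###Output
_____no_output_____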
###Markdown
Surface fluxes of CO$_2$
###Code
import numpy as np
import pandas as pd
ds = xr.open_dataset('data/base/kz145_scale10000/water.nc')
co2flux_df = ds['B_C_DIC _flux'].to_dataframe()
co2flux_surface = co2flux_df.groupby('z_faces').get_group(0)
co2flux_no = -co2flux_surface.loc['2011-01-01':'2011-12-31']
ds = xr.open_dataset('data/with_dic/means_from_data/water.nc')
co2flux_df = ds['B_C_DIC _flux'].to_dataframe()
co2flux_surface = co2flux_df.groupby('z_faces').get_group(0)
co2flux_alk = -co2flux_surface.loc['2011-01-01':'2011-12-31']
###Output
_____no_output_____
###Markdown
*Calculate the monthly CO$_2$ flux*
###Code
year = (('2011-01-01','2011-01-31'), ('2011-02-01','2011-02-28'), ('2011-03-01','2011-03-31'),
('2011-04-01','2011-04-30'), ('2011-05-01','2011-05-31'), ('2011-06-01','2011-06-30'),
('2011-07-01','2011-07-31'), ('2011-08-01','2011-08-31'), ('2011-09-01','2011-09-30'),
('2011-10-01','2011-10-31'), ('2011-11-01','2011-11-30'), ('2011-12-01','2011-12-31'))
co2flux_no_year = []
co2flux_alk_year = []
for month in year:
co2flux_no_month = co2flux_no.loc[month[0]:month[1]]
co2flux_alk_month = co2flux_alk.loc[month[0]:month[1]]
co2flux_no_year.append(co2flux_no_month['B_C_DIC _flux'].mean())
co2flux_alk_year.append(co2flux_alk_month['B_C_DIC _flux'].mean())
co2flux_no_year = np.array(co2flux_no_year)
co2flux_alk_year = np.array(co2flux_alk_year)
year_days = np.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
co2flux_no_monthly = co2flux_no_year*year_days/1000
co2flux_alk_monthly = co2flux_alk_year*year_days/1000
dates = pd.date_range('2011-01-01', '2012-01-01', freq='M')
fig = plt.figure(figsize=(10, 3))
ax = fig.add_subplot(1, 1, 1)
ax.plot(dates, co2flux_no_monthly, linewidth=2, label=r'Flux')
ax.plot(dates, co2flux_alk_monthly,
linewidth=2, label=r'+ advective TA and DIC; different means')
plt.title('Monthly air-sea CO$_2$ flux [mol m$^{-2}$ month$^{-1}$]; positive means inwards')
ax.legend(loc='best');
###Output
_____no_output_____
###Markdown
*Show daily CO$_2$ flux*
###Code
co2flux_no = co2flux_no.reset_index()
co2flux_alk = co2flux_alk.reset_index()
fig = plt.figure(figsize=(10, 3))
ax = fig.add_subplot(1, 1, 1)
ax.plot(co2flux_no['time'], co2flux_no['B_C_DIC _flux'], linewidth=2, label=r'Flux')
ax.plot(co2flux_alk['time'], co2flux_alk['B_C_DIC _flux'],
linewidth=2, label=r'+ advective TA and DIC; different means')
plt.title('Daily air-sea CO$_2$ flux [mmol m$^{-2}$ day$^{-1}$]; positive means inwards')
ax.legend(loc='best');
co2flux_no['B_C_DIC _flux'].values.sum()
co2flux_alk['B_C_DIC _flux'].values.sum() # CO2 [mmol m-2 year-1] seawater excreets
print("Mean is {}, min is {}, max is {}".format(
co2flux_no['B_C_DIC _flux'].values.mean(),
co2flux_no['B_C_DIC _flux'].values.min(),
co2flux_no['B_C_DIC _flux'].values.max())) # with inflow
print("Mean is {}, min is {}, max is {}".format(
co2flux_alk['B_C_DIC _flux'].values.mean(),
co2flux_alk['B_C_DIC _flux'].values.min(),
co2flux_alk['B_C_DIC _flux'].values.max())) # with inflow
###Output
Mean is -8.939801216125488, min is -31.578248977661133, max is 13.75753116607666
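###Markdown
For easier comparison with values reported in the literature, the annual totals computed above (in mmol m$^{-2}$ yr$^{-1}$) can be converted to mol m$^{-2}$ yr$^{-1}$ (a small added conversion reusing the daily flux dataframes; negative values mean outgassing with the sign convention used above).
###Code
# Annual air-sea CO2 flux in mol m-2 yr-1
print("Base run:", co2flux_no['B_C_DIC _flux'].values.sum() / 1000, "mol m-2 yr-1")
print("With advective TA and DIC:", co2flux_alk['B_C_DIC _flux'].values.sum() / 1000, "mol m-2 yr-1")
###Output
_____no_output_____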
|
chapter_computer-vision/bounding-box.ipynb | ###Markdown
Object Detection and Bounding Boxes
In the previous chapters we introduced many models for image classification. In an image classification task, we assume that there is only one main object in the image and focus on how to identify its category. However, an image often contains multiple objects that we are interested in, and we want to know not only their categories but also their specific positions in the image. In computer vision, this kind of task is called object detection (or object recognition). Object detection is widely used in many fields. For example, in self-driving technology we need to plan the route by identifying the positions of vehicles, pedestrians, roads, and obstacles in the captured video images. Robots also often perform this task to detect objects of interest, and systems in the security field need to detect abnormal targets such as intruders or bombs. In the next few sections we will introduce several deep learning models used for object detection. Before that, let us first look at the concept of object position. To start, we import the packages and modules needed for the experiment.
###Code
%matplotlib inline
import d2lzh as d2l
from mxnet import image
###Output
_____no_output_____
###Markdown
Next we load the sample image used in this section. We can see that there is a dog on the left side of the image and a cat on the right; they are the two main targets in this image.
###Code
d2l.set_figsize()
img = image.imread('../img/catdog.jpg').asnumpy()
d2l.plt.imshow(img); # the trailing semicolon makes the cell display only the figure
###Output
_____no_output_____
###Markdown
Bounding Box
In object detection, we usually use a bounding box to describe the location of a target. A bounding box is a rectangular box that can be determined by the x and y coordinates of its upper-left corner together with the x and y coordinates of its lower-right corner. Below we define the bounding boxes of the dog and the cat in the image based on the coordinate information above. The origin of the coordinates is the upper-left corner of the image, and going right and going down are the positive directions of the x axis and the y axis, respectively.
###Code
# bbox is the abbreviation for bounding box
dog_bbox, cat_bbox = [60, 45, 378, 516], [400, 112, 655, 493]
###Output
_____no_output_____
###Markdown
We can draw the bounding boxes in the image to check whether they are accurate. Before drawing them, we define a helper function `bbox_to_rect`. It represents a bounding box in the bounding box format of matplotlib.
###Code
def bbox_to_rect(bbox, color): # This function is saved in the d2lzh package for later use
# Convert the bounding box (top-left x, top-left y, bottom-right x, bottom-right y) format to
# the matplotlib format: ((top-left x, top-left y), width, height)
return d2l.plt.Rectangle(
xy=(bbox[0], bbox[1]), width=bbox[2]-bbox[0], height=bbox[3]-bbox[1],
fill=False, edgecolor=color, linewidth=2)
###Output
_____no_output_____
###Markdown
After loading the bounding boxes onto the image, we can see that the main outlines of the two targets are basically inside their boxes.
###Code
fig = d2l.plt.imshow(img)
fig.axes.add_patch(bbox_to_rect(dog_bbox, 'blue'))
fig.axes.add_patch(bbox_to_rect(cat_bbox, 'red'));
###Output
_____no_output_____
###Markdown
Object Detection and Bounding Boxes:label:`chapter_bbox`In the previous section, we introduced many models for image classification. In image classification tasks, we assume that there is only one main target in the image and we only focus on how to identify the target category. However, in many situations, there are multiple targets in the image that we are interested in. We not only want to classify them, but also want to obtain their specific positions in the image. In computer vision, we refer to such tasks as object detection (or object recognition).Object detection is widely used in many fields. For example, in self-driving technology, we need to plan routes by identifying the locations of vehicles, pedestrians, roads, and obstacles in the captured video image. Robots often perform this type of task to detect targets of interest. Systems in the security field need to detect abnormal targets, such as intruders or bombs.In the next few sections, we will introduce multiple deep learning models used for object detection. Before that, we should discuss the concept of target location. First, import the packages and modules required for the experiment.
###Code
%matplotlib inline
import d2l
from mxnet import image, npx
npx.set_np()
###Output
_____no_output_____
###Markdown
Next, we will load the sample images that will be used in this section. We can see there is a dog on the left side of the image and a cat on the right. They are the two main targets in this image.
###Code
d2l.set_figsize((3.5, 2.5))
img = image.imread('../img/catdog.jpg').asnumpy()
d2l.plt.imshow(img);
###Output
_____no_output_____
###Markdown
Bounding BoxIn object detection, we usually use a bounding box to describe the target location. The bounding box is a rectangular box that can be determined by the $x$ and $y$ axis coordinates in the upper-left corner and the $x$ and $y$ axis coordinates in the lower-right corner of the rectangle. We will define the bounding boxes of the dog and the cat in the image based on the coordinate information in the above image. The origin of the coordinates in the above image is the upper left corner of the image, and to the right and down are the positive directions of the $x$ axis and the $y$ axis, respectively.
###Code
# bbox is the abbreviation for bounding box
dog_bbox, cat_bbox = [60, 45, 378, 516], [400, 112, 655, 493]
###Output
_____no_output_____
###Markdown
We can draw the bounding box in the image to check if it is accurate. Before drawing the box, we will define a helper function `bbox_to_rect`. It represents the bounding box in the bounding box format of matplotlib.
###Code
# Save to the d2l package.
def bbox_to_rect(bbox, color):
"""Convert bounding box to matplotlib format."""
# Convert the bounding box (top-left x, top-left y, bottom-right x,
# bottom-right y) format to matplotlib format: ((upper-left x,
# upper-left y), width, height)
return d2l.plt.Rectangle(
xy=(bbox[0], bbox[1]), width=bbox[2]-bbox[0], height=bbox[3]-bbox[1],
fill=False, edgecolor=color, linewidth=2)
###Output
_____no_output_____
###Markdown
After loading the bounding box on the image, we can see that the main outline of the target is basically inside the box.
###Code
fig = d2l.plt.imshow(img)
fig.axes.add_patch(bbox_to_rect(dog_bbox, 'blue'))
fig.axes.add_patch(bbox_to_rect(cat_bbox, 'red'));
###Output
_____no_output_____
###Markdown
Object Detection and Bounding Boxes:label:`sec_bbox`In the previous section, we introduced many models for image classification. In image classification tasks, we assume that there is only one main target in the image and we only focus on how to identify the target category. However, in many situations, there are multiple targets in the image that we are interested in. We not only want to classify them, but also want to obtain their specific positions in the image. In computer vision, we refer to such tasks as object detection (or object recognition).Object detection is widely used in many fields. For example, in self-driving technology, we need to plan routes by identifying the locations of vehicles, pedestrians, roads, and obstacles in the captured video image. Robots often perform this type of task to detect targets of interest. Systems in the security field need to detect abnormal targets, such as intruders or bombs.In the next few sections, we will introduce multiple deep learning models used for object detection. Before that, we should discuss the concept of target location. First, import the packages and modules required for the experiment.
###Code
%mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/
%maven ai.djl:api:0.7.0-SNAPSHOT
%maven ai.djl:model-zoo:0.7.0-SNAPSHOT
%maven ai.djl:basicdataset:0.7.0-SNAPSHOT
%maven org.slf4j:slf4j-api:1.7.26
%maven org.slf4j:slf4j-simple:1.7.26
%maven ai.djl.mxnet:mxnet-engine:0.7.0-SNAPSHOT
%maven ai.djl.mxnet:mxnet-native-auto:1.7.0-b
import ai.djl.modality.cv.Image;
import ai.djl.modality.cv.ImageFactory;
import ai.djl.modality.cv.output.BoundingBox;
import ai.djl.modality.cv.output.DetectedObjects;
import ai.djl.modality.cv.output.Rectangle;
###Output
_____no_output_____
###Markdown
Next, we will load the sample images that will be used in this section. We can see there is a dog on the left side of the image and a cat on the right. They are the two main targets in this image.
###Code
Image imgArr = ImageFactory.getInstance()
.fromUrl("https://github.com/d2l-ai/d2l-en/blob/master/img/catdog.jpg?raw=true");
imgArr.getWrappedImage();
###Output
_____no_output_____
###Markdown
Bounding BoxIn object detection, we usually use a bounding box to describe the target location. The bounding box is a rectangular box that can be determined by the $x$ and $y$ axis coordinates in the upper-left corner and the $x$ and $y$ axis coordinates in the lower-right corner of the rectangle. We will define the bounding boxes of the dog and the cat in the image based on the coordinate information in the above image. The origin of the coordinates in the above image is the upper left corner of the image, and to the right and down are the positive directions of the $x$ axis and the $y$ axis, respectively.
###Code
// bbox is the abbreviation for bounding box
double[] dog_bbox = new double[]{60, 45, 378, 516};
double[] cat_bbox = new double[]{400, 112, 655, 493};
###Output
_____no_output_____
###Markdown
We can draw the bounding box in the image to check if it is accurate. Before drawing the box, we will define a helper function `bboxToRectangle`. In DJL, the rectangles we create use relative (fractional) coordinates, so we divide the pixel coordinates by the image width and height respectively. It represents the bounding box in the bounding box format of DJL's `Image` API.
###Code
public Rectangle bboxToRectangle(double[] bbox, int width, int height){
return new Rectangle(bbox[0]/width, bbox[1]/height, (bbox[2]-bbox[0])/width, (bbox[3]-bbox[1])/height);
}
###Output
_____no_output_____
###Markdown
After loading the bounding box on the image, we can see that the main outline of the target is basically inside the box.
###Code
List<String> classNames = new ArrayList();
classNames.add("dog");
classNames.add("cat");
List<Double> prob = new ArrayList<>();
prob.add(1.0);
prob.add(1.0);
List<BoundingBox> boxes = new ArrayList<>();
boxes.add(bboxToRectangle(dog_bbox, imgArr.getWidth(), imgArr.getHeight()));
boxes.add(bboxToRectangle(cat_bbox, imgArr.getWidth(), imgArr.getHeight()));
DetectedObjects detectedObjects = new DetectedObjects(classNames, prob, boxes);
imgArr.drawBoundingBoxes(detectedObjects);
imgArr.getWrappedImage();
###Output
_____no_output_____
###Markdown
Object Detection and Bounding Boxes In the previous few sections we introduced many models for image classification. In image classification tasks, we assume there is only one main target in the image and we only focus on how to identify that target's category. However, in many situations there are multiple targets in the image that we are interested in. We not only want to classify them, but also want to obtain their specific positions in the image. In computer vision, we refer to such tasks as object detection. Object detection is widely used in many fields. For example, in self-driving technology, we need to plan driving routes by identifying the locations of vehicles, pedestrians, roads, and obstacles in the captured video images. Robots often perform this type of task to detect targets of interest, and systems in the security field need to detect abnormal targets, such as intruders or bombs. In the next few sections, we will introduce multiple deep learning models used for object detection. Before that, let us get to know the concept of target position. First, import the packages and modules required for the experiment.
###Code
%matplotlib inline
import d2lzh as d2l
from mxnet import image
###Output
_____no_output_____
###Markdown
Next, load the sample image that will be used in this section. We can see there is a dog on the left side of the image and a cat on the right; they are the two main targets in this image.
###Code
d2l.set_figsize()
img = image.imread('../img/catdog.jpg').asnumpy()
d2l.plt.imshow(img);  # the trailing semicolon makes the cell display only the figure
###Output
_____no_output_____
###Markdown
Bounding Box In object detection, we usually use a bounding box to describe the target location. The bounding box is a rectangular box that can be determined by the $x$ and $y$ axis coordinates of the upper-left corner and the $x$ and $y$ axis coordinates of the lower-right corner of the rectangle. We will define the bounding boxes of the dog and the cat based on the coordinate information in the image above. The origin of the coordinates in the image above is the upper-left corner of the image, and to the right and down are the positive directions of the $x$ axis and the $y$ axis, respectively.
###Code
# bbox is the abbreviation for bounding box
dog_bbox, cat_bbox = [60, 45, 378, 516], [400, 112, 655, 493]
###Output
_____no_output_____
###Markdown
We can draw the bounding boxes in the image to check whether they are accurate. Before drawing, we define a helper function `bbox_to_rect`; it represents the bounding box in matplotlib's bounding box format.
###Code
def bbox_to_rect(bbox, color):  # This function is saved in the d2lzh package for later use
    # Convert the bounding box (top-left x, top-left y, bottom-right x, bottom-right y) format to the matplotlib format: ((upper-left x, upper-left y), width, height)
return d2l.plt.Rectangle(
xy=(bbox[0], bbox[1]), width=bbox[2]-bbox[0], height=bbox[3]-bbox[1],
fill=False, edgecolor=color, linewidth=2)
###Output
_____no_output_____
###Markdown
After loading the bounding boxes onto the image, we can see that the main outlines of the targets are basically inside the boxes.
###Code
fig = d2l.plt.imshow(img)
fig.axes.add_patch(bbox_to_rect(dog_bbox, 'blue'))
fig.axes.add_patch(bbox_to_rect(cat_bbox, 'red'));
###Output
_____no_output_____ |
files/notebooks/2020-08-15-torchanimd.ipynb | ###Markdown
PyTorch + ANI + MDPyTorch provides nice utilities for differentiation.ANI provides some interatomic potentials trained on some neural networks.Molecular Dynamics might be an interesting combination Some basic pytorch functionality, a 1-D springPytorch replicates a lot of numpy functionality, and we can build python functions that take pytorch tensors as input
###Code
import torch
import matplotlib.pyplot as plt
x = torch.ones((2,2), requires_grad=True)
###Output
_____no_output_____
###Markdown
A simple quadratic function
###Code
def sq_function(x):
return x**2
###Output
_____no_output_____
###Markdown
Since we have an array of 1s, the square won't look very interesting...
###Code
foo = sq_function(x)
foo
###Output
_____no_output_____
###Markdown
More interestingly, we can compute the gradient of this function. To compute the gradient, the value/function needs to be a scalar, but this scalar could be computed from a bunch of other functions stemming from some independent variables (our tensor x). In this case, our final scalar looks like this, $ Y = x_0^2 + x_1^2 + x_2^2 + x_3^2 $. Taking the gradient means taking 4 partial derivatives, one for each input. Fortunately, each partial derivative is simple to compute, $ \frac{\partial Y}{\partial x_i} = 2*x_i $, where $i = [0,4)$. Since this is an array of 1s, each partial derivative evaluates to 2
###Code
torch.autograd.grad(foo.sum(), x)
###Output
_____no_output_____
###Markdown
We've evaluated the function and its gradient at just one point, but we can use some numpy-esque functions to evaluate the square-function and its gradient at a range of points.Yup, looks right to me
###Code
some_xvals = torch.arange(-12., 12., step=0.5, requires_grad=True)
some_yvals = sq_function(some_xvals)
fig, ax = plt.subplots(1,1)
ax.plot(some_xvals.detach().numpy(), some_yvals.detach().numpy())
ax.plot(some_xvals.detach().numpy(),
torch.autograd.grad(some_yvals.sum(), some_xvals)[0])
###Output
_____no_output_____
###Markdown
Slightly more book-keeping, 3x 1-D harmonic springs Define an energy function as the sum of 3 harmonic springs $V(x, y, z) = V_x + V_y + V_z = (x-x_0)^2 + (y-y_0)^2 + (z-z_0)^2$The gradient, the 3 partial derivatives, are computed as such (being verbose with the chain rule)$\frac{\partial V}{\partial X} = 2 *(x-x_0) * 1$$\frac{\partial V}{\partial Y} = 2 *(y-y_0) * 1$$\frac{\partial V}{\partial Z} = 2 *(z-z_0) * 1$
###Code
def harmonic_spring_3d(coord, origin=torch.tensor([0,0,0])):
V_x = (coord[0]-origin[0])**2
V_y = (coord[1]-origin[1])**2
V_z = (coord[2]-origin[2])**2
return V_x + V_y + V_z
###Output
_____no_output_____
###Markdown
We can evaluate the potential energy at 1 point, which involves computing the energy in 3 dimensions.Our "anchor" will be the origin, and our endpoint will be (1,2,3)$ 1^2 + 2^2 + 3^2 = 14$
###Code
my_coords = torch.tensor([1.,2.,3.], requires_grad=True)
total_energy = harmonic_spring_3d(my_coords)
total_energy
###Output
_____no_output_____
###Markdown
Computing the gradient gives the partial derivatives in each direction, which are simply 2 times the distance in each dimension: $ \nabla \hat V = \langle 2(x-x_0), 2(y-y_0), 2(z-z_0) \rangle = \langle 2, 4, 6 \rangle $
###Code
torch.autograd.grad(total_energy, my_coords)
###Output
_____no_output_____
###Markdown
More involved: Lennard JonesThe Lennard-Jones potential describes the potential energy between two particles.Not the most accurate potential, but has been decent for a long time now.[Some background information on the Lennard-Jones potential](http://www.sklogwiki.org/SklogWiki/index.php/Lennard-Jones_model).For simplicity, assume $\epsilon =1$ and $\sigma=1$ in unitless quantities:$ V_{LJ} = 4 * ( \frac{1}{r}^{12} - \frac{1}{r}^6) $$ -\frac{\partial V}{\partial r} = -4 * (-12 * r^{-13} + 6 * r^{-7}) $
###Code
def lj(val):
return 4 * ((1/val)**12 - (1/val)**6)
r_values = torch.arange(0.1, 12., step=0.001, requires_grad=True)
energy = lj(r_values)
forces = -torch.autograd.grad(energy.sum(), r_values)[0]
###Output
_____no_output_____
###Markdown
As a sanity check, we can confirm that energy reaches a critical point (local minimum) when the force is 0. Also, this *definitely* looks like a LJ potential to me
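To see where that critical point should sit, set the derivative to zero and solve for $r$ (still with $\sigma = \epsilon = 1$): $ \frac{\partial V}{\partial r} = 4(-12 r^{-13} + 6 r^{-7}) = 0 \implies r^{6} = 2 \implies r_{min} = 2^{1/6} \approx 1.12 $, so the force curve below should cross zero right around $r \approx 1.12$, where the energy bottoms out.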
###Code
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1,1, dpi=100)
ax.plot(r_values.detach().numpy(), energy.detach().numpy(), label='energy')
ax.plot(r_values.detach().numpy(), forces.detach().numpy(), label='force')
ax.set_ylim([-2,1])
ax.legend()
ax.set_xlim([0,2])
ax.axhline(y=0, color='r', linestyle='--')
###Output
_____no_output_____
###Markdown
Moving to torchaniANI is an interatomic potential built upon neural networks.Rather than write our own function to evaluate the energy between atoms, maybe we can just use ANI.Since this is pytorch-based, this is still available for autodifferentiation to get the forceshttps://github.com/aiqm/torchani To begin, we have to define our elements (a tensor of atomic numbers).For the molecular mechanics people, each atom is identifiable by its element, and not one of many atom-types.We have to define the positions (units of Angstrom), which is also a multi-dimensional tensor.Load the model, specifying to convert the atomic numbers to indices suitable for ANI.We can compute the energies and forces from the model.The energy comes from the model, but the force is obtained via an autograd call, observing that we are differentiating the sum of the forces, evaluating at the positions
###Code
import torchani
elements = torch.tensor([[6, 6]])
positions = torch.tensor([[[3.0, 3.0, 3.0],
[3.5, 3.5, 3.5]]], requires_grad=True)
model = torchani.models.ANI2x(periodic_table_index=True)
energy = model((elements, positions)).energies
forces = -1.0 * torch.autograd.grad(energy.sum(), positions)[0]
energy
forces
###Output
_____no_output_____
###Markdown
Going a step further, we can try to visualize the interaction potential by evaluating the energy at a variety of distances.We can also do some autodifferentiation to compute the forces.In this example, we have 2 atoms that share X and Y coordinates, but pull them apart in the Z direction
###Code
all_z = torch.arange(3.0, 12.0, step=0.1)
all_energy = []
all_forces = []
for z in all_z:
# Generate a new set of positions
positions = torch.tensor([[[3.0, 3.0, 3.0],
[3.0, 3.0, z]]], requires_grad=True
)
# Compute energy
energy = model((elements, positions)).energies
# Compute force
forces = -1.0 * torch.autograd.grad(energy.sum(), positions)[0]
# Get the force vector on the first atom
one_atom_forces = forces[0,0]
# Compute the magnitude of this force vector
force_magnitude = torch.sqrt(torch.dot(one_atom_forces, one_atom_forces))
# Calculate the unit vector for this force vector,
# although it's a little unnecessary because the only distance is in the
# z direction
unit_vector_force = one_atom_forces/force_magnitude
# Get z-component of force vector
force_vector_z = unit_vector_force[2]*force_magnitude
# Some nans will form if the force magnitude is zero, but this
# is really just a 0 force vector
if torch.isnan(force_vector_z).any():
force_vector_z = 0.0
else:
force_vector_z = float(force_vector_z.detach().numpy())
# Accumulate
all_energy.append(float(energy.detach().numpy()))
all_forces.append(force_vector_z)
###Output
_____no_output_____
###Markdown
Hmmm... this does not resemble the Lennard-Jones potential (or basic chemistry for that matter)
###Code
fig, ax = plt.subplots(1,1, dpi=100)
ax.plot(all_z-3, all_energy)
ax.set_xlabel(r"Distance ($\AA$)")
ax.set_ylabel("Energy (Hartree)")
fig, ax = plt.subplots(1,1, dpi=100)
ax.plot(all_z-3, all_forces)
ax.set_xlabel(r"Distance ($\AA$)")
ax.set_ylabel("Force (Hartree / $\AA$)")
from mbuild.lib.recipes import Alkane
# The mBuild alkane recipe is mainly used to generate
# some particles and positions
cmpd = Alkane(n=5)
# Convert to mdtraj trajectory out of convenience for atomic numbers
traj = cmpd.to_trajectory()
# Periodic cell, from nm to angstrom
cell = torch.tensor(traj.unitcell_vectors[0]*10)
# We just need atomic numbers
species = torch.tensor([[
a.element.atomic_number for a in traj.top.atoms
]])
# Make tensor for coordinates
# Since we are differentiating WRT coordinates, we need the
# requires_grad=True
coordinates = torch.tensor(traj.xyz*10, requires_grad=True)
# PBC flag necessary for computing energies with periodic boundaries
pbc = torch.tensor([True, True, True], dtype=torch.bool)
energies = model((species, coordinates), cell=cell, pbc=pbc).energies
forces = -1.0 * (
torch.autograd.grad(energies.sum(), coordinates)[0]
)
energies
forces
###Output
/home/ayang41/miniconda3/envs/torch37/lib/python3.7/site-packages/ipykernel/ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
|
recursion_dynamic_program.ipynb | ###Markdown
Factorial by recursion
###Code
def factorial(n):
if n < 2:
return 1
else:
return n * factorial(n-1)
factorial(10)
###Output
_____no_output_____
###Markdown
Find the total number of subsets of a given list whose elements sum to a particular number
###Code
def rec(arr, total, i):
if total == 0:
return 1
elif total < 0:
return 0
elif i < 0:
return 0
elif total < arr[i]:
return rec(arr, total, i-1)
else:
return rec(arr, total-arr[i],i-1) + rec(arr, total, i-1)
def count_set(arr, total):
return rec(arr,total, len(arr)-1)
a = [2,4,6,10,8,100]
count_set(a, 108)
def dynamicP(arr, total, i, mem):
    # memoized version: cache results keyed by (remaining total, current index)
    key = str(total)+":"+str(i)
    if key in mem:
        return mem[key]
    if total == 0:
        # found a subset that sums exactly to the target
        return 1
    elif total < 0:
        # overshot the target
        return 0
    elif i < 0:
        # ran out of elements
        return 0
    elif total < arr[i]:
        # current element is too large, skip it
        to_return = dynamicP(arr, total, i-1, mem)
    else:
        # count subsets that include arr[i] plus subsets that exclude it
        to_return = dynamicP(arr, total-arr[i], i-1, mem) + dynamicP(arr, total, i-1, mem)
    mem[key] = to_return
    return to_return
def count_set_dyp(arr, total):
mem = {}
return dynamicP(arr,total, len(arr)-1, mem)
a = [2,4,6,10,8,100]
count_set_dyp(a, 108)
###Output
_____no_output_____ |
Ex-02-Add Various Shapes.ipynb | ###Markdown
Add Various Shapes Import Libraries
###Code
import cv2
import numpy as np
###Output
_____no_output_____
###Markdown
Drawing a Line on Black Image
###Code
#Image
black_image = np.zeros((512,512,3), np.uint8)
#Draw Line
cv2.line(black_image, (1,1), (250,250), (100,255,0),4)
cv2.imshow("Flouresent Line", black_image)
cv2.waitKey(6000)
cv2.destroyAllWindows()
###Output
_____no_output_____
###Markdown
Draw a Circle on Black Image
###Code
#Image
black_image = np.zeros((512,512,3), np.uint8)
#Draw Circle
cv2.circle(black_image,(250,250), 175, (255,0,0),4)
cv2.imshow("Circle", black_image)
cv2.waitKey(6000)
cv2.destroyAllWindows()
###Output
_____no_output_____
###Markdown
Drawing a Rectangle on Black Image
###Code
#Image
black_image = np.zeros((512,512,3), np.uint8)
#Draw Rectangle
cv2.rectangle(black_image, (50,100), (250,250), (110,100,90),-1)
cv2.imshow("Rectangle", black_image)
cv2.waitKey(6000)
cv2.destroyAllWindows()
###Output
_____no_output_____ |
others/SignAuth_Google_Colab.ipynb | ###Markdown
SignAuth - View On [Github](https://github.com/360modder/signauth)
###Code
#@title Install Dependencies
import os
if os.path.exists("signauth"):
!rm -r signauth
!git clone https://www.github.com/360modder/signauth.git
working_dir = os.getcwd()
os.chdir("signauth")
!pip install -r requirements.txt
###Output
_____no_output_____
###Markdown
Pre-Trained Model Predictions
###Code
#@title SignAuth (Pre-Trained) {display-mode: "form"}
image_path = "/content/" #@param {type: "string"}
scan = True #@param {type: "boolean"}
if scan:
!python production/signauth.py $image_path --scan
else:
!python production/signauth.py $image_path
###Output
_____no_output_____
###Markdown
Training & Predictions
###Code
#@title Preprocess Images {display-mode: "form"}
process = "train,test,predict" #@param ["train,test,predict", "train", "test", "predict"]
scan = False #@param {type:"boolean"}
backup = True #@param {type:"boolean"}
overwrite = False #@param {type:"boolean"}
if backup and overwrite:
!python preprocessing/preprocessor.py --process=$process --backup --overwrite
elif backup:
!python preprocessing/preprocessor.py --process=$process --backup
else:
!python preprocessing/preprocessor.py --process=$process
if scan:
!python preprocessing/preprocessor.py --process=$process --scan
#@title Train The Model {display-mode: "form"}
#@markdown Adjust The Hyper-parameters
batchsize = 20 #@param {type:"slider", min:4, max:30}
epochs = 10 #@param {type:"slider", min:1, max:30}
learningrate = 0.001 #@param {type:"slider", min:0.001, max:1.0, step:0.001}
#@markdown Generate tflite Model
tflitemodel = False #@param {type:"boolean"}
!python train.py --batchsize=$batchsize --epochs=$epochs --learningrate=$learningrate
if tflitemodel:
!python models/tflite.py
#@title Predict Using Trained Model {display-mode: "form"}
!python predict.py
###Output
_____no_output_____
###Markdown
SignAuth - View On [Github](https://github.com/clitic/signauth)
###Code
#@title Install Dependencies
import os
if os.path.exists("signauth"):
!rm -r signauth
!git clone https://www.github.com/clitic/signauth.git
working_dir = os.getcwd()
os.chdir("signauth")
!pip install -r requirements.txt
###Output
_____no_output_____
###Markdown
Pre-Trained Model Predictions
###Code
#@title SignAuth (Pre-Trained) {display-mode: "form"}
image_path = "/content/" #@param {type: "string"}
scan = True #@param {type: "boolean"}
if scan:
!python production/signauth.py $image_path --scan
else:
!python production/signauth.py $image_path
###Output
_____no_output_____
###Markdown
Training & Predictions
###Code
#@title Preprocess Images {display-mode: "form"}
process = "train,test,predict" #@param ["train,test,predict", "train", "test", "predict"]
scan = False #@param {type:"boolean"}
backup = True #@param {type:"boolean"}
overwrite = False #@param {type:"boolean"}
if backup and overwrite:
!python preprocessing/preprocessor.py --process=$process --backup --overwrite
elif backup:
!python preprocessing/preprocessor.py --process=$process --backup
else:
!python preprocessing/preprocessor.py --process=$process
if scan:
!python preprocessing/preprocessor.py --process=$process --scan
#@title Train The Model {display-mode: "form"}
#@markdown Adjust The Hyper-parameters
batchsize = 20 #@param {type:"slider", min:4, max:30}
epochs = 10 #@param {type:"slider", min:1, max:30}
learningrate = 0.001 #@param {type:"slider", min:0.001, max:1.0, step:0.001}
#@markdown Generate tflite Model
tflitemodel = False #@param {type:"boolean"}
!python train.py --batchsize=$batchsize --epochs=$epochs --learningrate=$learningrate
if tflitemodel:
!python models/tflite.py
#@title Predict Using Trained Model {display-mode: "form"}
!python predict.py
###Output
_____no_output_____ |
notebooks/2020-5-24 Launch Angle.ipynb | ###Markdown
Launch AngleWe're going to analyze 2019 Statcast data with a specific focus on launch angle. We'll be loading it from .csv but previous notebooks have demonstrated use of *pybaseball* module to pull the data directly from *baseball savant*.1. First we load the data and python libraries2. We're going to look at the batted results broken down by launch angle in a series of simple bar charts3. Statistical distribution of launch angle in 20194. Relationship of launch angle and wOBA and/or home runsThis is all to produce supporting information for my blog article which can be viewed at cgutwein.github.io
###Code
## libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
% matplotlib inline
## import data
statcast_data = pd.read_csv('./statcast_pitches_2019.csv')
from matplotlib.patches import Circle
###Output
_____no_output_____
###Markdown
Ted Williams ReferenceThe article mentions Ted Williams' approximate launch angle of 10 degrees. Here we'll filter the data for:* batted results* launch angle <= 10 degreesWe'll get a second look with the data above 10 degrees as well. For information pertaining to the Statcast data, this link will be helpful: https://baseballsavant.mlb.com/csv-docs
###Code
event_list = list(statcast_data['events'].unique())
batted_list = ['field_out',
'grounded_into_double_play',
'home_run',
'single',
'double',
'force_out',
'field_error',
'double_play',
'sac_fly',
'triple',
'fielders_choice_out',
'fielders_choice',
'sac_fly_double_play',
'triple_play']
# Create the dictionary
batted_dictionary ={'field_out': 'out',
'grounded_into_double_play': 'out',
'single':'single',
'double':'double',
'force_out':'out',
'field_error':'out',
'double_play':'out',
'sac_fly':'out',
'triple':'triple',
'home_run': 'home_run',
'fielders_choice_out':'out',
'fielders_choice':'out',
'sac_fly_double_play':'out',
'triple_play':'out'}
# filter data
batted_statcast_data = statcast_data[statcast_data['events'].isin(batted_list)].copy()
# put all outs and errors in one category
batted_statcast_data['events'] = batted_statcast_data['events'].map(batted_dictionary)
batted_statcast_data['angle10'] = batted_statcast_data['launch_angle'] > 10
# sort data to look better for chart
mapping = {'out': 0,
'single': 1,
'double': 2,
'triple': 3,
'home_run': 4}
key = batted_statcast_data['events'].map(mapping)
# Bar chart - launch angle by result
# Setting FiveThirtyEight style
plt.style.use('fivethirtyeight')
# Setting size of our plot
fig, ax = plt.subplots(figsize=(8,6))
ax1 = sns.countplot(x="events", hue="angle10", data=batted_statcast_data.iloc[key.argsort()])
# Y axis past 0 & above 100 -- grid line will pass 0 & 100 marker
plt.ylim(-5,50000)
# Bolded horizontal line at y=0
#ax1.axhline(y=0, color='#414141', linewidth=1.5, alpha=.5)
# Y-labels to only these
ax.set_yticks([0, 10000,20000, 30000, 40000])
ax.set_yticklabels(labels=['0', '10000', '20000', '30000', '40000'], fontsize=14, color='#414141')
# X-labels and changing label names
ax.set_xticklabels(['Out', 'Single', 'Double', 'Triple','Homer'],fontsize=14, color='#414141')
# Title text
ax.text(x=-1, y=49000, s="Is 10 the magic number?", fontsize=18.5, fontweight='semibold', color='#414141')
# Subtitle text
ax.text(x=-1, y=46500, s='Batted ball results above and below 10 degree launch angle', fontsize=16.5, color='#414141')
ax.set_ylabel('')
ax.set_xlabel('')
# legend
ax.legend(["< 10 degree launch angle", "> 10 degree launch angle"], loc=7)
# Line at bottom for signature line
ax1.text(x = -1, y = -7000,
s = ' ยฉChet Gutwein Source: Baseball Savant ',
fontsize = 14, color = '#f0f0f0', backgroundcolor = '#414141');
plt.savefig('ted.png')
###Output
_____no_output_____
###Markdown
Launch vs. Exit VeloThe next chart presents launch angle vs. exit velocity. We somewhat want to re-create the charts of past articles by the Washington Post, etc. that show the "sweet spot" for home runs that exists within this relationship. For our version, we'll want to have home runs and outs presented in different colors.
###Code
# Scatter chart - launch angle vs exit velo
# new variable - is homer?
batted_statcast_data['is_homer'] = batted_statcast_data['events'] == 'home_run'
# Setting FiveThirtyEight style
plt.style.use('fivethirtyeight')
# Setting size of our plot
fig, ax = plt.subplots(figsize=(12,9))
ax1 = plt.scatter(x=batted_statcast_data["launch_angle"], y=batted_statcast_data["launch_speed"], c=batted_statcast_data['is_homer'], alpha=0.02, cmap='rainbow')
# Y axis past 0 & above 100 -- grid line will pass 0 & 100 marker
plt.ylim(-5,150)
# Bolded horizontal line at y=0
#ax1.axhline(y=0, color='#414141', linewidth=1.5, alpha=.5)
# Y-labels to only these
ax.set_yticks([0, 25, 50, 75, 100, 125])
ax.set_yticklabels(labels=['0', '25', '50', '75', '100', '125'], fontsize=14, color='#414141')
# X-labels and changing label names
#ax.set_xticklabels(['Out', 'Single', 'Double', 'Triple','Homer'],fontsize=14, color='#414141')
# Title text
ax.text(x=-99, y=140, s="Launch angle vs. exit velocity", fontsize=18.5, fontweight='semibold', color='#414141')
# Subtitle text
ax.text(x=-99, y=135, s='Where are the homers being it?', fontsize=16.5, color='#414141')
ax.set_ylabel('')
ax.set_xlabel('Launch Angle')
# legend
#ax.legend(["< 10 degree launch angle", "> 10 degree launch angle"], loc=7)
# Line at bottom for signature line
ax.text(x = -110, y = -25,
s = ' ยฉChet Gutwein Source: Baseball Savant ',
fontsize = 14, color = '#f0f0f0', backgroundcolor = '#414141');
plt.savefig('purple_cloud.png')
###Output
_____no_output_____
###Markdown
For further examination of launch angle vs. wOBA, we need to group by position player ID. We'll then plot the mean launch angle vs. the mean wOBA. It'd be nice to get a fit line and also annotate a player or two, which might become useful later.
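A rough sketch of that fit line (hypothetical, not run here): `np.polyfit` on the per-batter means could be overlaid on the scatter; `batted_grouped_trimmed` and `ax` refer to the objects created in the cells below.

```python
# Hypothetical fit-line sketch; assumes batted_grouped_trimmed and ax from the cells below
import numpy as np

x = batted_grouped_trimmed['launch_angle'].values
y = batted_grouped_trimmed['woba_value'].values
slope, intercept = np.polyfit(x, y, 1)  # least-squares linear fit
x_line = np.linspace(x.min(), x.max(), 100)
ax.plot(x_line, slope * x_line + intercept, linestyle='--', linewidth=1.5, label='linear fit')
```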
###Code
batted_grouped = batted_statcast_data.groupby(by='batter').mean()
batted_counts = batted_statcast_data.groupby(by='batter').count()
valid_batter_list = batted_counts[batted_counts['events'] >= 50].index
batted_grouped_trimmed = batted_grouped[batted_grouped.index.isin(valid_batter_list)]
annotated_list = [545361, 593934, 608336, 446359]
batted_grouped_trimmed['is_annotated'] = batted_grouped_trimmed.index.isin(annotated_list)
print(len(batted_grouped))
print(len(batted_grouped_trimmed))
print(sum(batted_grouped_trimmed['is_annotated']))
# Setting FiveThirtyEight style
plt.style.use('fivethirtyeight')
# Setting size of our plot
fig, ax = plt.subplots(figsize=(8,6))
ax1 = plt.scatter(x=batted_grouped_trimmed["launch_angle"], y=batted_grouped_trimmed["woba_value"], alpha=0.3, c=batted_grouped_trimmed['is_annotated'], cmap='rainbow')#, c=batted_grouped_trimmed["woba_value"], cmap='rainbow')
# Y axis past 0 & above 100 -- grid line will pass 0 & 100 marker
plt.ylim(-0.1,0.8)
# Bolded horizontal line at y=0
#ax1.axhline(y=0, color='#414141', linewidth=1.5, alpha=.5)
# Y-labels to only these
ax.set_yticks([0, 0.2, 0.4, 0.6])
ax.set_yticklabels(labels=['0', '0.2', '0.4', '0.6'], fontsize=14, color='#414141')
# X-labels and changing label names
ax.set_xticks([-10, 0, 10, 20,30])
ax.set_xticklabels(['-10', '0', '10', '20', '30'],fontsize=14, color='#414141')
# Title text
ax.text(x=-15, y=0.9, s="Launch angle vs. wOBA", fontsize=18.5, fontweight='semibold', color='#414141')
# Subtitle text
ax.text(x=-15, y=0.85, s='Batters average wOBA on batted balls', fontsize=16.5, color='#414141')
ax.set_ylabel('wOBA')
ax.set_xlabel('Player Mean Launch Angle on batted balls')
# Annotations
ax.annotate('Joey Gallo', xy=(batted_grouped_trimmed.loc[608336]['launch_angle'],batted_grouped_trimmed.loc[608336]['woba_value']),
xytext=((batted_grouped_trimmed.loc[608336]['launch_angle']-6,batted_grouped_trimmed.loc[608336]['woba_value'])))
#ax.add_artist(Circle(((batted_grouped_trimmed.loc[656541]['launch_angle'],batted_grouped_trimmed.loc[656541]['woba_value'])), 0.01, edgecolor='red',fill=False))
#Migel Sano
ax.annotate('Miguel Sano', xy=(batted_grouped_trimmed.loc[593934]['launch_angle'],batted_grouped_trimmed.loc[593934]['woba_value']),
xytext=((batted_grouped_trimmed.loc[593934]['launch_angle']-7,batted_grouped_trimmed.loc[593934]['woba_value'])))
#ax.add_artist(Circle(((batted_grouped_trimmed.loc[593934]['launch_angle'],batted_grouped_trimmed.loc[593934]['woba_value'])), 0.01, edgecolor='red',fill=False))
#Mike Trout
ax.annotate('Mike Trout', xy=(batted_grouped_trimmed.loc[545361]['launch_angle'],batted_grouped_trimmed.loc[545361]['woba_value']),
xytext=((batted_grouped_trimmed.loc[545361]['launch_angle']+0.5,batted_grouped_trimmed.loc[545361]['woba_value'])))
#ax.add_artist(Circle(((batted_grouped_trimmed.loc[446263]['launch_angle'],batted_grouped_trimmed.loc[446263]['woba_value'])), 0.01, edgecolor='red',fill=False))
#Zach Cozart
ax.annotate('Zac Cozart', xy=(batted_grouped_trimmed.loc[446359]['launch_angle'],batted_grouped_trimmed.loc[446359]['woba_value']),
xytext=((batted_grouped_trimmed.loc[446359]['launch_angle']+0.5,batted_grouped_trimmed.loc[446359]['woba_value'])))
# legend
#ax.legend(["< 10 degree launch angle", "> 10 degree launch angle"], loc=7)
# Line at bottom for signature line
ax.text(x = -15, y = -0.3,
s = ' ยฉChet Gutwein Source: Baseball Savant ',
fontsize = 14, color = '#f0f0f0', backgroundcolor = '#414141');
plt.savefig('batter_mean.png')
batted_grouped_trimmed[(batted_grouped_trimmed['woba_value'] < 0.2) & (batted_grouped_trimmed['launch_angle'] > 20)]
#id_map = pd.read_csv('../mlb/data/PLAYERIDMAP.csv')
id_map[id_map['MLBID'] == 446359]
#id_map[id_map['PLAYERNAME'] == 'Joey Gallo']['MLBID']
batted_grouped_trimmed.loc[656541]['woba_value']
###Output
_____no_output_____
###Markdown
For the final chart, we're going to plot batted balls by Mike Trout in blue and Zach Cozart in red using the same launch angle vs. exit velocity. This time, for the homers we'll use a different marker shape as indication.
###Code
# Setting FiveThirtyEight style
plt.style.use('fivethirtyeight')
# Setting size of our plot
fig, ax = plt.subplots(figsize=(12,9))
x1 = batted_statcast_data[(batted_statcast_data["batter"] == 545361) & (batted_statcast_data["is_homer"] == False)]["launch_angle"]
y1 = batted_statcast_data[(batted_statcast_data["batter"] == 545361) & (batted_statcast_data["is_homer"] == False)]["launch_speed"]
ax1 = plt.scatter(x=x1, y=y1, c='blue', alpha=0.2)
x1b = batted_statcast_data[(batted_statcast_data["batter"] == 545361) & (batted_statcast_data["is_homer"] == True)]["launch_angle"]
y1b = batted_statcast_data[(batted_statcast_data["batter"] == 545361) & (batted_statcast_data["is_homer"] == True)]["launch_speed"]
ax1b = plt.scatter(x=x1b, y=y1b, c='blue', alpha=1, marker="*", edgecolor='pink')
x2 = batted_statcast_data[(batted_statcast_data["batter"] == 446359) & (batted_statcast_data["is_homer"] == False)]["launch_angle"]
y2 = batted_statcast_data[(batted_statcast_data["batter"] == 446359) & (batted_statcast_data["is_homer"] == False)]["launch_speed"]
ax2 = plt.scatter(x=x2, y=y2, c='red', alpha=0.2)
x2b = batted_statcast_data[(batted_statcast_data["batter"] == 446359) & (batted_statcast_data["is_homer"] == True)]["launch_angle"]
y2b = batted_statcast_data[(batted_statcast_data["batter"] == 446359) & (batted_statcast_data["is_homer"] == True)]["launch_speed"]
ax2b = plt.scatter(x=x2b, y=y2b, c='red', alpha=1, marker="*", edgecolor='pink')
# Y axis past 0 & above 100 -- grid line will pass 0 & 100 marker
plt.ylim(-5,150)
# Bolded horizontal line at y=0
#ax1.axhline(y=0, color='#414141', linewidth=1.5, alpha=.5)
# Y-labels to only these
ax.set_yticks([0, 25, 50, 75, 100, 125])
ax.set_yticklabels(labels=['0', '25', '50', '75', '100', '125'], fontsize=14, color='#414141')
# X-labels and changing label names
#ax.set_xticklabels(['Out', 'Single', 'Double', 'Triple','Homer'],fontsize=14, color='#414141')
# Title text
ax.text(x=-75, y=140, s="Launch angle vs. exit velocity", fontsize=18.5, fontweight='semibold', color='#414141')
# Subtitle text
ax.text(x=-75, y=135, s='High launch angle Angels, with different results', fontsize=16.5, color='#414141')
ax.set_ylabel('')
ax.set_xlabel('Launch Angle')
# legend
ax.legend(["Mike Trout", "Mike Trout home runs","Zach Cozart", "Zach Cozart home runs"], loc=3)
# Line at bottom for signature line
ax.text(x = -84, y = -25,
s = ' ยฉChet Gutwein Source: Baseball Savant ',
fontsize = 14, color = '#f0f0f0', backgroundcolor = '#414141');
plt.savefig('trout_cozart.png')
sum(batted_grouped_trimmed['launch_angle'] > 20)
###Output
_____no_output_____ |
ML/f03-fluxo/FS20/02-dataset.ipynb | ###Markdown
Generating the dataset as a csv file Importing libraries
###Code
import numpy as np
import pandas as pd
import seaborn as sns
from zipfile import ZipFile
from feat_set_ext import *
###Output
_____no_output_____
###Markdown
Creating a list of DataFrames per sample
###Code
HEADER_LINE_OF_CSV = 19
list_of_df = []
with ZipFile('./fluxo.zip') as fluxo:
path_list = fluxo.namelist()
path_list.sort()
for file_path in filter(lambda string: '.csv' in string, path_list):
with fluxo.open(file_path) as csv_file:
signal = pd.read_csv(csv_file, header=HEADER_LINE_OF_CSV)[['CH1']].values.flatten()
list_of_df.append(create_fs20(signal, file_path))
###Output
_____no_output_____
###Markdown
Generating csv files
###Code
data_df = pd.concat(list_of_df, ignore_index=True)
data_df.replace(np.inf, np.NaN)
data_df.dropna(inplace=True)
data_df.to_csv('./csvs/fs20.csv', index=False)
data_df.head()
data_df.describe()
###Output
_____no_output_____ |
53Questions.ipynb | ###Markdown
53 Python Interview Questions and Answers Python questions for data scientists and software engineersNot so long ago I started a new role as a โData Scientistโ which turned out to be โPython Engineerโ in practice.I would have been more prepared if Iโd brushed up on Pythonโs thread lifecycle instead of recommender systems in advance.In that spirit, here are my python interview/job preparation questions and answers. Most data scientists write a lot of code so this applies to both scientists and engineers.Whether youโre interviewing candidates, preparing to apply to jobs or just brushing up on Python, I think this list will be invaluable.Questions are unordered. Letโs begin. (1) What is the difference between a list and a tuple?Iโve been asked this question in every python / data science interview Iโve ever had. Know the answer like the back of your hand.- Lists are mutable. They can be modified after creation.- Tuples are immutable. Once a tuple is created it cannot be changed.- Lists have order. They are ordered sequences, typically of the same type of object. Ie: all user names ordered by creation date, ```["Seth", "Ema", "Eli"]```- Tuples have structure. Different data types may exist at each index. Ie: a database record in memory, ```(2, "Ema", "2020โ04โ16") id, name, created_at```
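A quick demonstration of the mutability difference (illustrative values only):

```python
li = [1, 2, 3]
li[0] = 99          # fine, lists can be modified in place
li
#=> [99, 2, 3]

tup = (1, 2, 3)
tup[0] = 99         # TypeError: 'tuple' object does not support item assignment
```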
###Code
###Output
_____no_output_____
###Markdown
(2) How is string interpolation performed?Without importing the ```Template``` class, there are 3 ways to interpolate strings.
###Code
name = 'Chris'
#1 โOld Styleโ String Formatting (% Operator)
print('Hey %s %s' % (name, name))
#2 โNew Styleโ String Formatting (str.format)
print(
"My name is {}".format((name))
)
#3 String Interpolation / f-Strings (Python 3.6+)
print(f'Hello {name}')
#4 Template Strings (Standard Library)
from string import Template
t = Template('Hey, $name!')
t.substitute(name=name)
###Output
Hey Chris Chris
My name is Chris
Hello Chris
###Markdown
(3) What is the difference between "is" and "=="?Early in my python career I assumed these were the same...hello bugs. So for the record, ```is``` checks identity and ```==``` checks equality. We'll walk through an example. Create some lists and assign them to names. Note that ```b``` points to the same object as ```a``` below.
###Code
a = [1,2,3]
b = a
c = [1, 2, 3]
###Output
_____no_output_____
###Markdown
check equality and note they are all equal.
###Code
print(a == b)
print(a == c)
#=> True
#=> True
###Output
True
True
###Markdown
We can verify this by printing their object id's.
###Code
print(id(a))
print(id(b))
print(id(c))
#=> 4369567560
#=> 4369567560
#=> 4369567624
###Output
4375724592
4375724592
4375726032
###Markdown
```c``` has a different ```id``` than ```a``` and ```b``` (4) What is a decorator?Another question I've been asked in every interview. It deserves a post in itself, but you're prepared if you can walk through writing your own example. A decorator allows adding functionality to an existing function by passing that existing function to a decorator, which executes the existing function as well as additional code. We'll write a decorator that logs when another function is called. **Write the decorator function.** This takes a function, ```func```, as an argument. It also defines a function, ```log_function_called```, which calls ```func()``` and executes some code, ```print(f'{func} called.')```. Then it returns the function it defined.
###Code
def logging(func):
def log_function_called():
print(f'{func} called.')
func()
return log_function_called
###Output
_____no_output_____
###Markdown
Letโs write other functions that weโll eventually add the decorator to (but not yet).
###Code
def my_name():
print('chris')
def friends_name():
print('naruto')
my_name()
friends_name()
#=> chris
#=> naruto
###Output
chris
naruto
###Markdown
Now add the decorator to both.
###Code
@logging
def my_name():
print('chris')
@logging
def friends_name():
print('naruto')
my_name()
friends_name()
#=> <function my_name at 0x10fca5a60> called.
#=> chris
#=> <function friends_name at 0x10fca5f28> called.
#=> naruto
###Output
<function my_name at 0x104d284d0> called.
chris
<function friends_name at 0x104d283b0> called.
naruto
###Markdown
See how we can now easily add logging to any function we write just by adding ```@logging``` above it. 5. Explain the range function Range generates a sequence of integers and there are 3 ways to use it. The function takes 1 to 3 arguments. Note I've wrapped each usage in a list comprehension so we can see the values generated. ```range(stop)```: generate integers from 0 to the "stop" integer.
###Code
[i for i in range(10)]
#=> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
###Output
_____no_output_____
###Markdown
```range(start, stop)```: generate integers from the "start" to the "stop" integer.
###Code
[i for i in range(2,10)]
#=> [2, 3, 4, 5, 6, 7, 8, 9]
###Output
_____no_output_____
###Markdown
```range(start, stop, step)```: generate integers from "start" to "stop" at intervals of "step".
###Code
[i for i in range(2,10,2)]
#=> [2, 4, 6, 8]
###Output
_____no_output_____
###Markdown
6. Define a class named car with 2 properties, "color" and "speed". Then create an instance and return speed.
###Code
class Car :
def __init__(self, color, speed):
self.color = color
self.speed = speed
car = Car('red','100mph')
car.speed
#=> '100mph'
###Output
_____no_output_____
###Markdown
7. What is the difference between instance, static and class methods in python?***Instance methods***: accept ```self``` parameter and relate to a specific instance of the class. ***Static Methods***: use ```@staticmethod``` decorator, are not related to a specific instance, and are self-contained (don't modify class or instance properties)***Class methods***: accept ```cls``` parameter and can modify the class itselfWe're going to illustrate the difference around a fictional ```CoffeeShop``` class.
###Code
class CoffeeShop:
specialty = 'espresso'
def __init__(self, coffee_price):
self.coffee_price = coffee_price
# instance method
def make_coffee(self):
print(f'Making {self.specialty} for ${self.coffee_price}')
# static method
@staticmethod
def check_weather():
print('Its sunny')
# class method
@classmethod
def change_specialty(cls, specialty):
cls.specialty = specialty
print(f'Specialty changed to {specialty}')
###Output
_____no_output_____
###Markdown
```CoffeeShop``` class has a property, ```specialty```, set to ```'espresso'``` by default. Each instance of ```CoffeeShop``` is initialized with a property ```coffee_price```. It also has 3 methods, an instance method, a static method and a class method. Let's initialize an instance of the coffee shop with a ```coffee_price``` of ```5```. Then call the instance method ```make_coffee```.
###Code
coffee_shop = CoffeeShop('5')
coffee_shop.make_coffee()
#=> Making espresso for $5
###Output
Making espresso for $5
###Markdown
Now call the static method. Static methods can't modify class or instance state so they're normally used for utility functions, for example, adding 2 numbers. We used ours to check the weather. ```its sunny```. Great!
###Code
coffee_shop.check_weather()
#=> Its sunny
###Output
Its sunny
###Markdown
Now let's use the class method to modify the coffee shop's specialty and then ```make_coffee```.
###Code
coffee_shop.change_specialty('drip coffee')
#=> Specialty changed to drip coffee
coffee_shop.make_coffee()
#=> Making drip coffee for $5
###Output
Specialty changed to drip coffee
Making drip coffee for $5
###Markdown
Note how ```make_coffee``` used to make ```espresso```but now makes ```drip coffee```! 8. What is the difference between "func" and "func()"?The purpose of this question is to see if you understand that all functions are also objects in python.
###Code
def func():
print('I\'m a function')
func
#=> function __main__.func>
func()
#=> Im a function
###Output
I'm a function
###Markdown
```func``` is the object representing the function which can be assigned to a variable or passed to another function. ```func()``` with parentheses calls the function and returns what it outputs. 9. Explain how the map function works```map``` returns an iterator of the return values from applying a function to every element in a sequence (wrap it in ```list()``` or a comprehension to see the values).
###Code
# this will add 3 to every value in the list
def add_three(x):
return x + 3
li = [1,2,3]
[i for i in map(add_three, li)]
#=> [4, 5, 6]
###Output
_____no_output_____
###Markdown
10. Explain how the reduce function works This can be tricky to wrap your head around until you use it a few times. ```reduce``` takes a function and a sequence and iterates over that sequence. On each iteration, both the current element and output from the previous element are passed to the function. In the end, a single value is returned.
###Code
from functools import reduce
def add_three(x,y):
return x + y
li = [1,2,3,5]
reduce(add_three, li)
#=> 11
###Output
_____no_output_____
###Markdown
```11``` is returned, which is the sum of ```1+2+3+5```. 11. Explain how the filter function worksFilter literally does what the name says. It filters elements in a sequence. Each element is passed to a function and is kept in the outputted sequence if the function returns ```True```, or discarded if the function returns ```False```.
###Code
# ALL ELEMENTS NOT DIVISIBLE BY 2 WILL BE REMOVED
def add_three(x):
if x % 2 == 0:
return True
else:
return False
li = [1,2,3,4,5,6,7,8]
[i for i in filter(add_three, li)]
#=> [2, 4, 6, 8]
###Output
_____no_output_____
###Markdown
12. Does python call by reference or call by value?Be prepared to go down a rabbit hole of semantics if you google this question and read the top few pages. You're better off just having an idea of how it works. Immutable objects like strings, numbers and tuples are call-by-value. Notice how the value of ```name``` didn't change outside the function when modified inside. The value of ```name``` was assigned to a new block in memory for the scope of that function.
###Code
name = 'chr'
def add_chars(s):
s += 'is'
print(s)
add_chars(name)
print(name)
#=> chris
#=> chr
###Output
chris
chr
###Markdown
Mutable objects like lists are call-by-reference. Notice how the list defined outside the function was modified inside the function. The parameter in the function pointed to the original block in memory that stored the value of ```li```.
###Code
li = [1,2]
def add_element(seq):
seq.append(3)
print(seq)
add_element(li)
print(li)
#=> [1, 2, 3]
#=> [1, 2, 3]
###Output
[1, 2, 3]
[1, 2, 3]
###Markdown
13. How to reverse a list?Note how ```reverse()``` is called on the list and mutates it. It doesn't return the mutated list itself.
###Code
li = ['a','b','c']
print(li)
li.reverse()
print(li)
#=> ['a', 'b', 'c']
#=> ['c', 'b', 'a']
###Output
['a', 'b', 'c']
['c', 'b', 'a']
###Markdown
14. How does string multiplication work?Let's see the results of multiplying the string ```'cat'``` by 3.
###Code
# the string is concatenated to itself 3 times
'cat' * 3
#=> 'catcatcat'
###Output
_____no_output_____
###Markdown
15. How does list multiplication work?Let's see the result of multiplying a list, ```[1, 2, 3]``` by 2.
###Code
# a list is outputted containing the contents of [1, 2, 3] repeated twice.
[1,2,3] * 2
#=> [1, 2, 3, 1, 2, 3]
###Output
_____no_output_____
###Markdown
16. What does "self" refer to in a class?Self refers to the instance of the class itself. It's how we give methods access to and the ability to update the object they belong to. Below, passing self to ```_init_()``` gives us the ability to set the ```color``` of an instance on initialization.
###Code
class Shirt:
def __init__(self, color):
self.color = color
s = Shirt('yellow')
s.color
#=> 'yellow'
###Output
_____no_output_____
###Markdown
17. How can you concatenate lists in python?Adding 2 lists together concatenates them. Note that arrays do not function the same way.
###Code
a = [1,2]
b = [3,4,5]
a + b
#=> [1, 2, 3, 4, 5]
###Output
_____no_output_____
###Markdown
18. What is the difference between a shallow and a deep copy?We'll discuss this in the context of a mutable object, a list. For immutable objects, shallow vs. deep isn't as relevant. We'll walk through 3 scenarios. i) Reference the original object. This points a new name, ```li2```, to the same place in memory to which ```li1``` points, so any change made to ```li1``` also occurs to ```li2```.
###Code
li1 = [['a'],['b'],['c']]
li2 = li1
li1.append(['d'])
print(li2)
#=> [['a'], ['b'], ['c'], ['d']]
###Output
[['a'], ['b'], ['c'], ['d']]
###Markdown
ii) Create a shallow copy of the original. We can do this with the ```list()``` constructor. A shallow copy creates a new object, but fills it with references to the original. So adding a new object to the original collection, ```li3```, doesnโt propagate to ```li4```, but modifying one of the objects in li3 will propagate to ```li4```.
###Code
li3 = [['a'],['b'],['c']]
li4 = list(li3)
li3.append([4])
print(li4)
#=> [['a'], ['b'], ['c']]
li3[0][0] = ['X']
print(li4)
#=> [[['X']], ['b'], ['c']]
###Output
[['a'], ['b'], ['c']]
[[['X']], ['b'], ['c']]
###Markdown
iii) Create a deep copy. This is done with ```copy.deepcopy()```. The 2 objects are now completely independent and changes to either have no effect on the other.
###Code
import copy
li5 = [['a'],['b'],['c']]
li6 = copy.deepcopy(li5)
li5.append([4])
li5[0][0] = ['X']
print(li6)
#=> [['a'], ['b'], ['c']]
###Output
[['a'], ['b'], ['c']]
###Markdown
19. What is the difference between lists and arrays?Note: Python's standard library has an array object but here I'm specifically referring to the commonly used Numpy array. * Lists exist in python's standard library. Arrays are defined by Numpy. * Lists can be populated with different types of data at each index. Arrays require homogeneous elements. * Arithmetic on lists adds or removes elements from the list. Arithmetic on arrays functions per linear algebra. * Arrays also use less memory and come with significantly more functionality. 20. How to concatenate two arrays?Remember, arrays are not lists. Arrays are from Numpy and arithmetic functions like linear algebra. We need to use Numpy's concatenate function to do it, as shown in the next cell.
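A small illustration of the arithmetic difference described above (values are arbitrary); the concatenation example for question 20 follows in the next cell:

```python
import numpy as np

[1, 2, 3] + [4, 5, 6]                      # list arithmetic concatenates
#=> [1, 2, 3, 4, 5, 6]

np.array([1, 2, 3]) + np.array([4, 5, 6])  # array arithmetic is element-wise
#=> array([5, 7, 9])
```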
###Code
import numpy as np
a = np.array([1,2,3])
b = np.array([4,5,6])
np.concatenate((a,b))
#=> array([1, 2, 3, 4, 5, 6])
### 21. What do you like about Python?
Python is very readable and there is a pythonic way to do just about everything, meaning a preferred way which is clear and concise.
I'd contrast this to Ruby where there are often many ways to do something without a guideline for which is preferred.
###Output
_____no_output_____
###Markdown
22. What is your favorite library in Python?When working with a lot of data, nothing is quite as helpful as pandas, which makes manipulating and visualizing data a breeze. 23. Name mutable and immutable objectsImmutable means the state cannot be modified after creation. Examples are: int, float, bool, string and tuple. Mutable means the state can be modified after creation. Examples are list, dict and set. 24. How would you round a number to 3 decimal places?Use the ```round(value, decimal_places)``` function.
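Before the rounding example below, a quick check for question 23: operations on immutable objects produce new objects, while mutable objects change in place (illustrative values only):

```python
s = 'abc'
print(id(s))
s = s + 'd'      # builds a new string object
print(id(s))     # different id

li = [1, 2, 3]
print(id(li))
li.append(4)     # mutates the same list object
print(id(li))    # same id
```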
###Code
# WAS THIS THE METHOD THAT USED DURING EXAM1?
a = 5.12345
round(a,3)
#=> 5.123
###Output
_____no_output_____
###Markdown
25. How do you slice a list?Slicing notation takes 3 arguments, ```list[start:stop:step]```, where step is the interval at which elements are returned.
###Code
a = [0,1,2,3,4,5,6,7,8,9]
print(a[:2])
#=> [0, 1]
print(a[8:])
#=> [8, 9]
print(a[2:8])
#=> [2, 3, 4, 5, 6, 7]
print(a[2:8:2])
#=> [2, 4, 6]
###Output
[0, 1]
[8, 9]
[2, 3, 4, 5, 6, 7]
[2, 4, 6]
###Markdown
26. What is pickling? Pickling is the go-to method of serializing and unserializing objects in Python. In the example below, we serialize and unserialize a list of dictionaries.
###Code
import pickle
obj = [
{'id':1, 'name':'Stuffy'},
{'id':2, 'name': 'Fluffy'}
]
with open('file.p', 'wb') as f:
pickle.dump(obj, f)
with open('file.p', 'rb') as f:
loaded_obj = pickle.load(f)
print(loaded_obj)
#=> [{'id': 1, 'name': 'Stuffy'}, {'id': 2, 'name': 'Fluffy'}]
###Output
[{'id': 1, 'name': 'Stuffy'}, {'id': 2, 'name': 'Fluffy'}]
###Markdown
27. What is the difference between dictionaries and JSON? Dict is a python datatype, a collection of indexed but unordered keys and values. JSON is just a string which follows a specified format and is intended for transferring data. 28. What ORMs have you used in Python?ORMs (object relational mapping) map data models (usually in an app) to database tables and simplify database transactions. SQLAlchemy is typically used in the context of Flask, and Django has its own ORM. 29. How do any() and all() work?Any takes a sequence and returns true if any element in the sequence is true. All returns true only if all elements in the sequence are true.
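For question 27, the standard library's ```json``` module makes the distinction concrete (the ```any()```/```all()``` code for question 29 follows in the next cell):

```python
import json

d = {'id': 1, 'name': 'Stuffy'}   # a dict: a Python object in memory
s = json.dumps(d)                 # JSON: just a string in a specified format
print(type(s), s)
#=> <class 'str'> {"id": 1, "name": "Stuffy"}
print(json.loads(s) == d)
#=> True
```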
###Code
a = [False, False, False]
b = [True, False, False]
c = [True, True, True]
print( any(a) )
print( any(b) )
print( any(c) )
#=> False
#=> True
#=> True
print( all(a) )
print( all(b) )
print( all(c) )
#=> False
#=> False
#=> True
###Output
False
True
True
False
False
True
###Markdown
30. Are dictionaries or lists faster for lookups? Looking up a value in a list takes O(n) time because the whole list needs to be iterated through until the value is found. Looking up a key in a dictionary takes O(1) time because it's a hash table. This can make a huge time difference if there are a lot of values so dictionaries are generally recommended for speed. But they do have other limitations like needing unique keys. 31. What is the difference between a module and a package?A module is a file (or collection of files) that can be imported together.
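A rough way to see the lookup difference for question 30, using ```timeit``` (exact numbers will vary by machine); the module import for question 31 follows in the next cell:

```python
import timeit

setup = "keys = list(range(100000)); d = dict.fromkeys(keys); target = 99999"
print(timeit.timeit("target in keys", setup=setup, number=1000))  # O(n) list scan
print(timeit.timeit("target in d", setup=setup, number=1000))     # O(1) hash lookup
```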
###Code
import sklearn
###Output
_____no_output_____
###Markdown
A package is a directory of modules. ```from sklearn import cross_validation``` 32. How to increment and decrement an integer in Python?Increments and decrements can be done with ```+=``` and ```-=```.
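For question 31, a minimal package is just a directory with an ```__init__.py``` (the layout below is a made-up example); the increment code for question 32 follows in the next cell:

```
mypackage/
    __init__.py
    loaders.py
    models.py
```

```python
from mypackage import loaders
```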
###Code
value = 5
value += 1
print(value)
#=> 6
value -= 1
value -= 1
print(value)
#=> 4
###Output
6
4
###Markdown
33. How to return the binary of an integer?Use the bin() function.
###Code
bin(5)
#=> '0b101'
###Output
_____no_output_____
###Markdown
34. How to remove duplicate elements from a list?This can be done by converting the list to a set then back to a list.
###Code
# NOTE THAT SETS WILL NOT NECESSARILY MAINTAIN THE ORDER OF A LIST.
a = [1,1,1,2,3]
a = list(set(a))
print(a)
#=> [1, 2, 3]
###Output
[1, 2, 3]
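If the original order matters, ```dict.fromkeys``` (insertion-ordered since Python 3.7) does the same deduplication without reshuffling:

```python
a = [3, 1, 3, 2, 1]
list(dict.fromkeys(a))
#=> [3, 1, 2]
```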
###Markdown
35. How to check if a value exists in a list?Use ```in```.
###Code
'a' in ['a','b','c']
#=> True
'a' in [1,2,3]
#=> False
###Output
_____no_output_____
###Markdown
36. What is the difference between append and extend? ```append``` adds a value to a list while ```extend``` adds values in another list to a list.
###Code
a = [1,2,3]
b = [1,2,3]
a.append(6)
print(a)
#=> [1, 2, 3, 6]
b.extend([4,5])
print(b)
#=> [1, 2, 3, 4, 5]
###Output
[1, 2, 3, 6]
[1, 2, 3, 4, 5]
###Markdown
37. How to take the absolute value of an integer?This can be done with the abs() function.
###Code
abs(2)
#=> 2
abs(-2)
#=> 2
###Output
_____no_output_____
###Markdown
38. How to combine two lists into a list of tuples?You can use the ```zip``` function to combine lists into a list of tuples. This isn't restricted to only using 2 lists. It can also be done with 3 or more.
###Code
a = ['a','b','c']
b = [1,2,3]
[(k,v) for k,v in zip(a,b)]
#=> [('a', 1), ('b', 2), ('c', 3)]
###Output
_____no_output_____
###Markdown
39. How can you sort a dictionary by key, alphabetically?You can't "sort" a dictionary because dictionaries don't have order but you can return a sorted list of tuples which has the keys and values that are in the dictionary.
###Code
d = {'c':3, 'd':4, 'b':2, 'a':1}
sorted(d.items())
#=> [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
###Output
_____no_output_____
###Markdown
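As a follow-up to sorting by key above, sorting by value instead is commonly done by passing a key function to ```sorted()``` (a small sketch, not from the original article):
```
d = {'c': 1, 'd': 4, 'b': 3, 'a': 2}
sorted(d.items(), key=lambda kv: kv[1])
#=> [('c', 1), ('a', 2), ('b', 3), ('d', 4)]
```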
40. How does a class inherit from another class in Python?In the below example, ```Audi```, inherits from ```Car```. And with that inheritance comes the instance methods of the parent class.
###Code
class Car():
def drive(self):
print('vroom')
class Audi(Car):
pass
audi = Audi()
audi.drive()
###Output
vroom
###Markdown
41. How can you remove all whitespace from a string?The easiest way is to split the string on whitespace and then rejoin without spaces.
###Code
s = 'A string with white space'
''.join(s.split())
#=> 'Astringwithwhitespace'
###Output
_____no_output_____
###Markdown
Two readers recommend a more pythonic way to handle this, following the Python ethos that ```Explicit is better than Implicit```. It's also faster because Python doesn't create a new list object; a quick timing sketch follows after the example below.
###Code
s = 'A string with white space'
s.replace(' ', '')
#=> 'Astringwithwhitespace'
###Output
_____no_output_____
###Markdown
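A quick way to check the speed claim above (a rough ```timeit``` sketch; numbers vary by machine):
```
import timeit

s = 'A string with white space' * 100
print(timeit.timeit(lambda: ''.join(s.split()), number=10_000))
print(timeit.timeit(lambda: s.replace(' ', ''), number=10_000))
# replace() is typically faster since it avoids building an intermediate list
```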
42. Why would you use enumerate() when iterating on a sequence?```enumerate()``` allows tracking index when iterating over a sequence. It's more pythonic than defining and incrementing an integer representing the index.
###Code
li = ['a','b','c','d','e']
for idx,val in enumerate(li):
print(idx, val)
#=> 0 a
#=> 1 b
#=> 2 c
#=> 3 d
#=> 4 e
###Output
0 a
1 b
2 c
3 d
4 e
###Markdown
43. What is the difference between pass, continue and break?```pass``` means do nothing. We typically use it because Python doesn't allow creating a class, function or if-statement without code inside of it. In the example below, an error would be thrown without code inside the ```i>3``` so we use ```pass```.
###Code
a = [1,2,3,4,5]
for i in a:
if i > 3:
pass
print(i)
#=> 1
#=> 2
#=> 3
#=> 4
#=> 5
###Output
1
2
3
4
5
###Markdown
```continue``` continues to the next element and halts execution for the current element. So ```print(i)``` is never reached for values where ```i < 3.```
###Code
for i in a:
if i < 3:
continue
print(i)
#=> 3
#=> 4
#=> 5
###Output
3
4
5
###Markdown
```break``` breaks the loop and the sequence is no longer iterated over. So elements from 3 onward are not printed.
###Code
for i in a:
if i == 3:
break
print(i)
#=> 1
#=> 2
###Output
1
2
###Markdown
44. Convert the following for loop into a list comprehension. This ```for``` loop.
###Code
a = [1,2,3,4,5]
a2 = []
for i in a:
a2.append(i + 1)
print(a2)
#=> [2, 3, 4, 5, 6]
###Output
[2, 3, 4, 5, 6]
###Markdown
Becomes.
###Code
# LIST COMPREHENSION IS GENERALLY ACCEPTED AS MORE PYTHONIC WHERE IT'S STILL READABLE.
a3 = [i+1 for i in a]
print(a3)
#=> [2, 3, 4, 5, 6]
###Output
[2, 3, 4, 5, 6]
###Markdown
45. Give an example of the ternary operator. The ternary operator is a one-line if/else statement. The syntax looks like ```a if condition else b```.
###Code
x = 5
y = 10
'greater' if x > 6 else 'less'
#=> 'less'
'greater' if y > 6 else 'less'
#=> 'greater'
###Output
_____no_output_____
###Markdown
46. Check if a string only contains numbers. You can use ```isnumeric()```.
###Code
'123a'.isnumeric()
#=> False
'123'.isnumeric()
#=> True
###Output
_____no_output_____
###Markdown
47. Check if a string only contains letters. You can use ```isalpha()```
###Code
'123a'.isalpha()
#=> False
'a'.isalpha()
#=> True
###Output
_____no_output_____
###Markdown
48. Check if a string only contains numbers and letters. You can use ```isalnum()```.
###Code
'123abc...'.isalnum()
#=> False
'123abc'.isalnum()
#=> True
###Output
_____no_output_____
###Markdown
49. Return a list of keys from a dictionary. This can be done by passing the dictionary to Python's ```list()``` constructor.
###Code
d = {'id':7, 'name':'Shiba', 'color':'brown', 'speed':'very slow'}
list(d)
#=> ['id', 'name', 'color', 'speed']
###Output
_____no_output_____
###Markdown
50. How do you upper and lowercase a string?You can use the ```upper()``` and ```lower()``` string methods.
###Code
small_word = 'potatocake'
big_word = 'FISHCAKE'
small_word.upper()
#=> 'POTATOCAKE'
big_word.lower()
#=> 'fishcake'
###Output
_____no_output_____
###Markdown
51. What is the difference between remove, del and pop? ```remove()``` removes the first matching value.
###Code
li = ['a','b','c','d']
li.remove('b')
li
#=> ['a', 'c', 'd']
###Output
_____no_output_____
###Markdown
```del``` removes an element by index.
###Code
li = ['a','b','c','d']
del li[0]
li
#=> ['b', 'c', 'd']
###Output
_____no_output_____
###Markdown
```pop()``` removes an element by index and returns that element.
###Code
li = ['a','b','c','d']
li.pop(2)
#=> 'c'
li
#=> ['a', 'b', 'd']
###Output
_____no_output_____
###Markdown
52. Give an example of dictionary comprehension. Below we'll create a dictionary with letters of the alphabet as keys, and index in the alphabet as values.
###Code
# creating a list of letters
import string
list(string.ascii_lowercase)
alphabet = list(string.ascii_lowercase)
# list comprehension
d = {val:idx for idx,val in enumerate(alphabet)}
d
#=> {'a': 0,
#=> 'b': 1,
#=> 'c': 2,
#=> ...
#=> 'x': 23,
#=> 'y': 24,
#=> 'z': 25}
###Output
_____no_output_____
###Markdown
(53) How is exception handling performed in Python?Python provides 3 words to handle exceptions, *try*, *except* and *finally*.The syntax looks like this.
###Code
try:
val = 1 + 'A'
except:
val = 10
finally:
print('complete')
print(val)
###Output
complete
10
###Markdown
53 Python Interview Questions and Answers Python questions for data scientists and software engineers. Not so long ago I started a new role as a "Data Scientist" which turned out to be "Python Engineer" in practice. I would have been more prepared if I'd brushed up on Python's thread lifecycle instead of recommender systems in advance. In that spirit, here are my python interview/job preparation questions and answers. Most data scientists write a lot of code, so this applies to both scientists and engineers. Whether you're interviewing candidates, preparing to apply to jobs or just brushing up on Python, I think this list will be invaluable. Questions are unordered. Let's begin. (1) What is the difference between a list and a tuple? I've been asked this question in every python / data science interview I've ever had. Know the answer like the back of your hand.- Lists are mutable. They can be modified after creation.- Tuples are immutable. Once a tuple is created it cannot be changed- Lists have order. They are ordered sequences, typically of the same type of object. Ie: all user names ordered by creation date, ["Seth", "Ema", "Eli"]- Tuples have structure. Different data types may exist at each index. Ie: a database record in memory, (2, "Ema", "2020-04-16") id, name, created_at ... A WHOLE BUNCH OF TYPING.... 1. What is the difference between a list and a tuple? - __Lists__ are mutable: They can be modified after creation.- __Tuples__ are immutable: Once a tuple is created it cannot be changed- Lists have order: they are ordered sequences, typically of the same type of object. I.e: all user names ordered by creation date, ["Seth", "Ema", "Eli"]- Tuples have structure: Different data types may exist at each index. I.e: a database record in memory, (2, "Ema", "2020-04-16") id, name, created_at 2. How is string interpolation performed? -> Without importing the Template class, there are 3 ways to interpolate strings.
###Code
name = 'Chris'
# 1. f strings
print(f'Hello {name}')
# 2 . % operator
print ('hey %s %s' % (name, name))
# 3 . format
print("My name is {}". format((name)))
###Output
Hello Chris
hey Chris Chris
My name is Chris
###Markdown
3. What is the difference between "is" and "=="? -> 'is' checks identity and '==' checks equality
###Code
a = [1,2,3]
b = a
c = [1,2,3]
print(a == b)
print(a == c)
print(a is b)
print(a is c)
###Output
True
False
###Markdown
We can verify this by printing their object ids.
###Code
print(id(a))
print(id(b))
print(id(c))
###Output
4360654752
4360654752
4359396160
###Markdown
4. What is a decorator? A __decorator__ allows adding functionality to an existing function by passing that existing function to a decorator, which executes the existing function as well as additional code (another function).
###Code
def memes():
    print("Why does Python live on land?\n Because it's above C level")
def head_fun(designated_function):
def nested_fun():
print("The function called executed: ")
return designated_function()
return nested_fun
so = head_fun(memes)
so()
@head_fun
def idk():
print("chris")
print(f'{idk} called.')
print("look for more example on decorators")
idk()
def lhead_fun(func):
def log_function_called():
print(f'{func} called.')
print("this is orginal example")
func()
return log_function_called
@lhead_fun
def idk():
print("chris")
idk()
###Output
<function idk at 0x103f4b830> called.
this is orginal example
chris
###Markdown
5. Explain the range function. Range generates a sequence of integers (a lazy range object in Python 3, not a list). There are three ways to use the range function. Ex-1. range(stop): generates integers from 0 up to (but not including) the "stop" integer, e.g. range(10). The function takes 1 to 3 arguments. In the examples below, it's wrapped in a list comprehension so we can see the values generated.
###Code
# 1.range(stop)
[i for i in range(10)]
###Output
_____no_output_____
###Markdown
Ex-2. range(start, stop): generates integers from "start" up to (but not including) "stop".
###Code
[i for i in range (2,10)]
###Output
_____no_output_____
###Markdown
Ex-3. range(start, stop, step): generates integers from "start" to "stop" at intervals of "step".
###Code
[i for i in range(2, 20, 2)]
###Output
_____no_output_____
###Markdown
6.Define a class named car with 2 properties, "color" and "speed". Then create an instance and return speed.
###Code
class Car:
def __init__(self, color, speed):
self.color = color
self.speed = speed
car = Car('red', '100mph')
car.speed
###Output
_____no_output_____
###Markdown
7. What is the difference between instance, static, and class methods in Python? Instance methods: accept the self parameter and relate to a specific instance of the class. Static methods: use the __@staticmethod__ decorator, are not related to a specific instance, and are self-contained (they don't modify class or instance properties). Class methods: accept the cls parameter and can modify the class itself. Example: a fictional CoffeeShop class.
###Code
class CoffeeShop:
speciality = 'espresso'
def __init__(self, coffee_price):
self.coffee_price = coffee_price
# instance method
def make_coffee(self):
print(f'making {self.speciality} for ${self.coffee_price}')
#static method
@staticmethod
def check_weather():
print('Its sunny')
# class method
@classmethod
def change_speciality(cls, speciality):
cls.speciality = speciality
print(f'Speciality changed to {speciality}')
coffee_shop = CoffeeShop('5')
coffee_shop.make_coffee()
coffee_shop.check_weather()
coffee_shop.change_speciality('drip coffee')
coffee_shop.make_coffee()
###Output
making drip coffee for $5
###Markdown
8. what is the difference between "func" and "func()"?
###Code
def func():
print('I am a function!')
func
func()
func
###Output
_____no_output_____
###Markdown
__func__ is the object representing the function, which can be assigned to a variable or passed to another function. __func()__ with parentheses calls the function and returns what it outputs. 9. Explain how the map function works. __map__ returns the values obtained by applying a function to every element in a sequence (an iterator in Python 3, wrapped in a list comprehension below so we can see the results).
###Code
def add_three(x):
return x + 3
li = [1, 2, 3]
[i for i in map(add_three, li)]
###Output
_____no_output_____
###Markdown
Above, we added 3 to every element in the list. 10. Explain how the reduce function works. __reduce__ takes a function and a sequence and iterates over that sequence. On each iteration, both the current element and the output from the previous element are passed to the function. In the end, a single value is returned.
###Code
from functools import reduce
def add_three(x, y):
return x + y
li = [1, 2, 3, 4, 5]
reduce(add_three, li)
total = sum(li)
total
###Output
_____no_output_____
###Markdown
11. Explain how the filter function works. Each element is passed to a function; it is kept in the outputted sequence if the function returns True and discarded if the function returns False.
###Code
def add_three(x):
if x%2 ==0:
return True
else:
return False
li = [1, 2, 3, 4, 5, 6, 7, 8, 8, 9]
[i for i in filter(add_three, li)]
# note how all elements not divisible by 2 have been removed.
###Output
_____no_output_____
###Markdown
Note how all elements not divisible by 2 have been removed. 12. Does Python call by reference or call by value? Immutable objects like strings, numbers and tuples are __call-by-value__. Notice how the value of 'name' didn't change outside the function when modified inside. The value of 'name' was assigned to a new block in memory for the scope of that function.
###Code
name = 'chr'
def add_chars(s):
s += 'is'
print(s)
add_chars(name)
print(name)
###Output
chris
chr
###Markdown
Mutable objects like lists are __call-by-reference__. Notice how the list defined outside the function was modified inside the function. The parameter in the function pointed to the original block in memory that stored the value of li.
###Code
li = [1, 2]
def add_element(seq):
seq.append(3)
print(seq)
add_element(li)
print(li)
###Output
[1, 2, 3]
[1, 2, 3]
###Markdown
13. How to reverse a list? Note how __reverse()__ is called on the list and mutates it in place. It doesn't return the mutated list itself.
###Code
li = ['a', 'b', 'c']
print(li)
li.reverse()
print(li)
###Output
['a', 'b', 'c']
['c', 'b', 'a']
###Markdown
14.How does string multiplication work?Let's see the results of multiplying the string 'cat' by 3.
###Code
'cat' * 3
###Output
_____no_output_____
###Markdown
15.How does list multiplication work?Let's see the result of multiplying a list, [1, 2, 3] by 2.
###Code
[1, 2, 3] * 2
###Output
_____no_output_____
###Markdown
A list is outputted containing the contents of [1, 2, 3] repeated twice. 16. What does "self" refer to in a class? Self refers to the instance of the class itself. It's how we give methods access to, and the ability to update, the object they belong to. Below, passing self to __init__() gives us the ability to set the __color__ of an instance on initialization.
###Code
class Shirt:
def __init__(self, color):
self.color = color
s = Shirt('yellow')
s.color
###Output
_____no_output_____
###Markdown
17. How can you concatenate lists in Python? Adding 2 lists together concatenates them. Note that arrays do not function the same way.
###Code
a = [1, 2]
b = [3, 4, 5]
a+b
a = [1, 2, 4]
b = [3, 4, 5]
a+b
###Output
_____no_output_____
###Markdown
18. What is the difference between a shallow and a deep copy? a) __Reference the original object__. This points a new name, __li2__, to the same place in memory to which __li1__ points. So any change we make to __li1__ also occurs in __li2__.
###Code
li1 = [['a'], ['b'], ['c']]
li2 = li1
li1.append(['d'])
print(li2)
###Output
[['a'], ['b'], ['c'], ['d']]
###Markdown
b) __Create a shallow copy of the original__. We can do this with the __list()__ constructor. A shallow copy creates a new object, but fills it with references to the original. So adding a new object to the original collection, __li3__, doesn't propagate to __li4__, but modifying one of the objects in __li3__ will propagate to __li4__.
###Code
li3 = [['a'], ['b'], ['c']]
li4 = list(li3)
li3.append([4])
print(li4)
li3[0][0] = ['x']
print(li4)
###Output
[['a'], ['b'], ['c']]
[[['x']], ['b'], ['c']]
###Markdown
=> Modifying a nested object in the first list is reflected in the second list, but whatever you append to the first list won't affect the second. c) __Create a deep copy__. This is done with __copy.deepcopy()__. The 2 objects are now completely independent and changes to either have no effect on the other.
###Code
import copy
li5 = [['a'], ['b'], ['c']]
li6 = copy.deepcopy(li5)
li5.append([4])
li5[0][0] = ['x']
print(li6)
###Output
[['a'], ['b'], ['c']]
###Markdown
19. What is the difference between lists and arrays? __Note__: Python's standard library has an array object, but here we are specifically referring to the commonly used Numpy array. __.__ Lists exist in Python's standard library. Arrays are defined by Numpy. __.__ Lists can be populated with different types of data at each index. Arrays require homogeneous elements. __.__ Arithmetic on lists adds or removes elements from the list. Arithmetic on arrays functions per linear algebra (a short sketch follows after the concatenation example below). __.__ Arrays also use less memory and come with significantly more functionality. 20. How to concatenate two arrays? Arrays are from Numpy and arithmetic on them functions like linear algebra. We need to use Numpy's __concatenate__ function to do it.
###Code
import numpy as np
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
np.concatenate((a, b))
c = np.array([8, 2, 3, 4])
d = np.array([4, 5, 6])
np.concatenate((c, d))
###Output
_____no_output_____
###Markdown
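To illustrate the arithmetic difference mentioned in question 19 (a small sketch): ```*``` repeats a list but multiplies a Numpy array element-wise.
```
import numpy as np

li = [1, 2, 3]
arr = np.array([1, 2, 3])

print(li * 2)    # [1, 2, 3, 1, 2, 3]  -> repetition
print(arr * 2)   # [2 4 6]             -> element-wise multiplication
```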
21. What do you like about Python? => it's a "good enough" language for building entire systems end-to-end - a language that's widely applicable and proficient at handling data analytics.- code is simple, it can be read easily.- developers can quickly understand the code written by other developers.- everything is visible to the programmer.- Python has a big advantage when it comes to learning to code and debugging problems efficiently.- Python can interoperate with many languages. 22. What is your favorite library in Python? __Pandas__ is an open source Python package, built on top of the Python programming language, that provides numerous tools.- It can present data in a way that is suitable for data analysis via its Series and DataFrame data structures.- The package contains multiple methods for convenient data filtering.- Pandas has a variety of utilities to perform Input/Output operations in a seamless manner. It can read data from a variety of formats such as CSV, TSV, MS Excel, etc. 23. Name mutable and immutable objects. __Immutable__ means the state cannot be modified after creation. Examples - int, float, bool, string and tuple. __Mutable__ means the state can be modified after creation. Examples - list, dict and set. 24. How would you round a number to 3 decimal places? Use the round(value, decimal_places) function.
###Code
a = 5.12345
round(a, 3)
a = 5.12345
round(5.12345, 3)
###Output
_____no_output_____
###Markdown
25.How do you Slice a list? Slicing notation takes 3 arguments, __list[start:stop:step]__, where step is the interval at which elements are returned.
###Code
a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
print(a[:2])
###Output
[0, 1]
###Markdown
If we don't specify the starting index, it defaults to 0. The ending index is not included; in the example above, index position 2 is excluded.
###Code
print(a[8:])
###Output
[8, 9]
###Markdown
The same applies here: since we didn't specify the ending index, the slice includes the rest of the positions.
###Code
print(a[2:8])
print(a[2:8:2])
###Output
[2, 4, 6]
###Markdown
Here it started at the 2nd index, stopped before the 8th index, and moved at an interval of 2 steps. 26. What is pickling? __-__ Pickle in Python is primarily used in serializing and deserializing a Python object structure. __-__ It's the process of converting a Python object into a byte stream (the pickle module serializes data in a binary format) to store it in a file/database, maintain program state across sessions, or transport data over the network. __-__ The pickled byte stream can be used to re-create the original object hierarchy by unpickling the stream.
###Code
import pickle
obj = [
{'id':1, 'name':'Maleda'},
{'id':2, 'name':'Preeti'}
]
with open('file.p', 'wb') as f:
pickle.dump(obj, f)
with open('file.p', 'rb') as f:
loaded_obj = pickle.load(f)
print(loaded_obj)
###Output
[{'id': 1, 'name': 'Maleda'}, {'id': 2, 'name': 'Preeti'}]
###Markdown
27. What is the difference between dictionaries and JSON? __Dict__ is a Python data type, a collection of indexed but unordered keys and values. __JSON__ is just a string which follows a specified format and is intended for transferring data. 28. What ORMs have you used in Python? __ORMs__ (object relational mappers) map data models (usually in an app) to database tables and simplify database transactions. __SQLAlchemy__ is typically used in the context of Flask, and Django has its own ORM. 29. How do any() and all() work? __Any__ takes a sequence and returns true if any element in the sequence is true. __All__ returns true only if all elements in the sequence are true.
###Code
a = [False, False, False]
b = [True, False, False]
c = [True, True, True]
print( any(a) )
print( any(b) )
print( any(c) )
print( all(a) )
print( all(b) )
print( all(c) )
###Output
False
True
True
False
False
True
###Markdown
30. Are dictionaries or lists faster for lookups? Looking up a value in a __list__ takes __O(n)__ time because the whole list needs to be iterated through until the value is found. Looking up a key in a __dictionary__ takes __O(1)__ time because it's a hash table. This can make a huge time difference if there are a lot of values, so dictionaries are generally recommended for speed. But they do have other limitations like needing unique keys. 31. What is the difference between a module and a package? A module is a file (or collection of files) that can be imported together.
###Code
import sklearn
###Output
_____no_output_____
###Markdown
A package is a directory of modules.
###Code
from sklearn import cross_validation
###Output
_____no_output_____
###Markdown
So packages are modules, but not all modules are packages. 32. How to increment and decrement an integer in Python? Increment and decrement can be done with __+=__ and __-=__.
###Code
value = 5
value +=1
print(value)
value -= 1
value -= 1
print(value)
###Output
4
###Markdown
33.How to return the binary of an integer? Use the __bin()__ function.
###Code
bin(5)
###Output
_____no_output_____
###Markdown
34.How to remove duplicate elements from a list. This can be done by converting the __list__ to a __set__ then back to a __list__.
###Code
a = [1, 1, 1, 2, 3]
a = list(set(a))
print(a)
a = [4, 1, 1, 1, 2, 3]
a = set(a)
print(a)
a = list(a)
print(a)
###Output
[1, 2, 3, 4]
###Markdown
Note that sets will not necessarily maintain the order of a list. 35.How to check if a value exists in a list? We use __in__.
###Code
'a' in ['a', 'b', 'c']
'a' in [1, 2, 3]
###Output
_____no_output_____
###Markdown
36.What is the difference between append and extend? __Append__ adds a value to a list while __extend__ adds values in another list to a list.
###Code
a = [1, 2, 3]
b = [1, 2, 3]
a.append(6)
print(a)
b.extend([4,5])
print(b)
a.append([4,5])
print(a)
b.extend([4,5])
print(b)
###Output
[1, 2, 3, 6, [4, 5]]
[1, 2, 3, 4, 5, 4, 5]
###Markdown
37.How to take an absolute value of an integer? This can be done with __abs()__ function.
###Code
abs(-3)
abs(2)
###Output
_____no_output_____
###Markdown
38.How can you sort a dictionary by key alphabetically? You can't __"sort"__ a dictionary because dictionaries don't have order but you can return a __sorted list__ of __tuples__ which has the keys and values that are in the dictionary.
###Code
d = {'c':3, 'd':4, 'b':2, 'a':1}
sorted(d.items())
###Output
_____no_output_____
###Markdown
39.How to combine two lists into a list of tuples?
###Code
a = ['a', 'b', 'c']
b = [1, 2, 3]
[(a,b) for a,b in zip(a, b)]
a = ['a', 'b', 'c']
b = [1, 2, 3]
c = ['loco', 'coco', 'hawk']
[(a,b, c) for a,b,c in zip(a, b, c)]
a = ['a', 'b', 'c']
b = [1, 2, 3]
c = ['loco', 'coco']
[(a,b, c) for a,b,c in zip(a, b, c)]
###Output
_____no_output_____
###Markdown
So in the last example zip left out the last values because one element was missing. 40. How does a class inherit from another class in Python? In the example below, __Audi__ inherits from __Car__. And with that inheritance come the instance methods of the parent class.
###Code
class Car():
def drive(self):
print('vroom')
class Audi(Car):
pass
audi = Audi()
audi.drive()
###Output
vroom
###Markdown
41.How can you remove all whitespace from a string?
###Code
s = 'A string with white space'
s = s.split()
print(s)
s = ''.join(s)
print(s)
s = 'A string with white space'
s.replace(' ', '')
###Output
_____no_output_____
###Markdown
42.Why would you use enumerate()when iterating on sequence? __Enumerate()__ allows tracking index when iterating over a sequence.
###Code
li = ['a', 'b', 'c', 'd', 'e']
for idx,val in enumerate(li):
print(idx, val)
###Output
0 a
1 b
2 c
3 d
4 e
###Markdown
43.What is the difference between pass, continue and break? __pass__ means do nothing for now.
###Code
a = [1, 2, 3, 4, 5, 6]
for i in a:
if i > 3:
pass
print(i)
###Output
1
2
3
4
5
6
###Markdown
__continue__ continues to the next element and halts execution for the current element. So __print(i)__ is never reached for values where i < 3.
###Code
for i in a:
if i < 3:
continue
print(i)
###Output
3
4
5
6
###Markdown
__break__ breaks the loop and the sequence is no longer iterated over.
###Code
for i in a:
if i == 3:
break
print(i)
###Output
1
2
###Markdown
44.Convert the following for loop into a list comprehension? This __for__ loop ->
###Code
a = [1, 2, 3, 4, 5]
a2 = []
for i in a:
a2.append(i + 1)
print(a2)
###Output
[2, 3, 4, 5, 6]
###Markdown
Becomes ->
###Code
a3 = [i + 1 for i in a]
print(a3)
###Output
[2, 3, 4, 5, 6]
###Markdown
45.Give an example of the ternary operator. The __ternary__ operator is a one-line __if/else__ statement. The syntax looks like __a if condition else b__.
###Code
x = 5
y = 10
'greater' if x > 6 else 'less'
'greater' if y > 6 else 'less'
###Output
_____no_output_____
###Markdown
46.Check if a string only contains letters. We can use __isalpha()__.
###Code
'123a'.isalpha()
'a'.isalpha()
###Output
_____no_output_____
###Markdown
47.Check if a string only contains numbers. We can use __isnumeric()__.
###Code
'123a'.isnumeric()
'123'.isnumeric()
###Output
_____no_output_____
###Markdown
48.Check if a string only contains numbers and letters. We can use __isalnum()__.
###Code
'123abcd..'.isalnum()
'123abcd'.isalnum()
###Output
_____no_output_____
###Markdown
49. Return a list of keys from a dictionary. This can be done by passing the dictionary to Python's __list()__ constructor.
###Code
d = {'id':7, 'name':'Maleda', 'color':'brown', 'speed':'very fast'}
list(d)
###Output
_____no_output_____
###Markdown
50.How do you upper and lowercase a string? We can use the __upper()__ and __lower()__ string methods.
###Code
small_world = 'potatocake'
big_world = 'FISHCAKE'
small_world.upper()
big_world.lower()
###Output
_____no_output_____
###Markdown
51.What is the difference between remove, del and pop? __remove()__ removes the first matching value.
###Code
li = ['a', 'b', 'c', 'd']
li.remove('b')
li
###Output
_____no_output_____
###Markdown
__del__ removes an element by index.
###Code
li = ['a', 'b', 'c', 'd']
del li[0]
li
###Output
_____no_output_____
###Markdown
__pop()__ removes an element by index and returns that element.
###Code
li = ['a', 'b', 'c', 'd']
li.pop(2)
li
###Output
_____no_output_____
###Markdown
52.Give an example of dictionary comprehension. Below we'll create a dictionary with letters of the alphabet as keys, and index in the alphabet as values.
###Code
# creating a list of letters
import string
list(string.ascii_lowercase)
alphabet = list(string.ascii_lowercase)
# list comprehension
d = {val:idx for idx, val in enumerate(alphabet)}
d
###Output
_____no_output_____
###Markdown
53. How is exception handling performed in Python? Python provides 3 keywords to handle exceptions: __try__, __except__ and __finally__.
###Code
try:
    pass  # try to do this
except:
    pass  # if the try block fails then do this
finally:
    pass  # always do this
###Output
_____no_output_____
###Markdown
Below: - the __try__ block fails because we cannot add an integer and a string. - the __except__ block sets __val = 10__. - the __finally__ block then prints 'complete'.
###Code
try:
val = 1 + 'A'
except:
val = 10
finally:
print('complete')
print(val)
###Output
complete
10
###Markdown
53 Python Interview Questions and Answers Python questions for data scientist and software engineersNot so long ago I started a new role as a โData Scientistโ which turned out to be โPython Engineerโ in practice.I would have been more prepared if Iโd brushed up on Pythonโs thread lifecycle instead of recommender systems in advance.In that spirit, here are my python interview/job preparation questions and answers. Most data scientists write a lot code so this applies to both scientists and engineers.Whether youโre interviewing candidates, preparing to apply to jobs or just brushing up on Python, I think this list will be invaluable.Questions are unordered. Letโs begin. (1) What is the difference between a list and a tuple?Iโve been asked this question in every python / data science interview Iโve ever had. Know the answer like the back of your hand.- Lists are mutable. They can be modified after creation.- Tuples are immutable. Once a tuple is created it cannot by changed- Lists have order. They are an ordered sequences, typically of the same type of object. Ie: all user names ordered by creation date, ["Seth", "Ema", "Eli"]- Tuples have structure. Different data types may exist at each index. Ie: a database record in memory, (2, "Ema", "2020โ04โ16") id, name, created_at 2. How is string interpolation performed?- F-strings - print(f'hello {name}')- % operator- print("Hello %s" % (name))- format- print("my name is {}".format((name)))
###Code
name = "James"
print(f'hello {name}')
print("Hello %s" % (name))
print("my name is {}".format((name)))
###Output
hello James
Hello James
my name is James
###Markdown
3. What is the difference between "is" and "=="?- is checks identity - == checks equality 4. What is a decorator?- A decorator adds functionality to your existing function (a minimal sketch follows after the range examples below) 5. Explain the range function- creates a range object and takes between 1 and 3 parameters: range(start, stop (not inclusive), step)
###Code
a = range(1,10,2)
type(a)
b = [i for i in a]
print(b)
print(a)
a = range(1,11)
type(a)
b = [i for i in a]
print(b)
print(a)
a = range(11)
type(a)
b = [i for i in a]
print(b)
print(a)
###Output
[1, 3, 5, 7, 9]
range(1, 10, 2)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
range(1, 11)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
range(0, 11)
###Markdown
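As promised above for question 4, a minimal decorator sketch (the names ```log_call``` and ```greet``` are hypothetical, not from the original notebook):
```
def log_call(func):
    def wrapper(*args, **kwargs):
        print(f'calling {func.__name__}')
        return func(*args, **kwargs)
    return wrapper

@log_call
def greet(name):
    print(f'hello {name}')

greet('James')
# calling greet
# hello James
```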
6. Define a class named car with 2 properties, โcolorโ and โspeedโ. Then create an instance and return speed.
###Code
class Car():
def __init__(self, color, speed):
self.color = color
self.speed = speed
Volkswagon = Car("white", 120)
print(Volkswagon.speed)
###Output
120
###Markdown
7. What is the difference between instance, static and class methods in python?- instance methods accept self as a parameter and work on a specific instance - class methods accept cls and can change state shared by all instances of the class - static methods are not tied to a specific instance and don't affect instance properties 8. What is the difference between "func" and "func()"?- func without parentheses is simply the function object- while func() calls the function and returns its result 9. Explain how the map function works- returns the results of running every element of a sequence through a function 10. Explain how the reduce function works- takes a function and a sequence- iterates over that sequence - both the current element and the output from the previous element are passed to the function- in the end a single value is returned 11. Explain how the filter function works- each element in a sequence is passed through a function- if the function returns true then the element is retained- if the function returns false then the element is discarded (a combined sketch of map, filter and reduce follows after the copy example below) 12. Does python call by reference or call by value?- Immutable objects (strings, tuples, numbers) are call by value- mutable objects are call by reference 13. How to reverse a list?- list.reverse() --> reverses in place and returns None, so the change only shows when the list is printed 14. How does string multiplication work?- the string is concatenated with itself as many times as the multiplier says 15. How does list multiplication work?- repeats the list as many times as the multiplier says 16. What does "self" refer to in a class?- self refers to the instance of the class 17. How can you concatenate lists in python?- adding two lists with + concatenates them 18. What is the difference between a shallow and a deep copy?- a deep copy creates 2 independent versions of the object- a shallow copy shares references, so changes to nested objects show up in both copies
###Code
li1 = [['a'],['b'],['c']]
li2 = li1
li1.append(['d'])
print(li2)
li3 = [['a'],['b'],['c']]
li4 = list(li3)
li3.append([4])
print(li4)
li3[0][0] = ['X']
print(li4)
###Output
[['a'], ['b'], ['c']]
[[['X']], ['b'], ['c']]
###Markdown
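As promised above, a combined sketch of map, filter and reduce for questions 9-11 (using ```functools.reduce```):
```
from functools import reduce

nums = [1, 2, 3, 4, 5]

doubled = list(map(lambda x: x * 2, nums))          # [2, 4, 6, 8, 10]
evens = list(filter(lambda x: x % 2 == 0, nums))    # [2, 4]
total = reduce(lambda acc, x: acc + x, nums)        # 15

print(doubled, evens, total)
```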
19. What is the difference between lists and arrays?- lists exsist mainly in pythons standard library while arrays are mostly refered to in numpy - lists can have different types of data at each index, while indexes are homogenius in numpy- Arithmetic on lists adds or removes elements from the list. Arithmetic on arrays functions per linear algebra.- Arrays have increased functionality and take less memory
###Code
import copy
li5 = [['a'],['b'],['c']]
li6 = copy.deepcopy(li5)
li5.append([4])
li5[0][0] = ['X']
print(li6)
###Output
[['a'], ['b'], ['c']]
###Markdown
20. How to concatenate two arrays?- use numpy concatenate function
###Code
import numpy as np
a = np.array([1,2,3])
b = np.array([4,5,6])
np.concatenate((a,b))
###Output
_____no_output_____
###Markdown
21. Why do you like Python?- Readable, not verbose, clear and concise way to do just about everything- Very powerful operator 22. What is your favorite Python Library- Pandas - makes data visualization and manipulation super easy 23. Name mutable and immutable objects- Immutable: Numbers, strings, tuples, float, bool- Mutable: lists, sets, dictionaries 24. How would you round a number to 3 decimal places- using the round function with a 3 as the second parameter and the number as the first parameter.
###Code
a = 3.1415
a = round(a,3)
print(a)
###Output
3.142
###Markdown
25. How do you slice a list?- list[start:stop:step]
###Code
lst = list(range(10))
print(lst)
lst[3:9:2]
###Output
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
###Markdown
26. What is pickling?- pickling is the go-to serialization technique for Python 27. What is the difference between dictionaries and JSON?- Dictionary is a Python data type - JSON is just a string which follows a specified format and is intended for transferring data 28. What ORMs have you used in Python?- ORM - Object relational mapping- SQLAlchemy in the context of Flask 29. How do any() and all() work?- any - returns true if any element in the sequence is true - all - returns true only if all elements in the sequence are true 30. Are dictionaries or lists faster for lookups?- Dictionaries, because they are hash tables and look a key up directly instead of going through each element of a list 31. What is the difference between a module and a package?- packages are modules but not all modules are packages- a package is a directory of modules- a module is a file or collection of files that can be imported together 32. How to increment and decrement an integer in Python?- += --> increment - -= --> decrement 33. How to return the binary of an integer?- use the bin() function 34. How to remove duplicate elements from a list?- convert to a set, which is an unordered collection of distinct elements - then convert the set back to a list 35. How to check if a value exists in a list?- use the in operator 36. What is the difference between append and extend?- append adds a single value to the list - extend adds the values from another list to the list 37. How to take the absolute value of an integer?- use the abs() function 38. How to combine two lists into a list of tuples?- use the zip function
###Code
a = [1,2,3]
b = ["a", "b", "c"]
c = [(k,v) for k, v in zip(a,b)]
print (c)
###Output
[(1, 'a'), (2, 'b'), (3, 'c')]
###Markdown
39. How can you sort a dictionary by key, alphabetically?- use the sort function combined with the lst.items()
###Code
d = {'c':3, 'd':4, 'b':2, 'a':1}
sorted(d.items())
###Output
_____no_output_____
###Markdown
40. How does a class inherit from another class in Python?- when creating the class use the following syntax- class NewClass(InhertedClass):- this will allow the NewClass to inhert all functions and properties from the InhertedClass 41. How can you remove all whitespace from a string?- using a join and split function- split the string on white spaces and then rejoin- You could also do this using a if function where the if is != " "
###Code
s = 'A string with white space'
''.join(s.split())
###Output
_____no_output_____
###Markdown
42. Why would you use enumerate() when iterating on a sequence?- to create an index for a list 43. What is the difference between pass, continue and break?- pass = does nothing - continue = moves on to the next step of the iteration- break = breaks out of the loop 44. Convert the following for loop into a list comprehension.
###Code
a = [1,2,3,4,5]
a2 = []
for i in a:
a2.append(i + 1)
print(a2)
#=> [2, 3, 4, 5, 6]
a3 = [i+1 for i in a]
a3
###Output
_____no_output_____
###Markdown
45. Give an example of the ternary operator.- a if condition else b
###Code
a = 5
b = 10
print("a greater" if a > b else "b greater")
print("a lesser" if a < b else "a greater")
###Output
b greater
a lesser
###Markdown
46. Check if a string only contains numbers.- .isnumeric()
###Code
a = "1342"
b = "dasf"
print(a.isnumeric())
print(b.isnumeric())
###Output
True
False
###Markdown
47. Check if a string only contains letters.- .isalpha()
###Code
a = "alphabetic"
b = "123"
print(a.isalpha())
print(b.isalpha())
###Output
True
False
###Markdown
48. Check if a string only contains numbers and letters.- .isalnum()
###Code
a = "123abcDoReMe"
b = "!@#"
print(a.isalnum())
print(b.isalnum())
###Output
True
False
###Markdown
49. Return a list of keys from a dictionary.- use list and the dictionary name
###Code
d = {'id':7, 'name':'Shiba', 'color':'brown', 'speed':'very slow'}
list(d)
###Output
_____no_output_____
###Markdown
50. How do you upper and lowercase a string?- .upper()- .lower()
###Code
a = "potatoe"
b = "EXCLAMATION"
print(a.upper())
print(b.lower())
###Output
POTATOE
exclamation
###Markdown
51. What is the difference between remove, del and pop?- remove - takes out the first matching value- del - deletes by index- pop - deletes by index and returns that value 52. Give an example of dictionary comprehension.
###Code
import string
list(string.ascii_lowercase)
alphabet = list(string.ascii_lowercase)
d = {val:idx for idx,val in enumerate(alphabet)}
print(d)
###Output
{'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7, 'i': 8, 'j': 9, 'k': 10, 'l': 11, 'm': 12, 'n': 13, 'o': 14, 'p': 15, 'q': 16, 'r': 17, 's': 18, 't': 19, 'u': 20, 'v': 21, 'w': 22, 'x': 23, 'y': 24, 'z': 25}
###Markdown
... A WHOLE BUNCH OF TYPING.... (53) How is exception handling performed in Python?Python provides 3 words to handle exceptions, *try*, *except* and *finally*.The syntax looks like this.
###Code
try:
val = 1 + 'A'
except:
val = 10
finally:
print('complete')
print(val)
###Output
complete
10
###Markdown
53 Python Interview Questions and Answers Python questions for data scientist and software engineersNot so long ago I started a new role as a โData Scientistโ which turned out to be โPython Engineerโ in practice.I would have been more prepared if Iโd brushed up on Pythonโs thread lifecycle instead of recommender systems in advance.In that spirit, here are my python interview/job preparation questions and answers. Most data scientists write a lot code so this applies to both scientists and engineers.Whether youโre interviewing candidates, preparing to apply to jobs or just brushing up on Python, I think this list will be invaluable.Questions are unordered. Letโs begin. (1) What is the difference between a list and a tuple?Iโve been asked this question in every python / data science interview Iโve ever had. Know the answer like the back of your hand.- Lists are mutable. They can be modified after creation.- Tuples are immutable. Once a tuple is created it cannot by changed- Lists have order. They are an ordered sequences, typically of the same type of object. Ie: all user names ordered by creation date, ["Seth", "Ema", "Eli"]- Tuples have structure. Different data types may exist at each index. Ie: a database record in memory, (2, "Ema", "2020โ04โ16") id, name, created_at (53) How is exception handling performed in Python?Python provides 3 words to handle exceptions, *try*, *except* and *finally*.The syntax looks like this.
###Code
try:
val = 1 + 'A'
except:
val = 10
finally:
print('complete')
print(val)
###Output
complete
10
###Markdown
You're Welcome. Yours Truly, Crazy Uncle Kris. (1) What is the difference between a list and a tuple? Lists are mutable. They can be changed after creation. Lists are ordered and usually hold the same data type, represented as l = list() or l = [], for example ['enne', 'meenene', 'minee', 'moooo'] or [22, 44, 55, 77, 88]. Tuples are immutable. They can't be changed after creation. Tuples have structure; a different data type may exist at each index, represented as t = tuple(), for example (1, 8965, 'mickey')
###Code
a = [1, 3, 5,88, 99]
print(a[2])
t = (11, 67, 'titu')
print(t)
###Output
(11, 67, 'titu')
###Markdown
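To make the immutability point from question 1 concrete (a small sketch added here, not part of the original notebook): item assignment works on a list but raises a TypeError on a tuple.
```
a = [1, 3, 5, 88, 99]
a[0] = 100        # fine: lists are mutable
print(a)          # [100, 3, 5, 88, 99]

t = (11, 67, 'titu')
try:
    t[0] = 100    # tuples are immutable
except TypeError as e:
    print(e)      # 'tuple' object does not support item assignment
```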
(2) How is string interpolation performed? Basically string formatting. There are three ways to do it:
###Code
name = 'Tanu'
print(f"Hello {name}")
print("Hello %s" %(name))
print('Hello {}'.format(name))
###Output
Hello Tanu
###Markdown
(3) Difference between 'is' and '=='? 'is' checks identity (whether both names point to the same object). '==' checks value equality. Let's say a = 'foo', b = 'bar', c = 'foo'
###Code
a = [22, 33, 44, 55]
b = a
c =[22, 33, 44, 55]
print(a == c)
print(a == b)
print(a == c)
print(a is c)
print(id(a))
print(id(b))
print(id(c))
###Output
True
True
True
False
4437997120
4437997120
4437999360
###Markdown
c has a different id than a & b 4. What is a decorator? It adds more functionality to the existing function by passing the existing function to the decorator, which executes the existing function as well as the additional code. A decorator acts as a wrapper around the original function. The object gets decorated, but the result of the original function is not altered. For example:
###Code
def star(func):
def inner(*args, **kwargs):
print("*" * 30)
func(*args, **kwargs)
print("*" * 30)
return inner
def percent(func):
def inner(*args, **kwargs):
print("%" * 30)
func(*args, **kwargs)
print("%"* 30)
return inner
@star
@percent
def printer(msg):
print(msg)
printer("Hello World")
###Output
******************************
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Hello World
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
******************************
###Markdown
(5) Explain the Range Function: The range function generates a sequence of integers. It can take 1-3 arguments. There are 3 ways to use it: range(stop), range(start, stop), range(start, stop, step)
###Code
[i for i in range(10)]
[i for i in range(5,10)]
[i for i in range(1,10, 2)]
###Output
_____no_output_____
###Markdown
(6) Defining Class car with properties speed and color Instantiate the class
###Code
class Car:
def __init__(self, color, speed):
self.color = color
self.speed = speed
car1 = Car('black', '100mph')
car1.speed
car1.color
###Output
_____no_output_____
###Markdown
(7) Difference between 'func' and 'func()'. All functions are also objects in Python. 'func' is basically an object representing the function, which can be assigned to a variable or passed to another function. 'func()' calls the function and returns the output. 'func' is a reference to the function; calling 'func()' always yields the return value of that function.
###Code
def func():
print('I am called Function')
func
def func():
print('I am called Function')
a = func
def getfunc():
print(func)
getfunc()
def printer(func):
print("I am passing the func in another function")
printer(func())
def printer():
print("I am passing the func in another function")
printer()
def func():
print('I am called Function')
func()
###Output
I am called Function
###Markdown
8. Explain how the map function works. The map() function runs each item in the iterable through the function specified. In other words, map() returns the values obtained after applying the function to each element in the sequence.
###Code
def add_num(num):
return num + 1
list = [1, 99, 19, 93]
[i for i in map(add_num, list)]
import random
def id_generator(num):
return random.randrange(10)
list_of_name = ['Alpha', 'Star', 'Moon', 'Sun']
l = [i for i in map(id_generator, list_of_name)]
bases = [10, 20, 30, 40, 50, 60]
index = [1, 2, 3, 4, 5, 6]
powers=[i for i in map(pow, bases, index)]
powers
###Output
_____no_output_____
###Markdown
Using LAMBDA
###Code
result = set(map(lambda x: x*x*x, range(10)))
print(result)
###Output
{0, 1, 64, 512, 8, 343, 216, 729, 27, 125}
###Markdown
Passing Multiple Iterators to map() Using Lambda
###Code
l = [10, 20, 30, 40, 50, 60]
m = [1, 2, 3, 4, 5, 6]
result = set(map(lambda x, y: x*y, l, m))
result
###Output
_____no_output_____
###Markdown
9. Explain how the reduce function works. reduce() lives in the functools module. It receives 2 arguments: a function and a sequence. Initially the function is called on the first two items; the result is then combined with the next item, and so on. Import functools first.
###Code
import functools
def mul(x, y):
return x * y
result = functools.reduce(mul, range(1, 6))
print(result)
###Output
120
###Markdown
10. Explain how the filter function works. filter() receives two arguments: a function that returns a boolean and the sequence it will be applied to. Each item in the sequence goes through the filter function. Only those values for which the function returns True are kept in the filter object.
###Code
def isPrime(num):
    # primes are >= 2; check every candidate divisor before deciding
    if num < 2:
        return False
    for n in range(2, num):
        if num % n == 0:
            return False
    return True
result = filter(isPrime, range(15))
print(set(result))
###Output
{2, 3, 5, 7, 11, 13}
###Markdown
Using Lambda Function
###Code
result = [i for i in filter(lambda x : x%2==0 and x%10== 0, range(100))]
result
###Output
_____no_output_____
###Markdown
11. Closures A closure is a nested function which has access to a local variable from the enclosing function that has finished its execution. Three things have to be present: - a nested function - it has access to a local variable of the enclosing function - it is returned from the enclosing function.
###Code
def make_printer(msg):
msg = "Hello World"
def printer():
print(msg)
return printer
p = make_printer("Hi There")
p()
###Output
Hello World
###Markdown
12. What is the difference between instance, static and class methods in python? Instance method: an instance method takes self as its first parameter and works on a specific instance of the class. Class method: a class method is bound to the class and not the object of the class. It has access to the state of the class, as it takes a class parameter (cls) that points to the class and not the object instance. It can modify class state that applies across all instances of the class, for example a class variable. Static method: a static method is also bound to the class and not the object of the class, but it can't access or modify class state (variables and methods). It is present in a class because it makes sense for the method to live there. Static method vs class method: a class method takes cls as its first parameter while a static method needs no specific parameters. A class method can access or modify class state while a static method can't. In general, static methods know nothing about class state; they are utility-type methods that take some parameters and work on those parameters. On the other hand, class methods must have the class as a parameter. We use the @classmethod decorator in Python to create a class method and the @staticmethod decorator to create a static method.
###Code
class Coffee:
specialty = 'latte'
def __init__(self, price):
self.price = price
# instance method
def make_coffee(self):
print(f" This {self.specialty} is for ${self.price}")
# static method
@staticmethod
def check_weather():
print("Its cloudy")
# class method
@classmethod
def change_speciality(cls, specialty):
cls.specialty = specialty
print(f"specialty change to {cls.specialty}")
coffee_shop = Coffee(5)
print(coffee_shop.make_coffee())
print(coffee_shop.check_weather())
print(coffee_shop.change_speciality("expresso"))
print(coffee_shop.make_coffee())
###Output
This latte is for $5
None
Its cloudy
None
specialty change to expresso
None
This expresso is for $5
None
###Markdown
13. How to reverse a list? reverse() reverses the list in place. It doesn't return the mutated list itself (it returns None).
###Code
a = ['a', 'b', 'c', 'd']
a.reverse()
print(a)
###Output
['d', 'c', 'b', 'a']
###Markdown
14. How does string multiplication work?
###Code
a = 'cat' * 5
print(a)
###Output
catcatcatcatcat
###Markdown
15. How does list multiplication work?
###Code
a = [1, 2, 3, 4, 5, 6, 7, 8, 9] * 2
print(a)
###Output
[1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9]
###Markdown
16. What does "self" refer to in a class? Self refers to the instance of the class itself. It's how we give methods access to, and the ability to update, the object they belong to.
###Code
class Car:
def __init__(self, color, speed):
self.color = color
self.speed = speed
car1 = Car("black", 100)
car1.color
car1.speed
###Output
_____no_output_____
###Markdown
17. How can you concatenate lists in python?
###Code
a = [1, 2, 3, 4]
b = [22]
a + b
###Output
_____no_output_____
###Markdown
18. Does python call by reference or call by value? Python utilizes a system which is known as "Call by Object Reference" or "Call by assignment". If you pass arguments like whole numbers, strings or tuples to a function, the passing is like call-by-value because you cannot change the value of the immutable objects being passed to the function. Whereas passing mutable objects can be considered call by reference, because when their values are changed inside the function, the change is also reflected outside the function. Immutable objects like strings, numbers and tuples are call-by-value. Notice how the value of name didn't change outside the function when modified inside. The value of name was assigned to a new block in memory for the scope of that function.
###Code
name = 'mat'
def add_chars(s):
s += 'is'
print(s)
add_chars(name)
print(name)
###Output
matis
mat
###Markdown
Mutable objects like list are call-by-reference. Notice how the list defined outside the function was modified inside the function. The parameter in the function pointed to the original block in memory that stored the value of li.
###Code
li = [1,2, 3, 4, 5, 6]
def add_element(num):
num.append(9)
print(num)
add_element(li)
print(li)
###Output
[1, 2, 3, 4, 5, 6, 9]
[1, 2, 3, 4, 5, 6, 9]
###Markdown
19. What is the difference between a shallow and a deep copy? Shallow copy - a copy that still references the nested objects of the original, so changes to those nested objects in the original are reflected in the child copy as well. - Deep copy - an actual copy of the original, populated recursively. Both copies are independent of each other; changes to the original will not be reflected in the child copy. - For both we need to import the copy module (a short demo follows after the code below).
###Code
# importing copy module
import copy
li1 = [1, 2, [3,5], 4]
# using copy for shallow copy
li2 = copy.copy(li1)
# using deepcopy for deepcopy
li3 = copy.deepcopy(li1)
###Output
_____no_output_____
###Markdown
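Continuing the cell above (a short demo, not part of the original notebook): mutating a nested element of ```li1``` shows up in the shallow copy ```li2``` but not in the deep copy ```li3```.
```
li1[2][0] = 99     # mutate a nested element of the original

print(li2)         # [1, 2, [99, 5], 4]  -> shallow copy shares the nested list
print(li3)         # [1, 2, [3, 5], 4]   -> deep copy is unaffected
```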
20. What is the difference between lists and arrays? Lists exist in pythonโs standard library. Arrays are defined by Numpy.Lists can be populated with different types of data at each index. Arrays require homogeneous elements.Arithmetic on lists adds or removes elements from the list. Arithmetic on arrays functions per linear algebra.Arrays also use less memory and come with significantly more functionality. 21. How to concatenate two arrays?
###Code
import numpy as np
a = np.array([1,2,3])
b = np.array([4,5,6])
np.concatenate((a,b))
###Output
_____no_output_____
###Markdown
22. What do you like about Python? Python is very readable and easy to understand. Compatible: Python offers compatibility with various platforms, so developers don't face the issues that are common for other languages. Object-oriented: Python supports procedure-oriented and object-oriented programming. The first type lets you apply reusable pieces of code. As for OOP, it utilizes objects that are based on data and functionality. Various libraries: there are a lot of libraries for Python. 23. What is your favorite library in Python? Pandas, sqlalchemy, re 24. Name mutable and immutable objects Immutable means the state cannot be modified after creation. Examples are: int, float, bool, string and tuple. Mutable means the state can be modified after creation. Examples are list, dict and set. 25. How would you round a number to 3 decimal places? Use the round(value, decimal_places) function. 26. How do you slice a list? list[start:stop:step]
###Code
a = [0,1,2,3,4,5,6,7,8,9]
print(a[:2])
print(a[2:8:2])
###Output
[0, 1]
[2, 4, 6]
###Markdown
27. What is pickling? Pickling is the go-to method of serializing and unserializing objects in Python.
###Code
import pickle
obj = [
{'id':1, 'name':'Stuffy'},
{'id':2, 'name': 'Fluffy'}
]
with open('file.p', 'wb') as f:
pickle.dump(obj, f)
with open('file.p', 'rb') as f:
data = pickle.load(f)
print(data)
###Output
[{'id': 1, 'name': 'Stuffy'}, {'id': 2, 'name': 'Fluffy'}]
###Markdown
28. How to remove duplicate elements from a list? This can be done by converting the list to a set then back to a list.
###Code
a = [1,1,1,2,3]
a = list(set(a))
print(a)
###Output
[1, 2, 3]
###Markdown
29. How to check if a value exists in a list?Use in
###Code
'a' in ['a','b','c']
'd' in ['a','b','c']
###Output
_____no_output_____
###Markdown
30. How to take the absolute value of an integer?This can be done with the abs() function.
###Code
abs(2.35)
abs(-3)
###Output
_____no_output_____
###Markdown
31. How to combine two lists into a list of tuples? ZIP function
###Code
a = ['a','b','c']
b = [1,2,3]
[(k,v) for k,v in zip(a,b)]
###Output
_____no_output_____
###Markdown
32. How can you sort a dictionary by key, alphabetically? A dictionary itself can't be sorted, but we can return a sorted list of the (key, value) tuples that are in the dictionary.
###Code
d = {'c':3, 'd':4, 'b':2, 'a':1}
sorted(d.items())
###Output
_____no_output_____
###Markdown
33. What is the difference between append and extend? append adds a single value to the end of a list; extend adds the values from another iterable to the list.
###Code
a = [1,2,3,0,7]
b = [1,2,3]
a.append(6)
print(a)
b.extend([9,8,4])
print(b)
###Output
[1, 2, 3, 0, 7, 6]
[1, 2, 3, 9, 8, 4]
###Markdown
34. What is the difference between dictionaries and JSON? Dict is python datatype, a collection of indexed but unordered keys and values. JSON is just a string which follows a specified format and is intended for transferring data. 35. What ORMs have you used in Python? sqlalchemy, 36. How do any() and all() work? Any takes a sequence and returns true if any element in the sequence is true. All returns true only if all elements in the sequence are true.
###Code
a = [False, False, False]
b = [True, False, False]
c = [True, True, True]
print( any(a) )
print( any(b) )
print( any(c) )
print( all(a) )
print( all(b) )
print( all(c) )
###Output
False
True
True
False
False
True
###Markdown
37. Are dictionaries or lists faster for lookups? Looking up a value in a list takes O(n) time because the whole list needs to be iterated through until the value is found. Looking up a key in a dictionary takes O(1) time because it's a hash table. This can make a huge time difference if there are a lot of values, so dictionaries are generally recommended for speed. But they do have other limitations like needing unique keys. 38. What is the difference between a module and a package? A module is a file (or collection of files) that can be imported together, for example: import sklearn. A package is a directory of modules, for example: from sklearn import cross_validation. So packages are modules, but not all modules are packages. 39. How to increment and decrement an integer in Python? Increments and decrements can be done with += and -=.
###Code
value = 5
value += 1
print(value)
value = 5
value -= 1
print(value)
###Output
4
###Markdown
40. How to return the binary of an integer? use bin()
###Code
bin(8)
###Output
_____no_output_____
###Markdown
41. How does a class inherit from another class in Python?
###Code
class Car:
# def __init__(self, color, speed, seating):
# self.color = color
# self.speed = speed
# self.seating = seating
def drive(self):
print('vroom')
class Honda(Car):
pass
h = Honda()
h.drive()
###Output
vroom
###Markdown
42. How can you remove all whitespace from a string? split the string on whitespaces and then rejoin using join
###Code
s = 'A string with white space'
s.split()
''.join(s.split())
###Output
_____no_output_____
###Markdown
43. Why would you use enumerate() when iterating on a sequence? enumerate() allows tracking index when iterating over a sequence. The enumerate() method adds counter to an iterable and returns it (the enumerate object).The enumerate() method takes two parameters:iterable - a sequence, an iterator, or objects that supports iterationstart (optional) - enumerate() starts counting from this number. If start is omitted, 0 is taken as start.The enumerate() method adds counter to an iterable and returns it. The returned object is a enumerate object.You can convert enumerate objects to list and tuple using list() and tuple() method respectively.
###Code
grocery = ['bread', 'milk', 'butter']
enumerateGrocery = enumerate(grocery)
print(type(enumerateGrocery))
# converting to list
print(list(enumerateGrocery))
# changing the default counter
enumerateGrocery = enumerate(grocery, 10)
print(list(enumerateGrocery))
###Output
<class 'enumerate'>
[(0, 'bread'), (1, 'milk'), (2, 'butter')]
[(10, 'bread'), (11, 'milk'), (12, 'butter')]
###Markdown
If we have to write that in for loop it will be like this
###Code
for i, items in enumerate(grocery):
print(i, items)
###Output
0 bread
1 milk
2 butter
###Markdown
changing start to 100
###Code
for i, items in enumerate(grocery, 100):
print(i, items)
###Output
100 bread
101 milk
102 butter
###Markdown
44. What is the difference between pass, continue and break? pass - does nothing; it is simply a placeholder. continue - skips the rest of the current iteration and moves on to the next element. break - exits the loop as soon as the condition is met.
###Code
a = [1,2,3,4,5]
for i in a:
if i < 3:
continue
print(i)
for i in a:
if i == 5:
break
print(i)
###Output
3
4
5
1
2
3
4
###Markdown
45. Convert the following for loop into a list comprehension.
###Code
a = [1,2,3,4,5]
a2 = []
for i in a:
a2.append(i + 1)
print(a2)
a3 = [i+1 for i in a]
print(a3)
squares = []
for x in range(10):
squares.append(x**2)
print(squares)
a = [x**2 for x in range(10)]
print(a)
###Output
[2, 3, 4, 5, 6]
[2, 3, 4, 5, 6]
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
###Markdown
46. Give an example of the ternary operator. The ternary operator is a one-line if/else statement. The syntax is `a if condition else b`.
###Code
x = 15
y = 10
'greater' if x > 6 else 'less'
###Output
_____no_output_____
###Markdown
47. Check if a string only contains numbers. You can use isnumeric().
###Code
'1234ghs'.isnumeric()
'1234'.isnumeric()
###Output
_____no_output_____
###Markdown
48. Check if a string only contains letters. You can use isalpha().
###Code
'apoorva'.isalpha()
'apoorva88'.isalpha()
###Output
_____no_output_____
###Markdown
49. Check if a string only contains numbers and letters. You can use isalnum().
###Code
'apoorva88@'.isalnum()
'apporva88'.isalnum()
###Output
_____no_output_____
###Markdown
50. Return a list of keys from a dictionary.
###Code
d = {'id':7, 'name':'Shiba', 'color':'brown', 'speed':'very slow'}
list(d)
###Output
_____no_output_____
###Markdown
For the values:
###Code
list(d.values())
###Output
_____no_output_____
###Markdown
51. How do you uppercase and lowercase a string? Use upper() and lower().
###Code
a = 'ApoorvaShukla'
a.upper()
a.lower()
###Output
_____no_output_____
###Markdown
52. What is the difference between remove, del and pop?
###Code
l = [1, 2, 3, 4, 5, 7, 9]
###Output
_____no_output_____
###Markdown
remove() deletes the first matching value:
###Code
l.remove(4)
l
###Output
_____no_output_____
###Markdown
del removes an element by index.
###Code
del(l[3])
l
###Output
_____no_output_____
###Markdown
pop() removes an element by index (the last element by default) and returns that element.
###Code
l.pop()
###Output
_____no_output_____
###Markdown
53. Give an example of dictionary comprehension. A dictionary comprehension takes the form {key: value for (key, value) in iterable}
###Code
keys = ['a','b','c','d','e']
values = [1,2,3,4,5]
d = {k:v for (k,v) in zip(keys, values)}
print(d)
myDict = {x: x**2 for x in [1,2,3,4,5]}
print (myDict)
newdict = {x: x**3 for x in range(10) if x**3 % 4 == 0}
print(newdict)
###Output
{'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}
{1: 1, 2: 4, 3: 9, 4: 16, 5: 25}
{0: 0, 2: 8, 4: 64, 6: 216, 8: 512}
|
examples/research_projects/visual_bert/demo.ipynb | ###Markdown
**Note**: This demo is adapted from the LXMERT Demo present here: https://github.com/huggingface/transformers/tree/main/examples/research_projects/lxmert
###Code
from IPython.display import Image, display
import PIL.Image
import io
import torch
import numpy as np
from processing_image import Preprocess
from visualizing_image import SingleImageViz
from modeling_frcnn import GeneralizedRCNN
from utils import Config
import utils
from transformers import VisualBertForQuestionAnswering, BertTokenizerFast
# URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/images/input.jpg"
URL = "https://vqa.cloudcv.org/media/test2014/COCO_test2014_000000262567.jpg"
OBJ_URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/objects_vocab.txt"
ATTR_URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/attributes_vocab.txt"
VQA_URL = "https://dl.fbaipublicfiles.com/pythia/data/answers_vqa.txt"
# for visualizing output
def showarray(a, fmt="jpeg"):
a = np.uint8(np.clip(a, 0, 255))
f = io.BytesIO()
PIL.Image.fromarray(a).save(f, fmt)
display(Image(data=f.getvalue()))
# load object, attribute, and answer labels
objids = utils.get_data(OBJ_URL)
attrids = utils.get_data(ATTR_URL)
vqa_answers = utils.get_data(VQA_URL)
# load models and model components
frcnn_cfg = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")
frcnn = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=frcnn_cfg)
image_preprocess = Preprocess(frcnn_cfg)
bert_tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
visualbert_vqa = VisualBertForQuestionAnswering.from_pretrained("uclanlp/visualbert-vqa")
# image viz
frcnn_visualizer = SingleImageViz(URL, id2obj=objids, id2attr=attrids)
# run frcnn
images, sizes, scales_yx = image_preprocess(URL)
output_dict = frcnn(
images,
sizes,
scales_yx=scales_yx,
padding="max_detections",
max_detections=frcnn_cfg.max_detections,
return_tensors="pt",
)
# add boxes and labels to the image
frcnn_visualizer.draw_boxes(
output_dict.get("boxes"),
output_dict.pop("obj_ids"),
output_dict.pop("obj_probs"),
output_dict.pop("attr_ids"),
output_dict.pop("attr_probs"),
)
showarray(frcnn_visualizer._get_buffer())
# test_questions_for_url1 = [
# "Where is this scene?",
# "what is the man riding?",
# "What is the man wearing?",
# "What is the color of the horse?"
# ]
test_questions_for_url2 = [
"Where is the cat?",
"What is near the disk?",
"What is the color of the table?",
"What is the color of the cat?",
"What is the shape of the monitor?",
]
# Very important that the boxes are normalized
# normalized_boxes = output_dict.get("normalized_boxes")
features = output_dict.get("roi_features")
for test_question in test_questions_for_url2:
test_question = [test_question]
inputs = bert_tokenizer(
test_question,
padding="max_length",
max_length=20,
truncation=True,
return_token_type_ids=True,
return_attention_mask=True,
add_special_tokens=True,
return_tensors="pt",
)
output_vqa = visualbert_vqa(
input_ids=inputs.input_ids,
attention_mask=inputs.attention_mask,
visual_embeds=features,
visual_attention_mask=torch.ones(features.shape[:-1]),
token_type_ids=inputs.token_type_ids,
output_attentions=False,
)
# get prediction
pred_vqa = output_vqa["logits"].argmax(-1)
print("Question:", test_question)
print("prediction from VisualBert VQA:", vqa_answers[pred_vqa])
###Output
Question: ['Where is the cat?']
prediction from VisualBert VQA: outside
Question: ['What is near the disk?']
prediction from VisualBert VQA: nothing
Question: ['What is the color of the table?']
prediction from VisualBert VQA: brown
Question: ['What is the color of the cat?']
prediction from VisualBert VQA: gray
Question: ['What is the shape of the monitor?']
prediction from VisualBert VQA: square
###Markdown
**Note**: This demo is adapted from the LXMERT Demo present here: https://github.com/huggingface/transformers/tree/master/examples/research_projects/lxmert
###Code
from IPython.display import Image, display
import PIL.Image
import io
import torch
import numpy as np
from processing_image import Preprocess
from visualizing_image import SingleImageViz
from modeling_frcnn import GeneralizedRCNN
from utils import Config
import utils
from transformers import VisualBertForQuestionAnswering, BertTokenizerFast
# URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/images/input.jpg"
URL = "https://vqa.cloudcv.org/media/test2014/COCO_test2014_000000262567.jpg"
OBJ_URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/objects_vocab.txt"
ATTR_URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/attributes_vocab.txt"
VQA_URL = "https://dl.fbaipublicfiles.com/pythia/data/answers_vqa.txt"
# for visualizing output
def showarray(a, fmt='jpeg'):
a = np.uint8(np.clip(a, 0, 255))
f = io.BytesIO()
PIL.Image.fromarray(a).save(f, fmt)
display(Image(data=f.getvalue()))
# load object, attribute, and answer labels
objids = utils.get_data(OBJ_URL)
attrids = utils.get_data(ATTR_URL)
vqa_answers = utils.get_data(VQA_URL)
# load models and model components
frcnn_cfg = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")
frcnn = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=frcnn_cfg)
image_preprocess = Preprocess(frcnn_cfg)
bert_tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
visualbert_vqa = VisualBertForQuestionAnswering.from_pretrained("uclanlp/visualbert-vqa")
#image viz
frcnn_visualizer = SingleImageViz(URL, id2obj=objids, id2attr=attrids)
# run frcnn
images, sizes, scales_yx = image_preprocess(URL)
output_dict = frcnn(
images,
sizes,
scales_yx=scales_yx,
padding="max_detections",
max_detections=frcnn_cfg.max_detections,
return_tensors="pt"
)
# add boxes and labels to the image
frcnn_visualizer.draw_boxes(
output_dict.get("boxes"),
output_dict.pop("obj_ids"),
output_dict.pop("obj_probs"),
output_dict.pop("attr_ids"),
output_dict.pop("attr_probs"),
)
showarray(frcnn_visualizer._get_buffer())
# test_questions_for_url1 = [
# "Where is this scene?",
# "what is the man riding?",
# "What is the man wearing?",
# "What is the color of the horse?"
# ]
test_questions_for_url2 = [
"Where is the cat?",
"What is near the disk?",
"What is the color of the table?",
"What is the color of the cat?",
"What is the shape of the monitor?",
]
#Very important that the boxes are normalized
# normalized_boxes = output_dict.get("normalized_boxes")
features = output_dict.get("roi_features")
for test_question in test_questions_for_url2:
test_question = [test_question]
inputs = bert_tokenizer(
test_question,
padding="max_length",
max_length=20,
truncation=True,
return_token_type_ids=True,
return_attention_mask=True,
add_special_tokens=True,
return_tensors="pt"
)
output_vqa = visualbert_vqa(
input_ids=inputs.input_ids,
attention_mask=inputs.attention_mask,
visual_embeds=features,
visual_attention_mask=torch.ones(features.shape[:-1]),
token_type_ids=inputs.token_type_ids,
output_attentions=False,
)
# get prediction
pred_vqa = output_vqa["logits"].argmax(-1)
print("Question:", test_question)
print("prediction from VisualBert VQA:", vqa_answers[pred_vqa])
###Output
Question: ['Where is the cat?']
prediction from VisualBert VQA: outside
Question: ['What is near the disk?']
prediction from VisualBert VQA: nothing
Question: ['What is the color of the table?']
prediction from VisualBert VQA: brown
Question: ['What is the color of the cat?']
prediction from VisualBert VQA: gray
Question: ['What is the shape of the monitor?']
prediction from VisualBert VQA: square
###Markdown
**Note**: This demo is adapted from the LXMERT Demo present here: https://github.com/huggingface/transformers/tree/master/examples/research_projects/lxmert
###Code
from IPython.display import Image, display
import PIL.Image
import io
import torch
import numpy as np
from processing_image import Preprocess
from visualizing_image import SingleImageViz
from modeling_frcnn import GeneralizedRCNN
from utils import Config
import utils
from transformers import VisualBertForQuestionAnswering, BertTokenizerFast
# URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/images/input.jpg"
URL = "https://vqa.cloudcv.org/media/test2014/COCO_test2014_000000262567.jpg"
OBJ_URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/objects_vocab.txt"
ATTR_URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/attributes_vocab.txt"
VQA_URL = "https://dl.fbaipublicfiles.com/pythia/data/answers_vqa.txt"
# for visualizing output
def showarray(a, fmt="jpeg"):
a = np.uint8(np.clip(a, 0, 255))
f = io.BytesIO()
PIL.Image.fromarray(a).save(f, fmt)
display(Image(data=f.getvalue()))
# load object, attribute, and answer labels
objids = utils.get_data(OBJ_URL)
attrids = utils.get_data(ATTR_URL)
vqa_answers = utils.get_data(VQA_URL)
# load models and model components
frcnn_cfg = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")
frcnn = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=frcnn_cfg)
image_preprocess = Preprocess(frcnn_cfg)
bert_tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
visualbert_vqa = VisualBertForQuestionAnswering.from_pretrained("uclanlp/visualbert-vqa")
# image viz
frcnn_visualizer = SingleImageViz(URL, id2obj=objids, id2attr=attrids)
# run frcnn
images, sizes, scales_yx = image_preprocess(URL)
output_dict = frcnn(
images,
sizes,
scales_yx=scales_yx,
padding="max_detections",
max_detections=frcnn_cfg.max_detections,
return_tensors="pt",
)
# add boxes and labels to the image
frcnn_visualizer.draw_boxes(
output_dict.get("boxes"),
output_dict.pop("obj_ids"),
output_dict.pop("obj_probs"),
output_dict.pop("attr_ids"),
output_dict.pop("attr_probs"),
)
showarray(frcnn_visualizer._get_buffer())
# test_questions_for_url1 = [
# "Where is this scene?",
# "what is the man riding?",
# "What is the man wearing?",
# "What is the color of the horse?"
# ]
test_questions_for_url2 = [
"Where is the cat?",
"What is near the disk?",
"What is the color of the table?",
"What is the color of the cat?",
"What is the shape of the monitor?",
]
# Very important that the boxes are normalized
# normalized_boxes = output_dict.get("normalized_boxes")
features = output_dict.get("roi_features")
for test_question in test_questions_for_url2:
test_question = [test_question]
inputs = bert_tokenizer(
test_question,
padding="max_length",
max_length=20,
truncation=True,
return_token_type_ids=True,
return_attention_mask=True,
add_special_tokens=True,
return_tensors="pt",
)
output_vqa = visualbert_vqa(
input_ids=inputs.input_ids,
attention_mask=inputs.attention_mask,
visual_embeds=features,
visual_attention_mask=torch.ones(features.shape[:-1]),
token_type_ids=inputs.token_type_ids,
output_attentions=False,
)
# get prediction
pred_vqa = output_vqa["logits"].argmax(-1)
print("Question:", test_question)
print("prediction from VisualBert VQA:", vqa_answers[pred_vqa])
###Output
Question: ['Where is the cat?']
prediction from VisualBert VQA: outside
Question: ['What is near the disk?']
prediction from VisualBert VQA: nothing
Question: ['What is the color of the table?']
prediction from VisualBert VQA: brown
Question: ['What is the color of the cat?']
prediction from VisualBert VQA: gray
Question: ['What is the shape of the monitor?']
prediction from VisualBert VQA: square
|
ignis/tomography-overview.ipynb | ###Markdown
Quantum Tomography Overview *** Contributors Gadi Aleksandrowicz ([email protected]), Christopher J. Wood
###Code
import numpy as np
import itertools
import qiskit
from qiskit import QuantumRegister, QuantumCircuit
from qiskit import Aer
import qiskit.ignis.verification.tomography as tomo
from qiskit.quantum_info import state_fidelity
###Output
_____no_output_____
###Markdown
The General TheoryQuantum tomography is an experimental procedure to reconstruct a description of part of a quantum system from the measurement outcomes of a specific set of experiments. In Qiskit Ignis we are currently concerned with the following two tomography tasks:1. **Quantum state tomography**: Given a state-preparation circuit that prepares a system in a state, reconstruct a description of the density matrix $\rho$.2. **Quantum process tomography**: Given a circuit, reconstruct a description of the quantum channel $\mathcal{E}$ that describes the circuit.In both cases we rely on the assumption that we have access to a large number of identical copies of the system and so can perform several different measurements on it.We can roughly split the tomography process into three stages:1. Preparation: Add suitable initialization/measurement devices to the quantum system.2. Experiment: Obtain measurement data from the quantum system.3. Tomography: Use the obtained data to reconstruct the system's description.Steps 1 and 2 are related to the quantum system being studied, whereas step 3 is a classical computation which can be carried out on standard computers. State Tomography OverviewQuantum state tomography is a method of reconstructing a description of the quantum state of a system from a set of experiments. While the state of an ideal quantum system is described by a state-vector, the state of an open quantum system (one that may experience noise or other errors) is given by a density matrix $\rho$. Quantum state tomography aims to reconstruct this density matrix. To do this we assume that the state $\rho$ can be reliably prepared by a state-preparation circuit, and that it can be subjected to several measurements with respect to different operators; this data can be used to reconstruct $\rho$ or a close approximation of it by several different methods. DefinitionsWe denote by $\mathcal{X}$ the state space of a closed (ideal) quantum system. In quantum computing this is typically the tensor product of $N$ 2-dimensional (qubit) systems, $\mathcal{X} = \mathbb{C}^{2^N}$. Valid quantum states $|\psi\rangle \in \mathcal{X}$ are those with norm 1: $|\langle\psi|\psi\rangle|^2 = 1$. We denote by $L(\mathcal{X})$ the state space of linear maps on $\mathcal{X}$, ($L: \mathcal{X}\rightarrow\mathcal{X}$). The density matrix for a quantum system with state space $\mathcal{X}$ is a linear map $\rho \in L(\mathcal{X})$ that is also positive-semidefinite, and has trace equal to 1:1. **Unit trace:** $\text{tr}[\rho] = 1$2. **Positive-semidefinite:** For all $|\psi\rangle \in \mathcal{X}$, $\langle\psi|\rho|\psi\rangle \ge 0$. This is denoted by $\rho \ge 0$. Example: 1-qubit reconstruction using the Pauli basisGiven the Pauli matrices $I=\left(\begin{array}{cc}1 & 0\\0 & 1\end{array}\right),X=\left(\begin{array}{cc}0 & 1\\1 & 0\end{array}\right),Y=\left(\begin{array}{cc}0 & -i\\i & 0\end{array}\right),Z=\left(\begin{array}{cc}1 & 0\\0 & -1\end{array}\right)$
###Code
I = np.array([[1,0],[0,1]])
X = np.array([[0,1],[1,0]])
Y = np.array([[0,-1j],[1j,0]])
Z = np.array([[1,0],[0,-1]])
###Output
_____no_output_____
###Markdown
It is easy to see they constitute an orthonormal basis for $M_2(\mathbb{C})$ with respect to the Hilbert-Schmidt inner product $\left\langle A,B\right\rangle =\frac{1}{2}\text{tr}\left(B^{\dagger}A\right)$
###Code
def HS_product(A,B):
return 0.5*np.trace(np.conj(B).T @ A)
###Output
_____no_output_____
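###Markdown
As a quick numerical check of this orthonormality (a small sketch using only the matrices and the HS_product function defined above), every Pauli paired with itself should give 1 and every distinct pair should give 0:
###Code
# Verify orthonormality of {I, X, Y, Z} under the Hilbert-Schmidt product
paulis = {'I': I, 'X': X, 'Y': Y, 'Z': Z}
for name1, P1 in paulis.items():
    for name2, P2 in paulis.items():
        print(name1, name2, np.round(HS_product(P1, P2), 10))
###Output
_____no_output_____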
###Markdown
And hence,$$ \rho =\left\langle \rho,I\right\rangle I+\left\langle \rho,X\right\rangle X+\left\langle \rho,Y\right\rangle Y+\left\langle \rho,Z\right\rangle Z = $$$$=\frac{\text{tr}\left(\rho\right)I+\text{tr}\left(X\rho\right)X+\text{tr}\left(Y\rho\right)Y+\text{tr}\left(Z\rho\right)Z}{2}$$The values of $\text{tr}\left(X\rho\right), \text{tr}\left(Y\rho\right), \text{tr}\left(Z\rho\right)$ are the expectation values of $X$, $Y$, $Z$, respectively, and can be approximated by repeated measuring in the $X, Y$ and $Z$ bases. Since $\text{tr}\left(\rho\right)=1$ there is no need for additional measurements for the coefficient of $I$. Example: 1-qubit Linear inversionThe above method can be rephrased in more general form. First, any hermitian operator $H$ has a spectral decomposition of the form $H=\sum \lambda_i P_i$ where $\lambda_i$ is an eigenvalue of $H$ and $P_i$ is the projection operator to the corresponding eigenspace. For the hermitian operators $X,Y,Z$ whose eigenvalues are 1 and -1 we can therefore write* $X = P^X_0-P^X_1$* $Y = P^Y_0-P^Y_1$* $Z = P^Z_0-P^Z_1$Where$$P^X_0=\frac{1}{2}\left(\begin{array}{cc}1 & 1\\1 & 1\end{array}\right), P^X_1=\frac{1}{2}\left(\begin{array}{cc}1 & -1\\-1 & 1\end{array}\right)$$$$P^Y_0=\frac{1}{2}\left(\begin{array}{cc}1 & -i\\i & 1\end{array}\right), P^Y_1=\frac{1}{2}\left(\begin{array}{cc}1 & i\\-i & 1\end{array}\right)$$$$P^Z_0=\left(\begin{array}{cc}1 & 0\\0 & 0\end{array}\right), P^Z_1=\left(\begin{array}{cc}0 & 0\\0 & 1\end{array}\right)$$In the Ignis code, these matrices are defined in **tomography.fitters.utils.pauli_preparation_matrix**. We give an explicit definition here:
###Code
PX0 = 0.5*np.array([[1, 1], [1, 1]])
PX1 = 0.5*np.array([[1, -1], [-1, 1]])
PY0 = 0.5*np.array([[1, -1j], [1j, 1]])
PY1 = 0.5*np.array([[1, 1j], [-1j, 1]])
PZ0 = np.array([[1, 0], [0, 0]])
PZ1 = np.array([[0, 0], [0, 1]])
projectors = [PX0, PX1, PY0, PY1, PZ0, PZ1]
###Output
_____no_output_____
###Markdown
By Born's rule, $\text{tr}\left(P_{i}^{X}\rho\right)$ is the probability for the outcome $\left|i\right\rangle$ when measuring in the X-basis, and this probability can be estimated directly using repeated measurements in the X-basis. The $Y$ and $Z$ bases are handled similarly. The computation $\text{tr}\left(P_{i}^{X}\rho\right)$ can be replaced by the scalar product $\vec{P}_i^X \cdot \vec{\rho}$ where $\vec{E}$ denotes the vector obtained from the operator $E$ by flattening its matrix (the result vector consists of the first row, then the second row, etc.). Now we can construct a matrix $$M=\left(\begin{array}{c}\vec{P}_{0}^{X}\\\vec{P}_{1}^{X}\\\vec{P}_{0}^{Y}\\\vec{P}_{1}^{Y}\\\vec{P}_{0}^{Z}\\\vec{P}_{1}^{Z}\end{array}\right)$$such that $$M\vec{\rho}=\vec{p}=\left(\begin{array}{c}p_{\left|0\right\rangle }^{X}\\p_{\left|1\right\rangle }^{X}\\p_{\left|0\right\rangle }^{Y}\\p_{\left|1\right\rangle }^{Y}\\p_{\left|0\right\rangle }^{Z}\\p_{\left|1\right\rangle }^{Z}\end{array}\right)$$is the equation relating the density operator to the observed probabilities.
###Code
M = np.array([p.flatten() for p in projectors])
M
###Output
_____no_output_____
###Markdown
Since $M$ can be computed by knowing the operators used in the tomography, and the vector $\vec{p}$ of probabilities can be estimated using the tomography results, all that remains is to solve the equation $M\vec{\rho}=\vec{p}$ for $\vec{\rho}$. If the rank of $M$ is large enough this can be done by multiplying both sides by $M^\dagger$:$M^\dagger M\vec{\rho} = M^\dagger \vec{p}$$\vec{\rho} = (M^\dagger M)^{-1} M^\dagger \vec{p}$In our example, we obtain the matrix $$(M^\dagger M)^{-1} M^\dagger = \left(\begin{array}{cccccc}\frac{1}{6} & \frac{1}{6} & \frac{1}{6} & \frac{1}{6} & \frac{4}{6} & -\frac{2}{6}\\\frac{1}{2} & -\frac{1}{2} & \frac{i}{2} & -\frac{i}{2} & 0 & 0\\\frac{1}{2} & -\frac{1}{2} & -\frac{i}{2} & \frac{i}{2} & 0 & 0\\\frac{1}{6} & \frac{1}{6} & \frac{1}{6} & \frac{1}{6} & -\frac{2}{6} & \frac{4}{6}\end{array}\right)$$
###Code
M_dg = np.conj(M).T
linear_inversion_matrix = np.linalg.inv(M_dg @ M) @ M_dg
###Output
_____no_output_____
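###Markdown
As a small illustrative sketch, we can feed the linear-inversion matrix exact, noise-free probabilities for the state $\left|0\right\rangle \left\langle 0\right|$ (the X- and Y-basis outcomes are uniform and the Z-basis outcome is always $\left|0\right\rangle$) and recover the density matrix:
###Code
# Exact outcome probabilities for |0><0| in the X, Y and Z bases
p_exact = np.array([0.5, 0.5, 0.5, 0.5, 1.0, 0.0])
rho_zero = (linear_inversion_matrix @ p_exact).reshape(2, 2)
print(np.round(rho_zero, 10))
###Output
_____no_output_____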
###Markdown
Multiplication by the linear inversion matrix performs the reconstruction stage described earlier to obtain the density operator. Example: 2-qubit Linear inversionFor multiple qubit systems the technique of linear inversion remains the same. The projector operators are tensor products of 1-qubit projectors: $6^n$ projectors in total, since we measure in $3^n$ bases (tensor products of $X,Y,Z$) and each $n$-qubit measurement has $2^n$ projectors.
###Code
projectors_2 = [np.kron(p1, p2) for (p1, p2) in itertools.product(projectors, repeat = 2)]
M_2 = np.array([p.flatten() for p in projectors_2])
M_dg_2 = np.conj(M_2).T
linear_inversion_matrix_2 = np.linalg.inv(M_dg_2 @ M_2) @ M_dg_2
###Output
_____no_output_____
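###Markdown
For two qubits this gives $6^2 = 36$ projectors, so $M_2$ is a $36 \times 16$ matrix and the inversion matrix is $16 \times 36$; a quick check of the dimensions (a small sketch):
###Code
print(len(projectors_2), M_2.shape, linear_inversion_matrix_2.shape)
###Output
_____no_output_____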
###Markdown
We will now attempt to reconstruct the Bell state $\frac{\left|00\right\rangle +\left|11\right\rangle }{\sqrt{2}}$ from simulated tomography results. First, we prepare a quantum circuit which generates this bell state from the input $\left|00\right\rangle$.
###Code
q2 = QuantumRegister(2)
bell = QuantumCircuit(q2)
bell.h(q2[0])
bell.cx(q2[0], q2[1])
bell.qasm()
###Output
_____no_output_____
###Markdown
We now use Ignis' **state_tomography_circuits** procedure which generates the $3^n$ circuits obtained by adding to the bell circuit a measurement according to each of our measurement operators (Pauli by default). Then we execute on a standard simulator.
###Code
qst_bell = tomo.state_tomography_circuits(bell, q2)
job = qiskit.execute(qst_bell, Aer.get_backend('qasm_simulator'), shots=5000)
###Output
_____no_output_____
###Markdown
Now we load the data into the **StateTomographyFitter** which takes results data and can fit to a density matrix
###Code
statefit = tomo.StateTomographyFitter(job.result(), qst_bell)
###Output
_____no_output_____
###Markdown
Here is the data we loaded into the **StateTomographyFitter**
###Code
statefit.data
###Output
_____no_output_____
###Markdown
Now use a private function **\_fitter_data** to explicitly extract the probability vector $\vec{p}$ and projector matrix $M$ that satisfy $M\vec{\rho} = \vec{p}$. For typical usage we don't need to expose this data.
###Code
p, M, weights = statefit._fitter_data(True, 0.5)
###Output
_____no_output_____
###Markdown
Now we use the linear inversion technique to reconstruct $\vec{\rho}$. Since we usually represent density matrices as matrices and not vectors, we use NumPy's **reshape** function to convert $\vec{\rho}$ into $\rho$.
###Code
M_dg = np.conj(M).T
linear_inversion_matrix = np.linalg.inv(M_dg @ M) @ M_dg
rho_bell = linear_inversion_matrix @ p
rho_bell = np.reshape(rho_bell, (4, 4))
print(rho_bell)
###Output
[[ 5.03133333e-01+0.j 5.36666667e-03+0.0095j
-3.56666667e-03-0.00053333j 5.00000000e-01+0.0031j ]
[ 5.36666667e-03-0.0095j 1.66666667e-03+0.j
2.77555756e-17-0.0029j 2.83333333e-03+0.00066667j]
[-3.56666667e-03+0.00053333j 2.77555756e-17+0.0029j
-1.66666667e-03+0.j -8.43333333e-03-0.0093j ]
[ 5.00000000e-01-0.0031j 2.83333333e-03-0.00066667j
-8.43333333e-03+0.0093j 4.96866667e-01+0.j ]]
###Markdown
To check the quality of our solution, we compute the fidelity between the real quantum state (obtained via simulation by a simulator that can return state vectors) and our calculated $\rho$. The closer the fidelity is to 1, the better.
###Code
job = qiskit.execute(bell, Aer.get_backend('statevector_simulator'))
psi_bell = job.result().get_statevector(bell)
F_bell = state_fidelity(psi_bell, rho_bell)
print('Fit Fidelity linear inversion =', F_bell)
###Output
Fit Fidelity linear inversion = 0.9999999999999998
###Markdown
Maximum Likelihood Linear inversion works perfectly on accurate data, but tomography data is never fully accurate. Two obvious obstacles are:1. Since the number of measurements is limited, we do not obtain the probability vector $\vec{p}$ but an approximation.2. The measurement process might be noisy.This may result in an inaccurate or even self-contradicting $\vec{p}$, and the result of linear inversion might not be a density matrix at all (e.g. not positive semidefinite, or with trace different from 1).Since we want to solve the linear problem $A\vec{x}=\vec{p}$ for $x$, we can turn it into an optimization problem by attempting to minimize $\|A\vec{x}-\vec{p}\|_2$ while subjecting $x$ to additional constraints to ensure it is indeed a density matrix. This is done by **state_cvx_fit**.Another approach is to solve this optimization problem with no further constraints. The result might not be a density operator, i.e. positive semidefinite with trace 1; in this case the algorithm first rescales in order to obtain a density operator. This is done using **state_mle_fit**.
###Code
rho_cvx_bell = statefit.fit(method='cvx')
F_bell = state_fidelity(psi_bell, rho_cvx_bell)
print('Fit Fidelity CVX fit =', F_bell)
rho_mle_bell = statefit.fit(method='lstsq')
F_bell = state_fidelity(psi_bell, rho_mle_bell)
print('Fit Fidelity MLE fit =', F_bell)
###Output
Fit Fidelity CVX fit = 0.9998549234477454
Fit Fidelity MLE fit = 0.9939629890765465
###Markdown
Quantum Tomography Overview *** Contributors Gadi Aleksandrowicz ([email protected]), Christopher J. Wood
###Code
import numpy as np
import itertools
import qiskit
from qiskit import QuantumRegister, QuantumCircuit
from qiskit import Aer
import qiskit.ignis.verification.tomography as tomo
from qiskit.quantum_info import state_fidelity
###Output
_____no_output_____
###Markdown
The General TheoryQuantum tomography is an experimental procedure to reconstruct a description of part of a quantum system from the measurement outcomes of a specific set of experiments. In Qiskit Ignis we are currently concerned with the following two tomography tasks:1. **Quantum state tomography**: Given a state-preparation circuit that prepares a system in a state, reconstruct a description of the density matrix $\rho$.2. **Quantum process tomography**: Given a circuit, reconstruct a description of the quantum channel $\mathcal{E}$ that describes the circuit.In both cases we rely on the assumption that we have access to a large number of identical copies of the system and so can perform several different measurements on it.We can roughly split the tomography process into three stages:1. Preparation: Add suitable initialization/measurement devices to the quantum system.2. Experiment: Obtain measurement data from the quantum system.3. Tomography: Use the obtained data to reconstruct the system's description.Steps 1 and 2 are related to the quantum system being studied, whereas step 3 is a classical computation which can be carried out on standard computers. State Tomography OverviewQuantum state tomography is a method of reconstructing a description of the quantum state of a system from a set of experiments. While the state of an ideal quantum system is described by a state-vector, the state of an open quantum system (one that may experience noise or other errors) is given by a density matrix $\rho$. Quantum state tomography aims to reconstruct this density matrix. To do this we assume that the state $\rho$ can be reliably prepared by a state-preparation circuit, and that it can be subjected to several measurements with respect to different operators; this data can be used to reconstruct $\rho$ or a close approximation of it by several different methods. DefinitionsWe denote by $\mathcal{X}$ the state space of a closed (ideal) quantum system. In quantum computing this is typically the tensor product of $N$ 2-dimensional (qubit) systems, $\mathcal{X} = \mathbb{C}^{2^N}$. Valid quantum states $|\psi\rangle \in \mathcal{X}$ are those with norm 1: $|\langle\psi|\psi\rangle|^2 = 1$. We denote by $L(\mathcal{X})$ the state space of linear maps on $\mathcal{X}$, ($L: \mathcal{X}\rightarrow\mathcal{X}$). The density matrix for a quantum system with state space $\mathcal{X}$ is a linear map $\rho \in L(\mathcal{X})$ that is also positive-semidefinite, and has trace equal to 1:1. **Unit trace:** $\text{tr}[\rho] = 1$2. **Positive-semidefinite:** For all $|\psi\rangle \in \mathcal{X}$, $\langle\psi|\rho|\psi\rangle \ge 0$. This is denoted by $\rho \ge 0$. Example: 1-qubit reconstruction using the Pauli basisGiven the Pauli matrices $I=\left(\begin{array}{cc}1 & 0\\0 & 1\end{array}\right),X=\left(\begin{array}{cc}0 & 1\\1 & 0\end{array}\right),Y=\left(\begin{array}{cc}0 & -i\\i & 0\end{array}\right),Z=\left(\begin{array}{cc}1 & 0\\0 & -1\end{array}\right)$
###Code
I = np.array([[1,0],[0,1]])
X = np.array([[0,1],[1,0]])
Y = np.array([[0,-1j],[1j,0]])
Z = np.array([[1,0],[0,-1]])
###Output
_____no_output_____
###Markdown
It is easy to see they constitute an orthonormal basis for $M_2(\mathbb{C})$ with respect to the Hilbert-Schmidt inner product $\left\langle A,B\right\rangle =\frac{1}{2}\text{tr}\left(B^{\dagger}A\right)$
###Code
def HS_product(A,B):
return 0.5*np.trace(np.conj(B).T @ A)
###Output
_____no_output_____
###Markdown
And hence,$$ \rho =\left\langle \rho,I\right\rangle I+\left\langle \rho,X\right\rangle X+\left\langle \rho,Y\right\rangle Y+\left\langle \rho,Z\right\rangle Z = $$$$=\frac{\text{tr}\left(\rho\right)I+\text{tr}\left(X\rho\right)X+\text{tr}\left(Y\rho\right)Y+\text{tr}\left(Z\rho\right)Z}{2}$$The values of $\text{tr}\left(X\rho\right), \text{tr}\left(Y\rho\right), \text{tr}\left(Z\rho\right)$ are the expectation values of $X$, $Y$, $Z$, respectively, and can be approximated by repeated measuring in the $X, Y$ and $Z$ bases. Since $\text{tr}\left(\rho\right)=1$ there is no need for additional measurements for the coefficient of $I$. Example: 1-qubit Linear inversionThe above method can be rephrased in more general form. First, any hermitian operator $H$ has a spectral decomposition of the form $H=\sum \lambda_i P_i$ where $\lambda_i$ is an eigenvalue of $H$ and $P_i$ is the projection operator to the corresponding eigenspace. For the hermitian operators $X,Y,Z$ whose eigenvalues are 1 and -1 we can therefore write* $X = P^X_0-P^X_1$* $Y = P^Y_0-P^Y_1$* $Z = P^Z_0-P^Z_1$Where$$P^X_0=\frac{1}{2}\left(\begin{array}{cc}1 & 1\\1 & 1\end{array}\right), P^X_1=\frac{1}{2}\left(\begin{array}{cc}1 & -1\\-1 & 1\end{array}\right)$$$$P^Y_0=\frac{1}{2}\left(\begin{array}{cc}1 & -i\\i & 1\end{array}\right), P^Y_1=\frac{1}{2}\left(\begin{array}{cc}1 & i\\-i & 1\end{array}\right)$$$$P^Z_0=\left(\begin{array}{cc}1 & 0\\0 & 0\end{array}\right), P^Z_1=\left(\begin{array}{cc}0 & 0\\0 & 1\end{array}\right)$$In the Ignis code, these matrices are defined in **tomography.fitters.utils.pauli_preparation_matrix**. We give an explicit definition here:
###Code
PX0 = 0.5*np.array([[1, 1], [1, 1]])
PX1 = 0.5*np.array([[1, -1], [-1, 1]])
PY0 = 0.5*np.array([[1, -1j], [1j, 1]])
PY1 = 0.5*np.array([[1, 1j], [-1j, 1]])
PZ0 = np.array([[1, 0], [0, 0]])
PZ1 = np.array([[0, 0], [0, 1]])
projectors = [PX0, PX1, PY0, PY1, PZ0, PZ1]
###Output
_____no_output_____
###Markdown
By Born's rule, $\text{tr}\left(P_{i}^{X}\rho\right)$ is the probability for the outcome $\left|i\right\rangle$ when measuring in the X-basis, and this probability can be estimated directly using repeated measurements in the X-basis. The $Y$ and $Z$ bases are handled similarly. The computation $\text{tr}\left(P_{i}^{X}\rho\right)$ can be replaced by the scalar product $\vec{P}_i^X \cdot \vec{\rho}$ where $\vec{E}$ denotes the vector obtained from the operator $E$ by flattening its matrix (the result vector consists of the first row, then the second row, etc.). Now we can construct a matrix $$M=\left(\begin{array}{c}\vec{P}_{0}^{X}\\\vec{P}_{1}^{X}\\\vec{P}_{0}^{Y}\\\vec{P}_{1}^{Y}\\\vec{P}_{0}^{Z}\\\vec{P}_{1}^{Z}\end{array}\right)$$such that $$M\vec{\rho}=\vec{p}=\left(\begin{array}{c}p_{\left|0\right\rangle }^{X}\\p_{\left|1\right\rangle }^{X}\\p_{\left|0\right\rangle }^{Y}\\p_{\left|1\right\rangle }^{Y}\\p_{\left|0\right\rangle }^{Z}\\p_{\left|1\right\rangle }^{Z}\end{array}\right)$$is the equation relating the density operator to the observed probabilities.
###Code
M = np.array([p.flatten() for p in projectors])
M
###Output
_____no_output_____
###Markdown
Since $M$ can be computed by knowing the operators used in the tomography, and the vector $\vec{p}$ of probabilities can be estimated using the tomography results, all that remains is to solve the equation $M\vec{\rho}=\vec{p}$ for $\vec{\rho}$. If the rank of $M$ is large enough this can be done by multiplying both sides by $M^\dagger$:$M^\dagger M\vec{\rho} = M^\dagger \vec{p}$$\vec{\rho} = (M^\dagger M)^{-1} M^\dagger \vec{p}$In our example, we obtain the matrix $$(M^\dagger M)^{-1} M^\dagger = \left(\begin{array}{cccccc}\frac{1}{6} & \frac{1}{6} & \frac{1}{6} & \frac{1}{6} & \frac{4}{6} & -\frac{2}{6}\\\frac{1}{2} & -\frac{1}{2} & \frac{i}{2} & -\frac{i}{2} & 0 & 0\\\frac{1}{2} & -\frac{1}{2} & -\frac{i}{2} & \frac{i}{2} & 0 & 0\\\frac{1}{6} & \frac{1}{6} & \frac{1}{6} & \frac{1}{6} & -\frac{2}{6} & \frac{4}{6}\end{array}\right)$$
###Code
M_dg = np.conj(M).T
linear_inversion_matrix = np.linalg.inv(M_dg @ M) @ M_dg
###Output
_____no_output_____
###Markdown
Multiplication by the linear inversion matrix performs the reconstruction stage described earlier to obtain the density operator. Example: 2-qubit Linear inversionFor multiple qubit systems the technique of linear inversion remains the same. The projector operators are tensor products of 1-qubit projectors: $6^n$ projectors in total, since we measure in $3^n$ bases (tensor products of $X,Y,Z$) and each $n$-qubit measurement has $2^n$ projectors.
###Code
projectors_2 = [np.kron(p1, p2) for (p1, p2) in itertools.product(projectors, repeat = 2)]
M_2 = np.array([p.flatten() for p in projectors_2])
M_dg_2 = np.conj(M_2).T
linear_inversion_matrix_2 = np.linalg.inv(M_dg_2 @ M_2) @ M_dg_2
###Output
_____no_output_____
###Markdown
We will now attempt to reconstruct the Bell state $\frac{\left|00\right\rangle +\left|11\right\rangle }{\sqrt{2}}$ from simulated tomography results. First, we prepare a quantum circuit which generates this bell state from the input $\left|00\right\rangle$.
###Code
q2 = QuantumRegister(2)
bell = QuantumCircuit(q2)
bell.h(q2[0])
bell.cx(q2[0], q2[1])
bell.qasm()
###Output
_____no_output_____
###Markdown
We now use Ignis' **state_tomography_circuits** procedure which generates the $3^n$ circuits obtained by adding to the bell circuit a measurement according to each of our measurement operators (Pauli by default). Then we execute on a standard simulator.
###Code
qst_bell = tomo.state_tomography_circuits(bell, q2)
job = qiskit.execute(qst_bell, Aer.get_backend('qasm_simulator'), shots=5000)
###Output
_____no_output_____
###Markdown
Now we load the data into the **StateTomographyFitter** which takes results data and can fit to a density matrix
###Code
statefit = tomo.StateTomographyFitter(job.result(), qst_bell)
###Output
_____no_output_____
###Markdown
Here is the data we loaded into the **StateTomographyFitter**
###Code
statefit.data
###Output
_____no_output_____
###Markdown
Now use a private function **\_fitter_data** to explicitly extract the probability vector $\vec{p}$ and projector matrix $M$ that satisfy $M\vec{\rho} = \vec{p}$. For typical usage we don't need to expose this data.
###Code
p, M, weights = statefit._fitter_data(True, 0.5)
###Output
_____no_output_____
###Markdown
Now we use the linear inversion technique to reconstruct $\vec{\rho}$. Since we usually represent density matrices as matrices and not vectors, we use NumPy's **reshape** function to convert $\vec{\rho}$ into $\rho$.
###Code
M_dg = np.conj(M).T
linear_inversion_matrix = np.linalg.inv(M_dg @ M) @ M_dg
rho_bell = linear_inversion_matrix @ p
rho_bell = np.reshape(rho_bell, (4, 4))
print(rho_bell)
###Output
[[ 4.96133333e-01+0.j -1.80000000e-03+0.00386667j
-4.66666667e-04-0.00803333j 5.00000000e-01-0.0087j ]
[-1.80000000e-03-0.00386667j 3.33333333e-04+0.j
1.38777878e-17-0.0067j 2.93333333e-03+0.00776667j]
[-4.66666667e-04+0.00803333j 1.38777878e-17+0.0067j
-3.33333333e-04+0.j 8.00000000e-04-0.00453333j]
[ 5.00000000e-01+0.0087j 2.93333333e-03-0.00776667j
8.00000000e-04+0.00453333j 5.03866667e-01+0.j ]]
###Markdown
To check the quality of our solution, we compute the fidelity between the real quantum state (obtained via simulation by a simulator that can return state vectors) and our calculated $\rho$. The closer the fidelity is to 1, the better.
###Code
job = qiskit.execute(bell, Aer.get_backend('statevector_simulator'), shots=1)
psi_bell = job.result().get_statevector(bell)
F_bell = state_fidelity(psi_bell, rho_bell, validate=False)
print('Fit Fidelity linear inversion =', F_bell)
###Output
Fit Fidelity linear inversion = 1.0
###Markdown
Maximum Likelihood Linear inversion works perfectly on accurate data, but tomography data is never fully accurate. Two obvious obstacles are:1. Since the number of measurements is limited, we do not obtain the probability vector $\vec{p}$ but an approximation.2. The measurement process might be noisy.This may result in an inaccurate or even self-contradicting $\vec{p}$, and the result of linear inversion might not be a density matrix at all (e.g. not positive semidefinite, or with trace different from 1).Since we want to solve the linear problem $A\vec{x}=\vec{p}$ for $x$, we can turn it into an optimization problem by attempting to minimize $\|A\vec{x}-\vec{p}\|_2$ while subjecting $x$ to additional constraints to ensure it is indeed a density matrix. This is done by **state_cvx_fit**.Another approach is to solve this optimization problem with no further constraints. The result might not be a density operator, i.e. positive semidefinite with trace 1; in this case the algorithm first rescales in order to obtain a density operator. This is done using **state_mle_fit**.
###Code
rho_cvx_bell = statefit.fit(method='cvx')
F_bell = state_fidelity(psi_bell, rho_cvx_bell, validate=False)
print('Fit Fidelity CVX fit =', F_bell)
rho_mle_bell = statefit.fit(method='lstsq')
F_bell = state_fidelity(psi_bell, rho_mle_bell, validate=False)
print('Fit Fidelity MLE fit =', F_bell)
###Output
Fit Fidelity CVX fit = 0.9999153312542011
Fit Fidelity MLE fit = 0.9936235893388716
|
Lecture 4/Data_Cleaning_and_Preparation.ipynb | ###Markdown
Data Cleaning and Preparation---During the course of doing data analysis and modeling, a significant amount of time is spent on data preparation: loading, cleaning, transforming, and rearranging. Such tasks often take 80% or more of an analyst's time. Sometimes the way that data is stored in files or databases is not in the right format for a particular task. Fortunately, pandas, along with the built-in Python language features, provides you with a high-level, flexible, and fast set of tools to enable you to manipulate data into the right form. [Pandas 100 tricks](https://www.kaggle.com/python10pm/pandas-100-tricks) Lecture outline---* Finding and Filling Missing Values* Removing Duplicate Values* Replacing Values* Discretization and Binning* Detecting Outliers* String Manipulations* Variable Transformation Finding and Filling Missing Values---Missing values are pretty common in data cleaning activities, and they can be there for any number of reasons. For instance, if you are running a survey and a respondent didn't answer a question, the missing value is actually an omission. This kind of missing data is called **Missing at Random (MAR)** if there are other variables that might be used to predict the variable which is missing. If there is no relationship to other variables, then we call this data **Missing Completely at Random (MCAR)**; in other words, the missingness is independent of the observed and unobserved data. **Missing Not at Random (MNAR)**: when data are MNAR, the fact that the data are missing is systematically related to the unobserved data, that is, the missingness is related to events or factors which are not measured by the researcher. Pandas makes working with missing data as painless as possible. For example, all of the descriptive statistics on pandas objects exclude missing data by default.> Not only `NaN`, `NA`, `N/A`, `NAT`, `NULL`, and `None` are missing values. There can be other missing values in the data. That's why we always need to know our data. Reference[Types of Missing Data](https://www.ncbi.nlm.nih.gov/books/NBK493614/)[Working with missing data](https://pandas.pydata.org/pandas-docs/stable/user_guide/missing_data.html)
###Code
import pandas as pd
import numpy as np
weather = pd.read_csv("data/weather.csv")
weather.head()
###Output
_____no_output_____
###Markdown
Detecting Missing Values
###Code
weather.isnull() # Retruns boolean seris. True denotes missing value
weather.isna() # Same as "isnull()" method
weather.isnull().any() # Shows all columns with missing value
weather[weather.isnull().any(axis=1)] # Show all rows with missing values
# Something weird happens in "MIN_TEMP_GROUND" column - We'll see later
weather.isnull().all(axis=1).any() # Are there any rows with only null values?
weather.notnull().all() # Are there any columns with no null values at all?
weather["MIN_TEMP_GROUND"].head(30) # Do you see pattern?
every_6th_row_index = pd.Series(range(5, len(weather), 6)) # Indices for every 6th row
every_6th_row_index
weather["MIN_TEMP_GROUND"][every_6th_row_index].notnull().all() # Are all these rows NOT null?
weather['MIN_TEMP_GROUND'].drop(every_6th_row_index).isnull().all() # Are all other rows null?
###Output
_____no_output_____
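###Markdown
It is also handy to count the missing values per column; a small additional sketch on the same `weather` frame:
###Code
weather.isnull().sum()       # Number of missing values in each column
weather.isnull().sum().sum() # Total number of missing values in the DataFrame
###Output
_____no_output_____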
###Markdown
Handling Missing Values---The strategy for handling missing values depends on the type of missing value and/or the problem and data at hand. We may have a huge amount of data, in which case dropping missing values will not affect our aims, or we may have a small amount of data, in which case it's desirable to impute the missing values.Let's see how we can drop/remove missing values in rows and columns.
###Code
series = pd.Series([1, np.nan, 3.5, np.nan, 7, 10, np.nan])
series
series.dropna() # Removes missing values
series[series.notnull()] # Same as above
###Output
_____no_output_____
###Markdown
Dropping missing values from a DataFrame is somewhat different from dropping missing values from a Series. For that reason, I create a sample DataFrame to show the effect of the `dropna()` method on a DataFrame.
###Code
missing_df = pd.DataFrame([[1., 6.5, 3.], [1., np.nan, np.nan],
[np.nan, np.nan, np.nan], [np.nan, 6.5, 3.]], columns=["a", "b", "c"])
missing_df
missing_df.dropna(axis=0, how="any") # The shortest solution - drop everything (Generally not good idea!!!)
missing_df.dropna(axis=0, how="all") # Remove rows if all values are missing
missing_df.dropna(axis=1, how="any") # Drop column if contains at least one missing value
missing_df.dropna(axis=1, how="all") # Drop column if all values are missing
missing_df.dropna(axis=0, how="any", subset=["a", "b"]) # Filter out missing values by column
###Output
_____no_output_____
###Markdown
Rather than filtering out missing data (and potentially discarding other data along with it), you may want to fill in the "holes" in any number of ways. For most purposes, the `fillna()` method is the workhorse function to use.
###Code
missing_df
missing_df.fillna(value=999) # Fill missing values with a constant
missing_df.fillna(missing_df.mean()) # Fill missing values with a mean
missing_df.fillna(missing_df.mode().iloc[0]) # Fill missing values with a mode
missing_df.fillna({"a": 999, "b": -1, "c": 100}) # Fill missing values by different fill value for each column
missing_df
missing_df.fillna(method="ffill") # Forward Fill - use last valid observation for filling
missing_df.fillna(method="backfill") # Forward fill - use next valid observation to fill gap
###Output
_____no_output_____
###Markdown
Removing Duplicate Values---We may have duplicate values in our data for several reasons, and they can cause difficulties during the data analysis procedure. We have to identify them and then handle them properly. In other words, we have to find a unique identifier for each row.
###Code
athletes = pd.read_csv("data/athletes.csv")
athletes.head()
athletes.duplicated() # Boolean series indicating duplicated rows. Uses all columns to find duplicates
athletes.duplicated(subset=["id", "sex"]) # Uses only two columns for duplicate identification
athletes[athletes.duplicated()] # Shows which rows are duplicated
athletes.drop_duplicates() # Removes all duplicate rows
athletes.drop_duplicates(subset=["id", "nationality"], keep="first") # Remove duplicates only considering some columns
###Output
_____no_output_____
###Markdown
Replacing Values---There are situations when we just need to replace values in a Pandas Series or DataFrame. For that purpose, we can use the `replace()` method.
###Code
missing_df.head()
missing_df.replace(to_replace=np.nan, value=999) # Replace all NaN's with 999
missing_df.replace(to_replace=[1.0, 3.0], value=[2.0, 4.0]) # Replace several values
missing_df.replace(to_replace=[1.0, 6.5], value=np.nan) # Replace multiple values at once
###Output
_____no_output_____
###Markdown
Discretization and Binning---Continuous data is often discretized or otherwise separated into `bins` for analysis. Suppose you have data about a group of people in a study, and you want to group them into discrete buckets.Let's discretize the `weight` column.
###Code
athletes.head()
athletes["weight"].describe()
###Output
_____no_output_____
###Markdown
Before we discretize the weight column, let's calculate `BMI - Body Mass Index` and then discretize weight according to those values.
###Code
athletes["bmi"] = athletes["weight"] / (athletes["height"] ** 2)
athletes.head()
bins = [0, 18.5, 25, 30, 60]
names = ["underweight", "normal_weight", "overweight", "obese"]
athletes["new_weight"] = pd.cut(athletes["bmi"], bins=bins, labels=names)
athletes.head()
athletes[athletes["new_weight"] == "normal_weight"]["height"].describe()
athletes["new_weight"].value_counts()
###Output
_____no_output_____
###Markdown
If we pass an integer number of bins to cut instead of explicit bin edges, Pandas `cut()` will compute equal-length bins based on the minimum and maximum values in the data.
###Code
pd.cut(athletes["bmi"], bins=4)
###Output
_____no_output_____
###Markdown
Considering the distribution of the data, the `cut()` method may not return equal-sized bins, while the `qcut()` method by definition returns approximately equal-sized bins, as it bins the data based on sample quantiles.
###Code
athletes["bmi"].describe()
pd.qcut(athletes["bmi"], q=10) # Deciles
pd.qcut(athletes["bmi"], q=4) # Quartiles
###Output
_____no_output_____
###Markdown
Detecting Outliers---The naive approach to detecting outliers is to use the `InterQuartile Range - IQR`. We can use that approach to check if the `height` column contains some outliers. The formula for `IQR` is the following:$$\text{IQR} = Q_{3} - Q_{1}$$where $Q_{3}$ and $Q_{1}$ are the upper and lower quartiles, respectively.By this rule, roughly $99\%$ of observations should fall inside $\{Q_{1} - 1.5 \times \text{IQR}; Q_{3} + 1.5 \times \text{IQR}\}$
###Code
athletes.head()
q1 = athletes["height"].quantile(.25)
q3 = athletes["height"].quantile(.75)
iqr = q3 - q1
pmin = q1 - 1.5 * iqr
pmax = q3 + 1.5 * iqr
pmin
pmax
athletes[athletes["height"].between(pmin, pmax)] # Values between IQR range
athletes[(athletes["height"].lt(pmin)) | (athletes["height"].gt(pmax))] # Values outside IQR range
###Output
_____no_output_____
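###Markdown
Instead of dropping the rows outside the fences, a common alternative (shown here only as a sketch) is to cap extreme values at the IQR fences with `clip()`:
###Code
capped_height = athletes["height"].clip(lower=pmin, upper=pmax) # Values below pmin / above pmax are set to the fences
capped_height.describe()
###Output
_____no_output_____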
###Markdown
String Manipulations---Strings represent letters and other symbols surrounded by quotation marks. Pandas has support for string manipulation, and the methods are accessible via the `.str` attribute. Strings are represented by the `object` data type in Pandas, instead of the conventional `str`. Reference[Working with text data](https://pandas.pydata.org/pandas-docs/stable/user_guide/text.html)
###Code
titanic = pd.read_csv("data/titanic.csv")
titanic.head()
titanic["Name"].str.split(".") # Split string on a specified character
titanic["Name"].str.split(".", expand=True) # Split string on a specified character and return DataFrame
titanic["Name"].str.strip() # Remove leading and trailing spaces
titanic["Name"].str.contains("Mrs") # True if sub-string is included in string
titanic["Name"].str.replace("Mrs", "###") # Replace string with other value
titanic["Name"].str.startswith("Mrs") # True if string starts with "Mrs"
titanic["Name"].str.endswith("a") # True if string ends with "a"
titanic["Name"].str.lower() # Lower case letter
titanic["Name"].str.upper() # Upper case letters
titanic["Name"].str.capitalize()
titanic["Name"].str.repeat(3)
###Output
_____no_output_____
###Markdown
Variable Transformation---When modeling, we first have to take into consideration that we have a fixed set of models and have to fit the data to our model; second, these models have their assumptions - **which rarely hold in the real world**. When the assumptions do not hold, we apply different transformations to our data, in order to get a data format that is as suitable for the model as possible. By doing so, we try to extract as much information from our data as possible.> **The type of variable transformation greatly depends on the type of model we plan to use for modeling.**Transformation methods are classified into two broad classes:* **Numeric Variable Transformation** - turning a numeric variable into another numeric variable. Typically it is meant to change the scale of values and/or to adjust a skewed data distribution to a Gaussian-like distribution through some `monotonic transformation`* **Categorical Variable Transformation** - turning a categorical variable into a numeric variable. Categorical variable transformation is mandatory for most machine learning models because they can handle only numeric values.
###Code
athletes.head()
###Output
_____no_output_____
###Markdown
Standardization uses the following formula:$$X'_{i} = \frac{X_{i} - \bar{X_{n}}}{s}$$where $\bar{X_{n}}$ is the arithmetic average and $s$ is the standard deviation.
###Code
(athletes['weight'] - athletes["weight"].mean()) / athletes["weight"].std()
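# Quick check: after standardization the mean should be ~0 and the standard deviation ~1
((athletes['weight'] - athletes["weight"].mean()) / athletes["weight"].std()).describe()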
###Output
_____no_output_____
###Markdown
Min-max scaling is performed using the following formula:$$X'_{i} = \frac{X_{i} - \min(X)}{\max(X) - \min(X)}$$
###Code
((athletes['weight'] - athletes["weight"].min()) / (athletes["weight"].max() - athletes["weight"].min()))
((athletes['weight'] - athletes["weight"].min()) / (athletes["weight"].max() - athletes["weight"].min())).describe()
###Output
_____no_output_____
###Markdown
The logarithmic transformation is applied here with the natural logarithm, although a logarithm with any base can be used. Note that for the transformation to be valid, the data must not contain zeros or negative values.
###Code
np.log(athletes["weight"]) # Natural logarithm
athletes["weight"].plot.hist() # Raw numbers
np.log(athletes["weight"]).plot.hist() # Natural logarithm
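# Sketch: if the data may contain zeros, np.log1p (i.e. log(1 + x)) is a common workaround;
# the "weight" column itself is assumed to be strictly positive here
np.log1p(athletes["weight"]).plot.hist()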
###Output
_____no_output_____
###Markdown
Categorical Variable Transformations---* **One-hot encoding** One-hot encoding is also known as dummy-variable encoding: we create indicator (binary) variables containing only zeros and ones. Pandas has built-in functionality for dummy variable generation. A good candidate for a dummy variable here is the `sex` column.
###Code
athletes.head()
pd.get_dummies(athletes["sex"]) # Returns dummy variable for any categorical variable
###Output
_____no_output_____
###Markdown
In the `sex` column we have two values, `female` and `male`. Have a look at the dummy variables above. **They carry the same information**: in the `female` column, 0 denotes male and 1 denotes female, while the `male` column is exactly the opposite. Since the two columns are redundant (perfectly collinear), we should drop one of them.
###Code
pd.get_dummies(athletes["sex"], drop_first=True) # Drops one dummy variable
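# Typical usage sketch: attach the dummy variable(s) back to the original DataFrame
pd.concat([athletes, pd.get_dummies(athletes["sex"], drop_first=True)], axis=1).head()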
###Output
_____no_output_____ |
notebook/1_Learning PyTorch/1_1_tensor_tutorial_jp.ipynb | ###Markdown
ใPyTorchใจใฏ๏ผใ===============================================================ใๅ้กใWhat is PyTorch?ใๅ่ใ[Soumith Chintala](http://soumith.ch/)ใๅ
URLใhttps://pytorch.org/tutorials/beginner/blitz/tensor_tutorial.htmlใ็ฟป่จณใ้ป้ๅฝ้ๆ
ๅ ฑใตใผใในISID AIใใฉใณในใใฉใผใกใผใทใงใณใปใณใฟใผใๅพณๅ ๅ
ใๆฅไปใ2020ๅนด10ๆ13ๆฅใใใฅใใผใชใขใซๆฆ่ฆใๆฌใใฅใผใใชใขใซใงใฏใPyTorchใงใฎๆผ็ฎๅฆ็ใซ็จใใTorch Tensorใซใคใใฆใใใฎๆไฝๆนๆณใ่งฃ่ชฌใใพใใ--- PyTorchใจใฏ๏ผ================PyTorchใฏPythonใใใผในใจใใ็งๅญฆ่จ็ฎใฉใคใใฉใชใงใใPyTorchใฏไปฅไธใซ็คบใ2ใคใฎๆฉ่ฝใไฝฟ็จใใใใฆใผใถใผใๅฏพ่ฑกใจใใฆใใพใใ- Numpyใใผในใฎๆผ็ฎใฎไปฃใใใซใGPUใ็จใใ้ซ้ใชๆผ็ฎใฎๅฎๆฝ- ้ซใๆ่ปๆงใจๅฎ่ก้ๅบฆใๆใใใใฃใผใใฉใผใใณใฐใฎใใฉใใใใฉใผใ Tensors๏ผใใณใฝใซ๏ผ-------------TensorใฏNumPy ndarraysใฎใใใชๅคๆฌกๅ
้
ๅใงใใPyTorchใซใใใฆใใณใฝใซใฏGPUไธใงใไฝฟ็จใงใใใใใๅฆ็้ๅบฆใฎๅไธใใใใใจใๅฏ่ฝใงใใ
###Code
%matplotlib inline
from __future__ import print_function
import torch
###Output
_____no_output_____
###Markdown
[Note] Even when an uninitialized matrix is declared/created, it does not hold definite values until it is actually used; whatever values happened to be allocated in memory at declaration time appear as its initial values. Let's create an uninitialized 5x3 matrix!
###Code
x = torch.empty(5, 3)
print(x)
###Output
tensor([[4.2866e-36, 0.0000e+00, 3.3631e-44],
[0.0000e+00, nan, 6.4460e-44],
[1.1578e+27, 1.1362e+30, 7.1547e+22],
[4.5828e+30, 1.2121e+04, 7.1846e+22],
[9.2198e-39, 7.0374e+22, 1.4359e-36]])
###Markdown
Next, let's create a randomly initialized 5x3 matrix:
###Code
x = torch.rand(5, 3)
print(x)
###Output
tensor([[0.8021, 0.2195, 0.3325],
[0.1211, 0.7894, 0.2683],
[0.9716, 0.6030, 0.3051],
[0.1340, 0.8415, 0.5174],
[0.0918, 0.8619, 0.8378]])
###Markdown
To create a matrix initialized with zeros of type long, do the following:
###Code
x = torch.zeros(5, 3, dtype=torch.long)
print(x)
###Output
tensor([[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
###Markdown
You can also create a tensor by specifying the values directly.
###Code
x = torch.tensor([5.5, 3])
print(x)
###Output
tensor([5.5000, 3.0000])
###Markdown
You can also create a new tensor based on an existing tensor. Tensors created by these methods inherit the properties of the original tensor (e.g. the data type, dtype) unless the user explicitly overrides them.
###Code
x = x.new_ones(5, 3, dtype=torch.double) # new_* methods take in sizes
print(x)
x = torch.randn_like(x, dtype=torch.float) # override dtype!
print(x) # result has the same size
###Output
tensor([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype=torch.float64)
tensor([[-0.5457, -0.4552, -2.0920],
[-0.6641, 0.9266, -0.6764],
[-0.7897, -1.8249, -0.0382],
[ 0.3420, -0.8151, 0.2744],
[ 1.0132, -1.1335, -0.6098]])
###Markdown
Let's find the tensor size (its shape).
###Code
print(x.size())
###Output
torch.Size([5, 3])
###Markdown
[Note] ``torch.Size`` is a tuple, so it supports the same operations as ordinary Python tuples. **Tensor operations (reshaping, conversion, etc.)** PyTorch provides many operations for manipulating tensors. Here we show some additional examples of tensor operations. Supplementary example 1 (addition)
###Code
y = torch.rand(5, 3)
print(x + y)
###Output
tensor([[ 0.2193, -0.1546, -2.0828],
[ 0.1319, 1.4161, -0.4847],
[-0.5198, -0.9983, 0.3438],
[ 0.4004, -0.6043, 0.5200],
[ 1.4458, -1.1206, 0.2682]])
###Markdown
Supplementary example 2
###Code
print(torch.add(x, y))
###Output
tensor([[ 0.2193, -0.1546, -2.0828],
[ 0.1319, 1.4161, -0.4847],
[-0.5198, -0.9983, 0.3438],
[ 0.4004, -0.6043, 0.5200],
[ 1.4458, -1.1206, 0.2682]])
###Markdown
Supplement: specifying the output destination as an argument
###Code
result = torch.empty(5, 3)
torch.add(x, y, out=result)
print(result)
###Output
tensor([[ 0.2193, -0.1546, -2.0828],
[ 0.1319, 1.4161, -0.4847],
[-0.5198, -0.9983, 0.3438],
[ 0.4004, -0.6043, 0.5200],
[ 1.4458, -1.1206, 0.2682]])
###Markdown
Supplement: modifying the tensor itself (in-place operations)
###Code
# adds x to y
y.add_(x)
print(y)
###Output
tensor([[-0.8722, -1.0651, -6.2668],
[-1.1964, 3.2693, -1.8376],
[-2.0993, -4.6481, 0.2674],
[ 1.0844, -2.2345, 1.0687],
[ 3.4721, -3.3877, -0.9513]])
###Markdown
[Note] By appending ``_`` to a method name, the contents of the variable are replaced with the result of the operation. For example, with ``y.add_(x)`` the result of adding x and y is stored back into y, overwriting it. As with NumPy, indexing and slicing are also possible.
###Code
print(x[:, 1])
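# A few more NumPy-style examples (sketch): row selection and boolean masking
print(x[0, :]) # first row
print(x[x > 0]) # all positive elements, returned as a 1D tensor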
###Output
tensor([-0.4552, 0.9266, -1.8249, -0.8151, -1.1335])
###Markdown
Resizing: if you want to change the shape of a tensor, use ``torch.view``:
###Code
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8) # when -1 is specified, this dimension is inferred automatically from the other dimensions
print(x.size(), y.size(), z.size())
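# Note (sketch): a view shares memory with the original tensor, so modifying y also modifies x
y[0] = 100.0
print(x[0, 0]) # reflects the change made through the view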
###Output
torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])
###Markdown
Using ``.item()``, you can extract just the numerical value from a tensor that contains a single element.
###Code
x = torch.randn(1)
print(x)
print(x.item())
###Output
tensor([0.1095])
0.10949039459228516
###Markdown
**Reference:** PyTorch provides more than 100 operations, including transposition, indexing, slicing, arithmetic, linear algebra, and random number generation. For details, see [this page](https://pytorch.org/docs/stable/torch.html). Bridge with NumPy------------In PyTorch, converting a Torch Tensor to a NumPy array and vice versa is easy. (When the Torch Tensor is on the CPU,) the Torch Tensor and the NumPy array are placed in the same memory region and can be converted into one another. Converting a Torch Tensor to a NumPy Array--------
###Code
a = torch.ones(5)
print(a)
b = a.numpy()
print(b)
###Output
[1. 1. 1. 1. 1.]
###Markdown
Because they share memory, you can see that changes to the Torch Tensor are also reflected in the NumPy array.
###Code
a.add_(1)
print(a)
print(b)
###Output
tensor([2., 2., 2., 2., 2.])
[2. 2. 2. 2. 2.]
###Markdown
Converting a NumPy Array to a Torch Tensor---------Converting a NumPy array to a Torch Tensor is also straightforward.
###Code
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a)
np.add(a, 1, out=a)
print(a)
print(b)
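# Sketch: torch.as_tensor also accepts NumPy arrays and avoids copying when possible
c = torch.as_tensor(a)
print(c)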
###Output
[2. 2. 2. 2. 2.]
tensor([2., 2., 2., 2., 2.], dtype=torch.float64)
###Markdown
With the exception of CharTensor, all Tensors on the CPU support conversion to NumPy and back (NumPy to Tensor). CUDA Tensors------------Tensors can be moved onto the memory of any device by using the ``.to`` method.
###Code
# let us run this cell only if CUDA is available
# We will use ``torch.device`` objects to move tensors in and out of GPU
if torch.cuda.is_available():
device = torch.device("cuda") # a CUDA device object
y = torch.ones_like(x, device=device) # directly create a tensor on GPU
x = x.to(device) # or just use strings ``.to("cuda")``
z = x + y
print(z)
    print(z.to("cpu", torch.double))       # ``.to`` can also change dtype together!
    # Translator's note:
    # tensor([1.8299], device='cuda:0')
    # tensor([1.8299], dtype=torch.float64)
    # If output like the above is displayed below this cell (the values will differ), the CUDA tensor computation on the GPU succeeded.
    # If nothing is displayed, Google Colaboratory is not running in GPU mode,
    # so read the explanation in the cell above and switch to a GPU-enabled runtime.
###Output
tensor([1.1095], device='cuda:0')
tensor([1.1095], dtype=torch.float64)
|
examples/notebooks/07Calibrate_single_wls.ipynb | ###Markdown
7. Calibration of single ended measurement with WLS and confidence intervals A single-ended calibration is performed with weighted least squares over all timesteps simultaneously. $\gamma$ and $\alpha$ remain constant, while $C$ varies over time. The weights are not considered equal here: they decrease quadratically with the signal strength of the measured Stokes and anti-Stokes signals. The confidence intervals can be calculated because the weights are correctly defined. The confidence intervals consist of two sources of uncertainty: 1. Measurement noise in the measured Stokes and anti-Stokes signals, expressed in a single variance value. 2. Inherent to least squares procedures / overdetermined systems, the parameters are estimated with limited certainty and all parameters are correlated, which is expressed in the covariance matrix. Both sources of uncertainty are propagated to an uncertainty in the estimated temperature via Monte Carlo.
###Code
import os
from dtscalibration import read_silixa_files
import matplotlib.pyplot as plt
%matplotlib inline
filepath = os.path.join('..', '..', 'tests', 'data', 'single_ended')
ds = read_silixa_files(
directory=filepath,
timezone_netcdf='UTC',
file_ext='*.xml')
ds = ds.sel(x=slice(-30, 101)) # only calibrate parts of the fiber
sections = {
'probe1Temperature': [slice(20, 25.5)], # warm bath
'probe2Temperature': [slice(5.5, 15.5)], # cold bath
# 'referenceTemperature': [slice(-24., -4)] # The internal coil is not so uniform
}
ds.sections = sections
print(ds.calibration_single_ended.__doc__)
st_label = 'ST'
ast_label = 'AST'
###Output
_____no_output_____
###Markdown
First calculate the variance in the measured Stokes and anti-Stokes signals, in the forward and backward direction.The Stokes and anti-Stokes signals should follow a smooth decaying exponential. This function fits a decaying exponential to each reference section for each time step. The variance of the residuals between the measured Stokes and anti-Stokes signals and the fitted signals is used as an estimate of the variance in measured signals.
###Code
st_var, resid = ds.variance_stokes(st_label=st_label)
ast_var, _ = ds.variance_stokes(st_label=ast_label)
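# Sketch: inspect the estimated noise variances (their square roots give the noise standard deviation)
print('st_var:', st_var, 'ast_var:', ast_var)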
###Output
_____no_output_____
###Markdown
Similar to the ols procedure, we make a single function call to calibrate the temperature. If the method is `wls` and confidence intervals are passed to `conf_ints`, confidence intervals are calculated. As weights are correctly passed to the least squares procedure, the covariance matrix can be used. This matrix holds the covariances between all the parameters. A large parameter set is generated from this matrix, assuming the parameter space is normally distributed with its mean at the best estimate of the least squares procedure. The large parameter set is used to calculate a large set of temperatures. By using `percentiles` or `quantile` the 95% confidence interval of the calibrated temperature between 2.5% and 97.5% is calculated. The confidence intervals differ per time step. If you would like to calculate confidence intervals of all time steps together, you have the option `ci_avg_time_flag=True`: 'We can say with 95% confidence that the temperature remained between this line and this line during the entire measurement period'.
###Code
ds.calibration_single_ended(sections=sections,
st_label=st_label,
ast_label=ast_label,
st_var=st_var,
ast_var=ast_var,
method='wls',
solver='sparse',
store_p_val='p_val',
store_p_cov='p_cov'
)
ds.conf_int_single_ended(
p_val='p_val',
p_cov='p_cov',
st_label=st_label,
ast_label=ast_label,
st_var=st_var,
ast_var=ast_var,
store_tmpf='TMPF',
store_tempvar='_var',
conf_ints=[2.5, 97.5],
mc_sample_size=500,
ci_avg_time_flag=False)
###Output
_____no_output_____
###Markdown
Let's compare our calibrated values with the device calibration
###Code
ds1 = ds.isel(time=0) # take only the first timestep
ds1.TMPF.plot(linewidth=0.8, figsize=(12, 8), label='User calibrated') # plot the temperature calibrated by us
ds1.TMP.plot(linewidth=0.8, label='Device calibrated') # plot the temperature calibrated by the device
ds1.TMPF_MC.plot(linewidth=0.8, hue='CI', label='CI device')
plt.title('Temperature at the first time step')
plt.legend();
ds.TMPF_MC_var.plot(figsize=(12, 8));
ds1.TMPF_MC.sel(CI=2.5).plot(label = '2.5% CI', figsize=(12, 8))
ds1.TMPF_MC.sel(CI=97.5).plot(label = '97.5% CI')
ds1.TMPF.plot(label='User calibrated')
plt.title('User calibrated temperature with 95% confidence interval')
plt.legend();
###Output
_____no_output_____
###Markdown
We can tell from the graph above that the 95% confidence interval widens further down the cable. Let's have a look at the calculated variance along the cable for a single timestep. According to the device manufacturer this should be around 0.0059 degC.
###Code
ds1.TMPF_MC_var.plot(figsize=(12, 8));
###Output
_____no_output_____
###Markdown
The variance of the temperature measurement appears to be larger than what the manufacturer reports. This is already the case for the internal cable; it is not caused by a dirty connector/bad splice on our side. Maybe the length of the calibration section was not sufficient. At 30 m the variance sharply increases. There are several possible explanations, e.g., large temperatures or decreased signal strength. Let's have a look at the Stokes and anti-Stokes signal.
###Code
ds1.ST.plot(figsize=(12, 8))
ds1.AST.plot();
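# Sketch: a logarithmic y-axis turns the exponential decay into a straight line,
# which makes sudden signal losses easier to spot
plt.yscale('log')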
###Output
_____no_output_____
###Markdown
7. Calibration of single ended measurement with WLS and confidence intervals A single-ended calibration is performed where the unknown parameters are estimated using fiber sections that have a reference temperature. The parameters are estimated with a weighted least squares optimization using Stokes and anti-Stokes measurements from all timesteps. Thus Stokes and anti-Stokes measurements with a large signal to noise ratio contribute more towards estimating the optimal parameter set. But an estimate of the noise variance is required.Single-ended calibration requires a few steps. Please have a look at [1] for more information:1. Read the raw data files loaded from your DTS machine2. Define the reference sections: fiber sections that have a known temperature.3. Estimate the variance of the noise in the Stokes and anti-Stokes measurements4. Perform the parameter search and compute the temperature along the entire fiber.5. Compute the confidence intervals for the temperature[1]: des Tombe, B., Schilperoort, B., & Bakker, M. (2020). Estimation of Temperature and Associated Uncertainty from Fiber-Optic Raman-Spectrum Distributed Temperature Sensing. Sensors, 20(8), 2235. https://doi.org/10.3390/s20082235
###Code
import os
import warnings
warnings.simplefilter('ignore') # Hide warnings to avoid clutter in the notebook
from dtscalibration import read_silixa_files
import matplotlib.pyplot as plt
%matplotlib inline
filepath = os.path.join('..', '..', 'tests', 'data', 'single_ended')
ds = read_silixa_files(
directory=filepath,
timezone_netcdf='UTC',
file_ext='*.xml')
ds = ds.sel(x=slice(-30, 101)) # only calibrate parts of the fiber
sections = {
'probe1Temperature': [slice(20, 25.5)], # warm bath
'probe2Temperature': [slice(5.5, 15.5)], # cold bath
# 'referenceTemperature': [slice(-24., -4)] # The internal coil is not so uniform
}
ds.sections = sections
print(ds.calibration_single_ended.__doc__)
###Output
Calibrate the Stokes (`ds.st`) and anti-Stokes (`ds.ast`) data to
temperature using fiber sections with a known temperature
(`ds.sections`) for single-ended setups. The calibrated temperature is
stored under `ds.tmpf` and its variance under `ds.tmpf_var`.
In single-ended setups, Stokes and anti-Stokes intensity is measured
from a single end of the fiber. The differential attenuation is assumed
constant along the fiber so that the integrated differential attenuation
may be written as (Hausner et al, 2011):
.. math::
\int_0^x{\Delta\alpha(x')\,\mathrm{d}x'} \approx \Delta\alpha x
The temperature can now be written from Equation 10 [1]_ as:
.. math::
T(x,t) \approx \frac{\gamma}{I(x,t) + C(t) + \Delta\alpha x}
where
.. math::
I(x,t) = \ln{\left(\frac{P_+(x,t)}{P_-(x,t)}\right)}
.. math::
C(t) = \ln{\left(\frac{\eta_-(t)K_-/\lambda_-^4}{\eta_+(t)K_+/\lambda_+^4}\right)}
where :math:`C` is the lumped effect of the difference in gain at
:math:`x=0` between Stokes and anti-Stokes intensity measurements and
the dependence of the scattering intensity on the wavelength. The
parameters :math:`P_+` and :math:`P_-` are the Stokes and anti-Stokes
intensity measurements, respectively.
The parameters :math:`\gamma`, :math:`C(t)`, and :math:`\Delta\alpha`
must be estimated from calibration to reference sections, as discussed
in Section 5 [1]_. The parameter :math:`C` must be estimated
for each time and is constant along the fiber. :math:`T` in the listed
equations is in Kelvin, but is converted to Celsius after calibration.
Parameters
----------
store_p_cov : str
Key to store the covariance matrix of the calibrated parameters
store_p_val : str
Key to store the values of the calibrated parameters
p_val : array-like, optional
Define `p_val`, `p_var`, `p_cov` if you used an external function
for calibration. Has size 2 + `nt`. First value is :math:`\gamma`,
second is :math:`\Delta \alpha`, others are :math:`C` for each
timestep.
p_var : array-like, optional
Define `p_val`, `p_var`, `p_cov` if you used an external function
for calibration. Has size 2 + `nt`. First value is :math:`\gamma`,
second is :math:`\Delta \alpha`, others are :math:`C` for each
timestep.
p_cov : array-like, optional
The covariances of `p_val`.
If set to False, no uncertainty in the parameters is propagated
into the confidence intervals. Similar to the spec sheets of the DTS
manufacturers. And similar to passing an array filled with zeros.
sections : Dict[str, List[slice]], optional
If `None` is supplied, `ds.sections` is used. Define calibration
sections. Each section requires a reference temperature time series,
such as the temperature measured by an external temperature sensor.
They should already be part of the DataStore object. `sections`
is defined with a dictionary with its keywords of the
names of the reference temperature time series. Its values are
lists of slice objects, where each slice object is a fiber stretch
that has the reference temperature. Afterwards, `sections` is stored
under `ds.sections`.
st_var, ast_var : float, callable, array-like, optional
The variance of the measurement noise of the Stokes signals in the
forward direction. If `float` the variance of the noise from the
Stokes detector is described with a single value.
If `callable` the variance of the noise from the Stokes detector is
a function of the intensity, as defined in the callable function.
Or manually define a variance with a DataArray of the shape
`ds.st.shape`, where the variance can be a function of time and/or
x. Required if method is wls.
store_c : str
Label of where to store C
store_gamma : str
Label of where to store gamma
store_dalpha : str
Label of where to store dalpha; the spatial derivative of alpha.
store_alpha : str
Label of where to store alpha; The integrated differential
attenuation.
alpha(x=0) = 0
store_ta : str
Label of where to store transient alpha's
store_tmpf : str
Label of where to store the calibrated temperature of the forward
direction
variance_suffix : str
String appended for storing the variance. Only used when method
is wls.
method : {'ols', 'wls'}
Use `'ols'` for ordinary least squares and `'wls'` for weighted least
squares. `'wls'` is the default, and there is currently no reason to
use `'ols'`.
solver : {'sparse', 'stats'}
Either use the homemade weighted sparse solver or the weighted
dense matrix solver of statsmodels. The sparse solver uses much less
memory, is faster, and gives the same result as the statsmodels
solver. The statsmodels solver is mostly used to check the sparse
solver. `'stats'` is the default.
matching_sections : List[Tuple[slice, slice, bool]], optional
Provide a list of tuples. A tuple per matching section. Each tuple
has three items. The first two items are the slices of the sections
that are matched. The third item is a boolean and is True if the two
sections have a reverse direction ("J-configuration").
transient_att_x, transient_asym_att_x : iterable, optional
Depreciated. See trans_att
trans_att : iterable, optional
Splices can cause jumps in differential attenuation. Normal single
ended calibration assumes these are not present. An additional loss
term is added in the 'shadow' of the splice. Each location
introduces an additional nt parameters to solve for. Requiring
either an additional calibration section or matching sections.
If multiple locations are defined, the losses are added.
fix_gamma : Tuple[float, float], optional
A tuple containing two floats. The first float is the value of
gamma, and the second item is the variance of the estimate of gamma.
Covariances between gamma and other parameters are not accounted
for.
fix_dalpha : Tuple[float, float], optional
A tuple containing two floats. The first float is the value of
dalpha (:math:`\Delta \alpha` in [1]_), and the second item is the
variance of the estimate of dalpha.
Covariances between alpha and other parameters are not accounted
for.
Returns
-------
References
----------
.. [1] des Tombe, B., Schilperoort, B., & Bakker, M. (2020). Estimation
of Temperature and Associated Uncertainty from Fiber-Optic Raman-
Spectrum Distributed Temperature Sensing. Sensors, 20(8), 2235.
https://doi.org/10.3390/s20082235
Examples
--------
- `Example notebook 7: Calibrate single ended <https://github.com/dtscalibration/python-dts-calibration/blob/master/examples/notebooks/07Calibrate_single_wls.ipynb>`_
###Markdown
First calculate the variance in the measured Stokes and anti-Stokes signals, in the forward and backward direction.The Stokes and anti-Stokes signals should follow a smooth decaying exponential. This function fits a decaying exponential to each reference section for each time step. The variance of the residuals between the measured Stokes and anti-Stokes signals and the fitted signals is used as an estimate of the variance in measured signals.
###Code
st_var, resid = ds.variance_stokes_constant(st_label='st')
ast_var, _ = ds.variance_stokes_constant(st_label='ast')
###Output
_____no_output_____
###Markdown
Similar to the ols procedure, we make a single function call to calibrate the temperature. If the method is `wls` and confidence intervals are passed to `conf_ints`, confidence intervals are calculated. As weights are correctly passed to the least squares procedure, the covariance matrix can be used. This matrix holds the covariances between all the parameters. A large parameter set is generated from this matrix, assuming the parameter space is normally distributed with its mean at the best estimate of the least squares procedure. The large parameter set is used to calculate a large set of temperatures. By using `percentiles` or `quantile` the 95% confidence interval of the calibrated temperature between 2.5% and 97.5% is calculated. The confidence intervals differ per time step. If you would like to calculate confidence intervals of temporal averages or of averages of fiber sections, see notebook 16.
###Code
ds.calibration_single_ended(sections=sections,
st_var=st_var,
ast_var=ast_var,
method='wls')
ds.conf_int_single_ended(
st_var=st_var,
ast_var=ast_var,
conf_ints=[2.5, 97.5],
mc_sample_size=500)
###Output
_____no_output_____
###Markdown
Let's compare our calibrated values with the device calibration
###Code
ds1 = ds.isel(time=0) # take only the first timestep
ds1.tmpf.plot(linewidth=0.8, figsize=(12, 8), label='User calibrated') # plot the temperature calibrated by us
ds1.tmp.plot(linewidth=0.8, label='Device calibrated') # plot the temperature calibrated by the device
ds1.tmpf_mc.plot(linewidth=0.8, hue='CI', label='CI device')
plt.title('Temperature at the first time step')
plt.legend();
ds.tmpf_mc_var.plot(figsize=(12, 8));
ds1.tmpf_mc.sel(CI=2.5).plot(label = '2.5% CI', figsize=(12, 8))
ds1.tmpf_mc.sel(CI=97.5).plot(label = '97.5% CI')
ds1.tmpf.plot(label='User calibrated')
plt.title('User calibrated temperature with 95% confidence interval')
plt.legend();
###Output
_____no_output_____
###Markdown
We can tell from the graph above that the 95% confidence interval widens further down the cable. Let's have a look at the calculated variance along the cable for a single timestep. According to the device manufacturer this should be around 0.0059 degC.
###Code
ds1.tmpf_mc_var.plot(figsize=(12, 8));
###Output
_____no_output_____
###Markdown
The variance of the temperature measurement appears to be larger than what the manufacturer reports. This is already the case for the internal cable; it is not caused by a dirty connector/bad splice on our side. Maybe the length of the calibration section was not sufficient. At 30 m the variance sharply increases. There are several possible explanations, e.g., large temperatures or decreased signal strength. Let's have a look at the Stokes and anti-Stokes signal.
###Code
ds1.st.plot(figsize=(12, 8))
ds1.ast.plot();
###Output
_____no_output_____
###Markdown
7. Calibration of single ended measurement with WLS and confidence intervals A single-ended calibration is performed where the unknown parameters are estimated using fiber sections that have a reference temperature. The parameters are estimated with a weighted least squares optimization using Stokes and anti-Stokes measurements from all timesteps. Thus Stokes and anti-Stokes measurements with a large signal to noise ratio contribute more towards estimating the optimal parameter set. But an estimate of the noise variance is required.Single-ended calibration requires a few steps. Please have a look at [1] for more information:1. Read the raw data files loaded from your DTS machine2. Define the reference sections: fiber sections that have a known temperature.3. Estimate the variance of the noise in the Stokes and anti-Stokes measurements4. Perform the parameter search and compute the temperature along the entire fiber.5. Compute the confidence intervals for the temperature[1]: des Tombe, B., Schilperoort, B., & Bakker, M. (2020). Estimation of Temperature and Associated Uncertainty from Fiber-Optic Raman-Spectrum Distributed Temperature Sensing. Sensors, 20(8), 2235. https://doi.org/10.3390/s20082235
###Code
import os
from dtscalibration import read_silixa_files
import matplotlib.pyplot as plt
%matplotlib inline
filepath = os.path.join('..', '..', 'tests', 'data', 'single_ended')
ds = read_silixa_files(
directory=filepath,
timezone_netcdf='UTC',
file_ext='*.xml')
ds = ds.sel(x=slice(-30, 101)) # only calibrate parts of the fiber
sections = {
'probe1Temperature': [slice(20, 25.5)], # warm bath
'probe2Temperature': [slice(5.5, 15.5)], # cold bath
# 'referenceTemperature': [slice(-24., -4)] # The internal coil is not so uniform
}
ds.sections = sections
print(ds.calibration_single_ended.__doc__)
###Output
Calibrate the Stokes (`ds.st`) and anti-Stokes (`ds.ast`) data to
temperature using fiber sections with a known temperature
(`ds.sections`) for single-ended setups. The calibrated temperature is
stored under `ds.tmpf` and its variance under `ds.tmpf_var`.
In single-ended setups, Stokes and anti-Stokes intensity is measured
from a single end of the fiber. The differential attenuation is assumed
constant along the fiber so that the integrated differential attenuation
may be written as (Hausner et al, 2011):
.. math::
\int_0^x{\Delta\alpha(x')\,\mathrm{d}x'} \approx \Delta\alpha x
The temperature can now be written from Equation 10 [1]_ as:
.. math::
T(x,t) \approx \frac{\gamma}{I(x,t) + C(t) + \Delta\alpha x}
where
.. math::
I(x,t) = \ln{\left(\frac{P_+(x,t)}{P_-(x,t)}\right)}
.. math::
C(t) = \ln{\left(\frac{\eta_-(t)K_-/\lambda_-^4}{\eta_+(t)K_+/\lambda_+^4}\right)}
where :math:`C` is the lumped effect of the difference in gain at
:math:`x=0` between Stokes and anti-Stokes intensity measurements and
the dependence of the scattering intensity on the wavelength. The
parameters :math:`P_+` and :math:`P_-` are the Stokes and anti-Stokes
intensity measurements, respectively.
The parameters :math:`\gamma`, :math:`C(t)`, and :math:`\Delta\alpha`
must be estimated from calibration to reference sections, as discussed
in Section 5 [1]_. The parameter :math:`C` must be estimated
for each time and is constant along the fiber. :math:`T` in the listed
equations is in Kelvin, but is converted to Celsius after calibration.
Parameters
----------
store_p_cov : str
Key to store the covariance matrix of the calibrated parameters
store_p_val : str
Key to store the values of the calibrated parameters
p_val : array-like, optional
Define `p_val`, `p_var`, `p_cov` if you used an external function
for calibration. Has size 2 + `nt`. First value is :math:`\gamma`,
second is :math:`\Delta \alpha`, others are :math:`C` for each
timestep.
p_var : array-like, optional
Define `p_val`, `p_var`, `p_cov` if you used an external function
for calibration. Has size 2 + `nt`. First value is :math:`\gamma`,
second is :math:`\Delta \alpha`, others are :math:`C` for each
timestep.
p_cov : array-like, optional
The covariances of `p_val`.
If set to False, no uncertainty in the parameters is propagated
into the confidence intervals. Similar to the spec sheets of the DTS
manufacturers. And similar to passing an array filled with zeros.
sections : Dict[str, List[slice]], optional
If `None` is supplied, `ds.sections` is used. Define calibration
sections. Each section requires a reference temperature time series,
such as the temperature measured by an external temperature sensor.
They should already be part of the DataStore object. `sections`
is defined with a dictionary with its keywords of the
names of the reference temperature time series. Its values are
lists of slice objects, where each slice object is a fiber stretch
that has the reference temperature. Afterwards, `sections` is stored
under `ds.sections`.
st_var, ast_var : float, callable, array-like, optional
The variance of the measurement noise of the Stokes signals in the
forward direction. If `float` the variance of the noise from the
Stokes detector is described with a single value.
If `callable` the variance of the noise from the Stokes detector is
a function of the intensity, as defined in the callable function.
Or manually define a variance with a DataArray of the shape
`ds.st.shape`, where the variance can be a function of time and/or
x. Required if method is wls.
store_c : str
Label of where to store C
store_gamma : str
Label of where to store gamma
store_dalpha : str
Label of where to store dalpha; the spatial derivative of alpha.
store_alpha : str
Label of where to store alpha; The integrated differential
attenuation.
alpha(x=0) = 0
store_ta : str
Label of where to store transient alpha's
store_tmpf : str
Label of where to store the calibrated temperature of the forward
direction
variance_suffix : str
String appended for storing the variance. Only used when method
is wls.
method : {'ols', 'wls'}
Use `'ols'` for ordinary least squares and `'wls'` for weighted least
squares. `'wls'` is the default, and there is currently no reason to
use `'ols'`.
solver : {'sparse', 'stats'}
Either use the homemade weighted sparse solver or the weighted
dense matrix solver of statsmodels. The sparse solver uses much less
memory, is faster, and gives the same result as the statsmodels
solver. The statsmodels solver is mostly used to check the sparse
solver. `'stats'` is the default.
matching_sections : List[Tuple[slice, slice, bool]], optional
Provide a list of tuples. A tuple per matching section. Each tuple
has three items. The first two items are the slices of the sections
that are matched. The third item is a boolean and is True if the two
sections have a reverse direction ("J-configuration").
transient_att_x : iterable, optional
Splices can cause jumps in differential attenuation. Normal single
ended calibration assumes these are not present. An additional loss
term is added in the 'shadow' of the splice. Each location
introduces an additional nt parameters to solve for. Requiring
either an additional calibration section or matching sections.
If multiple locations are defined, the losses are added.
fix_gamma : Tuple[float, float], optional
A tuple containing two floats. The first float is the value of
gamma, and the second item is the variance of the estimate of gamma.
Covariances between gamma and other parameters are not accounted
for.
fix_dalpha : Tuple[float, float], optional
A tuple containing two floats. The first float is the value of
dalpha (:math:`\Delta \alpha` in [1]_), and the second item is the
variance of the estimate of dalpha.
Covariances between alpha and other parameters are not accounted
for.
Returns
-------
References
----------
.. [1] des Tombe, B., Schilperoort, B., & Bakker, M. (2020). Estimation
of Temperature and Associated Uncertainty from Fiber-Optic Raman-
Spectrum Distributed Temperature Sensing. Sensors, 20(8), 2235.
https://doi.org/10.3390/s20082235
Examples
--------
- `Example notebook 7: Calibrate single ended <https://github.com/dtscalibration/python-dts-calibration/blob/master/examples/notebooks/07Calibrate_single_wls.ipynb>`_
###Markdown
First calculate the variance in the measured Stokes and anti-Stokes signals, in the forward and backward direction.The Stokes and anti-Stokes signals should follow a smooth decaying exponential. This function fits a decaying exponential to each reference section for each time step. The variance of the residuals between the measured Stokes and anti-Stokes signals and the fitted signals is used as an estimate of the variance in measured signals.
###Code
st_var, resid = ds.variance_stokes(st_label='st')
ast_var, _ = ds.variance_stokes(st_label='ast')
###Output
_____no_output_____
###Markdown
Similar to the ols procedure, we make a single function call to calibrate the temperature. If the method is `wls` and confidence intervals are passed to `conf_ints`, confidence intervals are calculated. As weights are correctly passed to the least squares procedure, the covariance matrix can be used. This matrix holds the covariances between all the parameters. A large parameter set is generated from this matrix, assuming the parameter space is normally distributed with its mean at the best estimate of the least squares procedure. The large parameter set is used to calculate a large set of temperatures. By using `percentiles` or `quantile` the 95% confidence interval of the calibrated temperature between 2.5% and 97.5% is calculated. The confidence intervals differ per time step. If you would like to calculate confidence intervals of temporal averages or of averages of fiber sections, see notebook 16.
###Code
ds.calibration_single_ended(sections=sections,
st_var=st_var,
ast_var=ast_var,
method='wls')
ds.conf_int_single_ended(
st_var=st_var,
ast_var=ast_var,
conf_ints=[2.5, 97.5],
mc_sample_size=500)
###Output
_____no_output_____
###Markdown
Let's compare our calibrated values with the device calibration
###Code
ds1 = ds.isel(time=0) # take only the first timestep
ds1.tmpf.plot(linewidth=0.8, figsize=(12, 8), label='User calibrated') # plot the temperature calibrated by us
ds1.tmp.plot(linewidth=0.8, label='Device calibrated') # plot the temperature calibrated by the device
ds1.tmpf_mc.plot(linewidth=0.8, hue='CI', label='CI device')
plt.title('Temperature at the first time step')
plt.legend();
ds.tmpf_mc_var.plot(figsize=(12, 8));
ds1.tmpf_mc.sel(CI=2.5).plot(label = '2.5% CI', figsize=(12, 8))
ds1.tmpf_mc.sel(CI=97.5).plot(label = '97.5% CI')
ds1.tmpf.plot(label='User calibrated')
plt.title('User calibrated temperature with 95% confidence interval')
plt.legend();
###Output
_____no_output_____
###Markdown
We can tell from the graph above that the 95% confidence interval widens further down the cable. Let's have a look at the calculated variance along the cable for a single timestep. According to the device manufacturer this should be around 0.0059 degC.
###Code
ds1.tmpf_mc_var.plot(figsize=(12, 8));
###Output
_____no_output_____
###Markdown
The variance of the temperature measurement appears to be larger than what the manufacturer reports. This is already the case for the internal cable; it is not caused by a dirty connector/bad splice on our side. Maybe the length of the calibration section was not sufficient. At 30 m the variance sharply increases. There are several possible explanations, e.g., large temperatures or decreased signal strength. Let's have a look at the Stokes and anti-Stokes signal.
###Code
ds1.st.plot(figsize=(12, 8))
ds1.ast.plot();
###Output
_____no_output_____ |
code/bonus/scikit-model-to-json.ipynb | ###Markdown
[Sebastian Raschka](http://sebastianraschka.com), 2016https://github.com/rasbt/python-machine-learning-book Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).
###Code
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -d -p scikit-learn,numpy,scipy
# to install watermark just uncomment the following line:
#%install_ext https://raw.githubusercontent.com/rasbt/watermark/master/watermark.py
###Output
_____no_output_____
###Markdown
Bonus Material - Scikit-learn Model Persistence using JSON In many situations, it is desirable to store away a trained model for future use. These situations where we want to persist a model could be the deployment of a model in a web application, for example, or scientific reproducibility of our experiments. I [wrote](http://nbviewer.jupyter.org/github/rasbt/python-machine-learning-book/blob/master/code/ch09/ch09.ipynbSerializing-fitted-scikit-learn-estimators) a little bit about serializing scikit-learn models using `pickle` in context of the web applications that we developed in chapter 8. Also, you can find an excellent [tutorial section](http://scikit-learn.org/stable/modules/model_persistence.html) on scikit-learn's website. Honestly, I would say that pickling Python objects via the [`pickle`](https://docs.python.org/3.5/library/pickle.html), [`dill`](https://pypi.python.org/pypi/dill) or [`joblib`](https://pythonhosted.org/joblib/) modules is probably the most convenient approach to model persistence. However, pickling Python objects can sometimes be a little bit problematic, for example, deserializing a model in Python 3.x that was originally pickled in Python 2.7x and vice versa. Also, pickle offers different protocols (currently the protocols `0-4`), which are not necessarily backwards compatible. Thus, to prepare for the worst case scenario -- corrupted pickle files or version incompatibilities -- there's at least one other (a little bit more tedious) way to model persistence using [JSON](http://www.json.org). > JSON (JavaScript Object Notation) is a lightweight data-interchange format. It is easy for humans to read and write. It is easy for machines to parse and generate. It is based on a subset of the JavaScript Programming Language, Standard ECMA-262 3rd Edition - December 1999. JSON is a text format that is completely language independent but uses conventions that are familiar to programmers of the C-family of languages, including C, C++, C, Java, JavaScript, Perl, Python, and many others. These properties make JSON an ideal data-interchange language. [Source: http://www.json.org] One of the advantages of JSON is that it is a human-readable format. So, if push comes to shove, we should still be able to read the parameter files and model coefficients "manually" and assign these values to the respective scikit-learn estimator or build our own model to reproduce scientific results. Let's see how that works ... First, let us train a simple logistic regression classifier on Iris:
###Code
%matplotlib inline
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
from mlxtend.plotting import plot_decision_regions
iris = load_iris()
y, X = iris.target, iris.data[:, [0, 2]] # only use 2 features
lr = LogisticRegression(C=100.0,
class_weight=None,
dual=False,
fit_intercept=True,
intercept_scaling=1,
max_iter=100,
multi_class='multinomial',
n_jobs=1,
penalty='l2',
random_state=1,
solver='newton-cg',
tol=0.0001,
verbose=0,
warm_start=False)
lr.fit(X, y)
plot_decision_regions(X=X, y=y, clf=lr, legend=2)
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.show()
###Output
_____no_output_____
###Markdown
Luckily, we don't have to retype or copy & paste all the estimator parameters manually if we want to store them away. To get a dictionary of these parameters, we can simply use the handy "get_params" method:
###Code
lr.get_params()
###Output
_____no_output_____
###Markdown
Storing them in JSON format is easy, we simply import the `json` module from Python's standard library and dump the dictionary to a file:
###Code
import json
with open('./sckit-model-to-json/params.json', 'w', encoding='utf-8') as outfile:
json.dump(lr.get_params(), outfile)
###Output
_____no_output_____
###Markdown
When we read the file, we can see that the JSON file is just a 1-to-1 copy of our Python dictionary in text format:
###Code
with open('./sckit-model-to-json/params.json', 'r', encoding='utf-8') as infile:
print(infile.read())
###Output
{"dual": false, "max_iter": 100, "warm_start": false, "verbose": 0, "C": 100.0, "class_weight": null, "random_state": 1, "fit_intercept": true, "multi_class": "multinomial", "intercept_scaling": 1, "penalty": "l2", "solver": "newton-cg", "n_jobs": 1, "tol": 0.0001}
###Markdown
Now, the trickier part is to identify the "fit" parameters of the estimator, i.e., the parameters of our logistic regression model. However, in practice it's actually pretty straight forward to figure it out by heading over to the respective [documentation page](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html): Just look out for the "attributes" in the "Attribute" section that have a trailing underscore (thanks, scikit-learn team, for the beautifully thought-out API!). In case of logistic regression, we are interested in the weights `.coef_`, the bias unit `.intercept_`, and the `classes_` and `n_iter_` attributes.
###Code
attrs = [i for i in dir(lr) if i.endswith('_') and not i.endswith('__')]
print(attrs)
attr_dict = {i: getattr(lr, i) for i in attrs}
###Output
_____no_output_____
###Markdown
In order to serialize NumPy arrays to JSON objects, we need to cast the arrays to (nested) Python lists first; however, it's not that much of a hassle thanks to the `tolist` method. (Also, consider saving the attributes to separate JSON files, e.g., intercept.json and coef.json, for clarity.)
###Code
import numpy as np
for k in attr_dict:
if isinstance(attr_dict[k], np.ndarray):
attr_dict[k] = attr_dict[k].tolist()
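# Sanity check (sketch): the nested-list form should still match the original coefficients
assert np.allclose(np.array(attr_dict['coef_']), lr.coef_)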
###Output
_____no_output_____
###Markdown
Now, we are ready to dump our "attribute dictionary" to a JSON file:
###Code
with open('./sckit-model-to-json/attributes.json', 'w', encoding='utf-8') as outfile:
json.dump(attr_dict,
outfile,
separators=(',', ':'),
sort_keys=True,
indent=4)
###Output
_____no_output_____
###Markdown
If everything went fine, our JSON file should look like this -- in plaintext format:
###Code
with open('./sckit-model-to-json/attributes.json', 'r', encoding='utf-8') as infile:
print(infile.read())
###Output
{
"classes_":[
0,
1,
2
],
"coef_":[
[
0.42625236403173844,
-8.557501546363858
],
[
1.5644231337040186,
-1.6783659020502222
],
[
-1.990675497337773,
10.235867448186507
]
],
"intercept_":[
27.533384852155145,
4.18509910962595,
-31.71848396177913
],
"n_iter_":[
27
]
}
###Markdown
With similar ease, we can now use `json`'s `loads` method to read the data back from the ".json" files and re-assign them to Python objects. (Imagine the following happens in a new Python session.)
###Code
import codecs
import json
obj_text = codecs.open('./sckit-model-to-json/params.json', 'r', encoding='utf-8').read()
params = json.loads(obj_text)
obj_text = codecs.open('./sckit-model-to-json/attributes.json', 'r', encoding='utf-8').read()
attributes = json.loads(obj_text)
###Output
_____no_output_____
###Markdown
Finally, we just need to initialize a default `LogisticRegression` estimator, feed it the desired parameters via the `set_params` method, and reassign the other attributes using Python's built-in `setattr` (don't forget to recast the Python lists to NumPy arrays, though!):
###Code
%matplotlib inline
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
from mlxtend.plotting import plot_decision_regions
import numpy as np
iris = load_iris()
y, X = iris.target, iris.data[:, [0, 2]] # only use 2 features
lr = LogisticRegression()
lr.set_params(**params)
for k in attributes:
if isinstance(attributes[k], list):
setattr(lr, k, np.array(attributes[k]))
else:
setattr(lr, k, attributes[k])
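# Quick check (sketch): the restored estimator should predict without refitting
print(lr.predict(X[:5]))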
plot_decision_regions(X=X, y=y, clf=lr, legend=2)
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.show()
###Output
_____no_output_____
###Markdown
[Sebastian Raschka](http://sebastianraschka.com), 2016https://github.com/rasbt/python-machine-learning-book Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).
###Code
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -d -p scikit-learn,numpy,scipy
# to install watermark just uncomment the following line:
#%install_ext https://raw.githubusercontent.com/rasbt/watermark/master/watermark.py
###Output
_____no_output_____
###Markdown
Bonus Material - Scikit-learn Model Persistence using JSON In many situations, it is desirable to store away a trained model for future use. These situations where we want to persist a model could be the deployment of a model in a web application, for example, or scientific reproducibility of our experiments. I [wrote](http://nbviewer.jupyter.org/github/rasbt/python-machine-learning-book/blob/master/code/ch09/ch09.ipynbSerializing-fitted-scikit-learn-estimators) a little bit about serializing scikit-learn models using `pickle` in context of the web applications that we developed in chapter 8. Also, you can find an excellent [tutorial section](http://scikit-learn.org/stable/modules/model_persistence.html) on scikit-learn's website. Honestly, I would say that pickling Python objects via the [`pickle`](https://docs.python.org/3.5/library/pickle.html), [`dill`](https://pypi.python.org/pypi/dill) or [`joblib`](https://pythonhosted.org/joblib/) modules is probably the most convenient approach to model persistence. However, pickling Python objects can sometimes be a little bit problematic, for example, deserializing a model in Python 3.x that was originally pickled in Python 2.7x and vice versa. Also, pickle offers different protocols (currently the protocols `0-4`), which are not necessarily backwards compatible. Thus, to prepare for the worst case scenario -- corrupted pickle files or version incompatibilities -- there's at least one other (a little bit more tedious) way to model persistence using [JSON](http://www.json.org). > JSON (JavaScript Object Notation) is a lightweight data-interchange format. It is easy for humans to read and write. It is easy for machines to parse and generate. It is based on a subset of the JavaScript Programming Language, Standard ECMA-262 3rd Edition - December 1999. JSON is a text format that is completely language independent but uses conventions that are familiar to programmers of the C-family of languages, including C, C++, C, Java, JavaScript, Perl, Python, and many others. These properties make JSON an ideal data-interchange language. [Source: http://www.json.org] One of the advantages of JSON is that it is a human-readable format. So, if push comes to shove, we should still be able to read the parameter files and model coefficients "manually" and assign these values to the respective scikit-learn estimator or build our own model to reproduce scientific results. Let's see how that works ... First, let us train a simple logistic regression classifier on Iris:
###Code
%matplotlib inline
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
from mlxtend.evaluate import plot_decision_regions
iris = load_iris()
y, X = iris.target, iris.data[:, [0, 2]] # only use 2 features
lr = LogisticRegression(C=100.0,
class_weight=None,
dual=False,
fit_intercept=True,
intercept_scaling=1,
max_iter=100,
multi_class='multinomial',
n_jobs=1,
penalty='l2',
random_state=1,
solver='newton-cg',
tol=0.0001,
verbose=0,
warm_start=False)
lr.fit(X, y)
plot_decision_regions(X=X, y=y, clf=lr, legend=2)
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.show()
###Output
_____no_output_____
###Markdown
Luckily, we don't have to retype or copy & paste all the estimator parameters manually if we want to store them away. To get a dictionary of these parameters, we can simply use the handy "get_params" method:
###Code
lr.get_params()
###Output
_____no_output_____
###Markdown
Storing them in JSON format is easy, we simply import the `json` module from Python's standard library and dump the dictionary to a file:
###Code
import json
with open('./sckit-model-to-json/params.json', 'w', encoding='utf-8') as outfile:
json.dump(lr.get_params(), outfile)
###Output
_____no_output_____
###Markdown
When we read the file, we can see that the JSON file is just a 1-to-1 copy of our Python dictionary in text format:
###Code
with open('./sckit-model-to-json/params.json', 'r', encoding='utf-8') as infile:
print(infile.read())
###Output
{"dual": false, "max_iter": 100, "warm_start": false, "verbose": 0, "C": 100.0, "class_weight": null, "random_state": 1, "fit_intercept": true, "multi_class": "multinomial", "intercept_scaling": 1, "penalty": "l2", "solver": "newton-cg", "n_jobs": 1, "tol": 0.0001}
###Markdown
Now, the trickier part is to identify the "fit" parameters of the estimator, i.e., the parameters of our logistic regression model. However, in practice it's actually pretty straight forward to figure it out by heading over to the respective [documentation page](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html): Just look out for the "attributes" in the "Attribute" section that have a trailing underscore (thanks, scikit-learn team, for the beautifully thought-out API!). In case of logistic regression, we are interested in the weights `.coef_`, the bias unit `.intercept_`, and the `classes_` and `n_iter_` attributes.
###Code
attrs = [i for i in dir(lr) if i.endswith('_') and not i.endswith('__')]
print(attrs)
attr_dict = {i: getattr(lr, i) for i in attrs}
###Output
_____no_output_____
###Markdown
In order to serialize NumPy arrays to JSON objects, we need to cast the arrays to (nested) Python lists first; however, it's not that much of a hassle thanks to the `tolist` method. (Also, consider saving the attributes to separate JSON files, e.g., intercept.json and coef.json, for clarity.)
###Code
import numpy as np
for k in attr_dict:
if isinstance(attr_dict[k], np.ndarray):
attr_dict[k] = attr_dict[k].tolist()
###Output
_____no_output_____
###Markdown
Now, we are ready to dump our "attribute dictionary" to a JSON file:
###Code
with open('./sckit-model-to-json/attributes.json', 'w', encoding='utf-8') as outfile:
json.dump(attr_dict,
outfile,
separators=(',', ':'),
sort_keys=True,
indent=4)
###Output
_____no_output_____
###Markdown
If everything went fine, our JSON file should look like this -- in plaintext format:
###Code
with open('./sckit-model-to-json/attributes.json', 'r', encoding='utf-8') as infile:
print(infile.read())
###Output
{
"classes_":[
0,
1,
2
],
"coef_":[
[
0.42625236403173844,
-8.557501546363858
],
[
1.5644231337040186,
-1.6783659020502222
],
[
-1.990675497337773,
10.235867448186507
]
],
"intercept_":[
27.533384852155145,
4.18509910962595,
-31.71848396177913
],
"n_iter_":[
27
]
}
###Markdown
With similar ease, we can now use `json`'s `loads` method to read the data back from the ".json" files and re-assign them to Python objects. (Imagine the following happens in a new Python session.)
###Code
import codecs
import json
obj_text = codecs.open('./sckit-model-to-json/params.json', 'r', encoding='utf-8').read()
params = json.loads(obj_text)
obj_text = codecs.open('./sckit-model-to-json/attributes.json', 'r', encoding='utf-8').read()
attributes = json.loads(obj_text)
###Output
_____no_output_____
###Markdown
Finally, we just need to initialize a default `LogisticRegression` estimator, feed it the desired parameters via the `set_params` method, and reassign the other attributes using Python's built-in `setattr` (don't forget to recast the Python lists to NumPy arrays, though!):
###Code
%matplotlib inline
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
from mlxtend.evaluate import plot_decision_regions
import numpy as np
iris = load_iris()
y, X = iris.target, iris.data[:, [0, 2]] # only use 2 features
lr = LogisticRegression()
lr.set_params(**params)
for k in attributes:
if isinstance(attributes[k], list):
setattr(lr, k, np.array(attributes[k]))
else:
setattr(lr, k, attributes[k])
plot_decision_regions(X=X, y=y, clf=lr, legend=2)
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.show()
###Output
_____no_output_____
###Markdown
[Sebastian Raschka](http://sebastianraschka.com), 2016https://github.com/1iyiwei/pyml Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).
###Code
%load_ext watermark
%watermark -a '' -v -d -p scikit-learn,numpy,scipy
# to install watermark just uncomment the following line:
#%install_ext https://raw.githubusercontent.com/rasbt/watermark/master/watermark.py
###Output
_____no_output_____
###Markdown
Bonus Material - Scikit-learn Model Persistence using JSON In many situations, it is desirable to store away a trained model for future use. These situations where we want to persist a model could be the deployment of a model in a web application, for example, or scientific reproducibility of our experiments. I [wrote](http://nbviewer.jupyter.org/github/1iyiwei/pyml/blob/master/code/ch09/ch09.ipynbSerializing-fitted-scikit-learn-estimators) a little bit about serializing scikit-learn models using `pickle` in context of the web applications that we developed in chapter 8. Also, you can find an excellent [tutorial section](http://scikit-learn.org/stable/modules/model_persistence.html) on scikit-learn's website. Honestly, I would say that pickling Python objects via the [`pickle`](https://docs.python.org/3.5/library/pickle.html), [`dill`](https://pypi.python.org/pypi/dill) or [`joblib`](https://pythonhosted.org/joblib/) modules is probably the most convenient approach to model persistence. However, pickling Python objects can sometimes be a little bit problematic, for example, deserializing a model in Python 3.x that was originally pickled in Python 2.7x and vice versa. Also, pickle offers different protocols (currently the protocols `0-4`), which are not necessarily backwards compatible. Thus, to prepare for the worst case scenario -- corrupted pickle files or version incompatibilities -- there's at least one other (a little bit more tedious) way to model persistence using [JSON](http://www.json.org). > JSON (JavaScript Object Notation) is a lightweight data-interchange format. It is easy for humans to read and write. It is easy for machines to parse and generate. It is based on a subset of the JavaScript Programming Language, Standard ECMA-262 3rd Edition - December 1999. JSON is a text format that is completely language independent but uses conventions that are familiar to programmers of the C-family of languages, including C, C++, C, Java, JavaScript, Perl, Python, and many others. These properties make JSON an ideal data-interchange language. [Source: http://www.json.org] One of the advantages of JSON is that it is a human-readable format. So, if push comes to shove, we should still be able to read the parameter files and model coefficients "manually" and assign these values to the respective scikit-learn estimator or build our own model to reproduce scientific results. Let's see how that works ... First, let us train a simple logistic regression classifier on Iris:
###Code
%matplotlib inline
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
from mlxtend.evaluate import plot_decision_regions
iris = load_iris()
y, X = iris.target, iris.data[:, [0, 2]] # only use 2 features
lr = LogisticRegression(C=100.0,
class_weight=None,
dual=False,
fit_intercept=True,
intercept_scaling=1,
max_iter=100,
multi_class='multinomial',
n_jobs=1,
penalty='l2',
random_state=1,
solver='newton-cg',
tol=0.0001,
verbose=0,
warm_start=False)
lr.fit(X, y)
plot_decision_regions(X=X, y=y, clf=lr, legend=2)
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.show()
###Output
_____no_output_____
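###Markdown
For comparison with the JSON route, this is a minimal sketch of the `pickle`-based persistence mentioned in the introduction (added for illustration only; the file name `lr.pkl` is just an assumption):
###Code
import pickle

# Serialize the fitted estimator to disk ...
with open('lr.pkl', 'wb') as f:
    pickle.dump(lr, f)

# ... and load it back. Keep in mind the protocol/version caveats from the
# introduction when moving such files between Python versions.
with open('lr.pkl', 'rb') as f:
    lr_from_pickle = pickle.load(f)
###Output
_____no_output_____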
###Markdown
Luckily, we don't have to retype or copy & paste all the estimator parameters manually if we want to store them away. To get a dictionary of these parameters, we can simply use the handy "get_params" method:
###Code
lr.get_params()
###Output
_____no_output_____
###Markdown
Storing them in JSON format is easy, we simply import the `json` module from Python's standard library and dump the dictionary to a file:
###Code
import json
with open('./sckit-model-to-json/params.json', 'w', encoding='utf-8') as outfile:
json.dump(lr.get_params(), outfile)
###Output
_____no_output_____
###Markdown
When we read the file, we can see that the JSON file is just a 1-to-1 copy of our Python dictionary in text format:
###Code
with open('./sckit-model-to-json/params.json', 'r', encoding='utf-8') as infile:
print(infile.read())
###Output
{"dual": false, "max_iter": 100, "warm_start": false, "verbose": 0, "C": 100.0, "class_weight": null, "random_state": 1, "fit_intercept": true, "multi_class": "multinomial", "intercept_scaling": 1, "penalty": "l2", "solver": "newton-cg", "n_jobs": 1, "tol": 0.0001}
###Markdown
Now, the trickier part is to identify the "fit" parameters of the estimator, i.e., the parameters of our logistic regression model. However, in practice it's actually pretty straightforward to figure it out by heading over to the respective [documentation page](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html): Just look out for the "attributes" in the "Attribute" section that have a trailing underscore (thanks, scikit-learn team, for the beautifully thought-out API!). In the case of logistic regression, we are interested in the weights `.coef_`, the bias unit `.intercept_`, and the `classes_` and `n_iter_` attributes.
###Code
attrs = [i for i in dir(lr) if i.endswith('_') and not i.endswith('__')]
print(attrs)
attr_dict = {i: getattr(lr, i) for i in attrs}
###Output
_____no_output_____
###Markdown
In order to serialize NumPy arrays to JSON objects, we need to cast the arrays to (nested) Python lists first; however, it's not much of a hassle thanks to the `tolist` method. (Also, consider saving the attributes to separate JSON files, e.g., intercept.json and coef.json, for clarity.)
###Code
import numpy as np
for k in attr_dict:
if isinstance(attr_dict[k], np.ndarray):
attr_dict[k] = attr_dict[k].tolist()
###Output
_____no_output_____
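###Markdown
As the parenthetical note above suggests, each attribute could also be written to its own JSON file (e.g. coef.json and intercept.json). A minimal sketch, assuming the same output directory used elsewhere in this notebook:
###Code
# One file per fitted attribute; attr_dict already holds JSON-serializable values.
for name, value in attr_dict.items():
    with open('./sckit-model-to-json/%s.json' % name.rstrip('_'), 'w', encoding='utf-8') as f:
        json.dump(value, f)
###Output
_____no_output_____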
###Markdown
Now, we are ready to dump our "attribute dictionary" to a JSON file:
###Code
with open('./sckit-model-to-json/attributes.json', 'w', encoding='utf-8') as outfile:
json.dump(attr_dict,
outfile,
separators=(',', ':'),
sort_keys=True,
indent=4)
###Output
_____no_output_____
###Markdown
If everything went fine, our JSON file should look like this -- in plaintext format:
###Code
with open('./sckit-model-to-json/attributes.json', 'r', encoding='utf-8') as infile:
print(infile.read())
###Output
{
"classes_":[
0,
1,
2
],
"coef_":[
[
0.42625236403173844,
-8.557501546363858
],
[
1.5644231337040186,
-1.6783659020502222
],
[
-1.990675497337773,
10.235867448186507
]
],
"intercept_":[
27.533384852155145,
4.18509910962595,
-31.71848396177913
],
"n_iter_":[
27
]
}
###Markdown
With similar ease, we can now use `json`'s `loads` method to read the data back from the ".json" files and re-assign them to Python objects. (Imagine the following happens in a new Python session.)
###Code
import codecs
import json
obj_text = codecs.open('./sckit-model-to-json/params.json', 'r', encoding='utf-8').read()
params = json.loads(obj_text)
obj_text = codecs.open('./sckit-model-to-json/attributes.json', 'r', encoding='utf-8').read()
attributes = json.loads(obj_text)
###Output
_____no_output_____
###Markdown
Finally, we just need to initialize a default `LogisticRegression` estimator, feed it the desired parameters via the `set_params` method, and reassign the other attributes using Python's built-in `setattr` (don't forget to recast the Python lists to NumPy arrays, though!):
###Code
%matplotlib inline
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
from mlxtend.evaluate import plot_decision_regions
import numpy as np
iris = load_iris()
y, X = iris.target, iris.data[:, [0, 2]] # only use 2 features
lr = LogisticRegression()
lr.set_params(**params)
for k in attributes:
if isinstance(attributes[k], list):
setattr(lr, k, np.array(attributes[k]))
else:
setattr(lr, k, attributes[k])
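# Quick sanity check (an added sketch, not part of the original text): the
# restored estimator should now score on the Iris data without being refitted.
print('Accuracy of the restored model:', lr.score(X, y))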
plot_decision_regions(X=X, y=y, clf=lr, legend=2)
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.show()
###Output
_____no_output_____ |
_posts/scikit/topic-extract/topic-extract.ipynb | ###Markdown
This is an example of applying Non-negative Matrix Factorization and Latent Dirichlet Allocation on a corpus of documents and extracting additive models of the topic structure of the corpus. The output is a list of topics, each represented as a list of terms (weights are not shown). The default parameters (n_samples / n_features / n_topics) should make the example runnable in a couple of tens of seconds. You can try to increase the dimensions of the problem, but be aware that the time complexity is polynomial in NMF. In LDA, the time complexity is proportional to (n_samples * iterations). New to Plotly? Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/). You can set up Plotly to work in [online](https://plot.ly/python/getting-started/initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/start-plotting-online). We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! Version
###Code
import sklearn
sklearn.__version__
###Output
_____no_output_____
###Markdown
Imports
###Code
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
###Output
_____no_output_____
###Markdown
Calculations
###Code
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
print("Loading dataset...")
t0 = time()
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
print("done in %0.3fs." % (time() - t0))
# Use tf-idf features for NMF.
print("Extracting tf-idf features for NMF...")
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tfidf = tfidf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_topics, random_state=1,
alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("Fitting LDA models with tf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online',
learning_offset=50.,
random_state=0)
t0 = time()
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
###Output
Loading dataset...
done in 2.336s.
Extracting tf-idf features for NMF...
done in 0.555s.
Extracting tf features for LDA...
done in 0.514s.
Fitting the NMF model with tf-idf features, n_samples=2000 and n_features=1000...
done in 0.481s.
Topics in NMF model:
Topic #0:
just people don think like know time good make way really say right ve want did ll new use years
Topic #1:
windows use dos using window program os drivers application help software pc running ms screen files version card code work
Topic #2:
god jesus bible faith christian christ christians does heaven sin believe lord life church mary atheism belief human love religion
Topic #3:
thanks know does mail advance hi info interested email anybody looking card help like appreciated information send list video need
Topic #4:
car cars tires miles 00 new engine insurance price condition oil power speed good 000 brake year models used bought
Topic #5:
edu soon com send university internet mit ftp mail cc pub article information hope program mac email home contact blood
Topic #6:
file problem files format win sound ftp pub read save site help image available create copy running memory self version
Topic #7:
game team games year win play season players nhl runs goal hockey toronto division flyers player defense leafs bad teams
Topic #8:
drive drives hard disk floppy software card mac computer power scsi controller apple mb 00 pc rom sale problem internal
Topic #9:
key chip clipper keys encryption government public use secure enforcement phone nsa communications law encrypted security clinton used legal standard
Fitting LDA models with tf features, n_samples=2000 and n_features=1000...
done in 2.511s.
Topics in LDA model:
Topic #0:
edu com mail send graphics ftp pub available contact university list faq ca information cs 1993 program sun uk mit
Topic #1:
don like just know think ve way use right good going make sure ll point got need really time doesn
Topic #2:
christian think atheism faith pittsburgh new bible radio games alt lot just religion like book read play time subject believe
Topic #3:
drive disk windows thanks use card drives hard version pc software file using scsi help does new dos controller 16
Topic #4:
hiv health aids disease april medical care research 1993 light information study national service test led 10 page new drug
Topic #5:
god people does just good don jesus say israel way life know true fact time law want believe make think
Topic #6:
55 10 11 18 15 team game 19 period play 23 12 13 flyers 20 25 22 17 24 16
Topic #7:
car year just cars new engine like bike good oil insurance better tires 000 thing speed model brake driving performance
Topic #8:
people said did just didn know time like went think children came come don took years say dead told started
Topic #9:
key space law government public use encryption earth section security moon probe enforcement keys states lunar military crime surface technology
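###Markdown
The introduction notes that the per-term weights are not shown. As a small added illustration (not part of the original example), they can be read off the fitted `components_` matrix, e.g. for the first LDA topic:
###Code
# Top terms of LDA topic 0 together with their (unnormalised) component weights.
topic = lda.components_[0]
for i in topic.argsort()[:-n_top_words - 1:-1]:
    print(tf_feature_names[i], round(topic[i], 2))
###Output
_____no_output_____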
###Markdown
License Authors: Olivier Grisel, Lars Buitinck, Chyi-Kwei Yau License: BSD 3 clause
###Code
from IPython.display import display, HTML
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
'topic-extract.ipynb', 'scikit-learn/topics-extraction-with-nmf-lda/', 'Topic Extraction| plotly',
' ',
title = 'Topic Extraction | plotly',
name = 'Topic Extraction with Non-negative Matrix Factorization and Latent Dirichlet Allocation ',
has_thumbnail='true', thumbnail='thumbnail/scikit-default.jpg',
language='scikit-learn', page_type='example_index',
display_as='real_dataset', order=2,ipynb='~Diksha_Gabha/2678')
###Output
_____no_output_____ |
notebook/2018-01-29_check_for_vas_in_ovary.ipynb | ###Markdown
Check for *vas* expression
###Code
import csv
import os
import scipy.io
import pandas as pd
###Output
_____no_output_____
###Markdown
Ovary
###Code
# Get data (from cell ranger website)
genome = "dm6.16"
matrices_dir = "../output/ovary1/outs/filtered_gene_bc_matrices"
_matrix_dir = os.path.join(matrices_dir, genome)
mat = scipy.io.mmread(os.path.join(_matrix_dir, "matrix.mtx"))
genes_path = os.path.join(_matrix_dir, "genes.tsv")
gene_ids = [row[0] for row in csv.reader(open(genes_path), delimiter="\t")]
gene_names = [row[1] for row in csv.reader(open(genes_path), delimiter="\t")]
barcodes_path = os.path.join(_matrix_dir, "barcodes.tsv")
barcodes = [row[0] for row in csv.reader(open(barcodes_path), delimiter="\t")]
df = pd.DataFrame(mat.toarray(), index=gene_ids, columns=barcodes)
# check how many reads mapped to vasa
reads = df.loc['FBgn0283442'].sum()
print(f'There were {reads:,} vasa reads.')
###Output
There were 0 vasa reads.
###Markdown
testis
###Code
# Get data (from cell ranger website)
genome = "dm6.16"
matrices_dir = "../output/testis1/outs/filtered_gene_bc_matrices"
_matrix_dir = os.path.join(matrices_dir, genome)
mat = scipy.io.mmread(os.path.join(_matrix_dir, "matrix.mtx"))
genes_path = os.path.join(_matrix_dir, "genes.tsv")
gene_ids = [row[0] for row in csv.reader(open(genes_path), delimiter="\t")]
gene_names = [row[1] for row in csv.reader(open(genes_path), delimiter="\t")]
barcodes_path = os.path.join(_matrix_dir, "barcodes.tsv")
barcodes = [row[0] for row in csv.reader(open(barcodes_path), delimiter="\t")]
df = pd.DataFrame(mat.toarray(), index=gene_ids, columns=barcodes)
# check how many reads mapped to vasa
reads = df.loc['FBgn0283442'].sum()
print(f'There were {reads:,} vasa reads.')
# how many cells has vasa
cells = (df.loc['FBgn0283442'] > 0).sum()
print(f'There were {cells:,} cells expressing vasa.')
###Output
There were 652 vasa reads.
There were 55 cells expressing vasa.
|
code/Old stuff/20181008_combined_data_and_plot-Copy1.ipynb | ###Markdown
Imports
###Code
# Import modules for working with excel sheets and for plotting
# matplotlib: module for plotting
# pandas: module for working with dataframe (can be imported from excel, csv, txt)
# %: ipython magic, to plot graphs in line
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import re
from scipy import stats
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load DataThe following dataset is NOT on GitHub. Make sure your local directory structure is as follows: the repository directory contains the `code` and `assets` folders alongside the other files (.gitignore, README.md, LICENSE.txt, ...). Also, if you want to use the functions in this script with another dataset, make sure to change `number_of_rows` and the file name in the following import statements.
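###Markdown
A purely illustrative sketch of the `number_of_rows` idea mentioned above (the variable is not defined in the cells below, and the value 1302 is taken from the comment in the next cell, so treat both as assumptions):
###Code
# Hypothetical example: keep only the first `number_of_rows` rows after parsing.
number_of_rows = 1302
limited_data = pd.ExcelFile('../assets/to_combine/data_before_nov.xlsx').parse("Sheet1").head(number_of_rows)
###Output
_____no_output_____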
###Code
# Import excel file as a `pandas.ExcelFile' object (which basically has all sub-sheets in a big container!)
# also, only import 1302 rows
ca_data1 = pd.ExcelFile('../assets/to_combine/data_before_nov.xlsx')
ca_data2 = pd.ExcelFile('../assets/to_combine/data_after_nov.xlsx')
ca_data_11 = ca_data1.parse("Sheet1")
ca_data_12 = ca_data2.parse("Sheet1")
total_data = ca_data_11.append(ca_data_12)
writer = pd.ExcelWriter('total_data.xlsx', engine='xlsxwriter')
total_data.to_excel(writer, sheet_name='Sheet1')
writer.save()
ax = sns.lineplot(x="Time (s)", y="F/Fo", hue="Genotype", ci=95, data=total_data, hue_order = ["CTRL", "WTOE", "L89A", "L89K", "E120Q", "KO"]).set_title('3 uM UTP, Ca2+ free')
fig = ax.get_figure()
fig.savefig("total_data")
plt.xlim(0, 120)
plt.ylim(0.8, 3.5)
plt.show()
fig = ax.get_figure()
fig.savefig("total_data_zoom")
###Output
_____no_output_____ |
python/holoviz/200623_panel_interact_2d_array.ipynb | ###Markdown
Purpose- Try panel.interact and see how to use it- Apply to a 2D array with selectable number of items, rows, and max items
###Code
import panel as pn
from panel.interact import fixed
pn.extension()
###Output
_____no_output_____
###Markdown
1D and 2D indexing of a 1D list
###Code
num_tot = 11
n_rows = 4
n_cols = int(num_tot / n_rows)
if (num_tot % n_rows) != 0: n_cols += 1
print(num_tot, n_rows, n_cols)
###Output
11 4 3
###Markdown
2D indexes from 1D list
###Code
for im in range(num_tot):
j = im % n_rows
i = int(im / n_rows)
print(im, i, j)
###Output
0 0 0
1 0 1
2 0 2
3 0 3
4 1 0
5 1 1
6 1 2
7 1 3
8 2 0
9 2 1
10 2 2
###Markdown
2D for loops from 1D list
###Code
for i in range(n_cols):
for j in range(n_rows):
im = i * n_rows + j
if im < num_tot:
print(im, i, j)
###Output
0 0 0
1 0 1
2 0 2
3 0 3
4 1 0
5 1 1
6 1 2
7 1 3
8 2 0
9 2 1
10 2 2
###Markdown
Function
###Code
def rows_and_columns(n_tot, n_rows, n_max=12):
if n_tot > n_max: n_tot = n_max
n_cols = int(n_tot / n_rows)
if (n_tot % n_rows) != 0: n_cols += 1
# Set up placeholder panels
row = pn.Row()
for i in range(n_cols):
col = pn.Column()
for j in range(n_rows):
im = i * n_rows + j
if im < n_tot:
# print(im, i, j)
col.append(f"{im}")
row.append(col)
return row
rows_and_columns(n_tot=10, n_rows=4)
###Output
_____no_output_____
###Markdown
Basic panel.interact
###Code
pn.interact(rows_and_columns,
n_tot=(1, 25, None, 11),
n_rows=(1, 8, None, 4),
n_max=(1, 25, None, 12)
)
###Output
_____no_output_____
###Markdown
Use panel.widgets.Select
###Code
pn.interact(rows_and_columns,
n_tot=pn.widgets.Select(name='Number of images', value=11, options=[i + 1 for i in range(25)]),
n_rows=pn.widgets.Select(name='Number of image rows', value=4, options=[i + 1 for i in range(8)]),
n_max=pn.widgets.Select(name='Max number of images', value=10, options=[i + 1 for i in range(20)])
)
###Output
_____no_output_____
###Markdown
Fix n_max
###Code
pn.interact(rows_and_columns,
n_tot=pn.widgets.Select(name='Number of images', value=11, options=[i + 1 for i in range(25)]),
n_rows=pn.widgets.Select(name='Number of image rows', value=4, options=[i + 1 for i in range(8)]),
n_max=fixed(12)
)
###Output
_____no_output_____ |
visualisation/tutorials/A_complex_layout.ipynb | ###Markdown
Exploring arranging several pages in one layout in MagicsThis notebook will help you discover how to plot several maps on one page using Magics. At ECMWF, from your workstation: `module swap (or load) Magics/new`, then start `jupyter notebook` and load this notebook. The position of a page is set with the 4 following parameters- **page_x_position / page_y_position** : to position in cm the bottom left corner of the page in its parent- **page_x_length / page_y_length** : the dimension in cm. The drawing area (where the plotting is rendered) is called the subpage and can be positioned in its page using the 4 following parameters- **subpage_x_position / subpage_y_position** : to position in cm the bottom left corner of the drawing area (subpage) in its parent page.- **subpage_x_length / subpage_y_length** : the dimension in cm. Import Magics and define projection, dark background, light background, foreground and load dataWe will compose one image with 4 maps: - top left : temperature at 850 hPa and geopotential height at 500 hPa- top right : precipitation type- bottom left : simulated satellite image IR channel- bottom right : total precipitation. To start, let's import Magics and define the backgrounds and foreground and load the data for our maps.
###Code
import Magics.macro as magics
projection = magics.mmap(
subpage_clipping = "on",
subpage_map_library_area = "on",
subpage_map_area_name = "central_europe",
page_id_line = "off"
)
light_background = magics.mcoast(
map_coastline_sea_shade_colour = 'white',
map_coastline_land_shade_colour = 'cream',
map_grid = 'off',
map_coastline_land_shade = 'on',
map_coastline_sea_shade = 'on',
map_label = 'off',
map_coastline_colour = 'tan')
dark_background = magics.mcoast(
map_coastline_colour = "grey",
map_coastline_resolution = "medium",
map_coastline_thickness = 2,
map_coastline_land_shade = "on",
map_coastline_land_shade_colour = "RGB(0.25,0.25,0.25)",
map_coastline_sea_shade = "on",
map_coastline_sea_shade_colour = "black",
map_grid_line_style = "dash",
map_grid_colour = "grey"
)
foreground = magics.mcoast()
# Different meteorological parameter we will plot in this notebook
temperature = magics.mgrib(grib_input_file_name = "t850.grib")
geopotential = magics.mgrib(grib_input_file_name = "z500.grib")
precipitation = magics.mgrib(grib_input_file_name = "total_precipitation.grib")
sat_ir = magics.mgrib(grib_input_file_name = "ssd.grib",
grib_automatic_scaling = "off")
ptype = magics.mgrib(grib_input_file_name = "ptype.grib")
###Output
_____no_output_____
###Markdown
Shading and symbol definitionsNow we define contours for all fields. Temperature - gradients method for shading (More shading examples can be found [in this notebook](Contours_shading.ipynb "Shading contours")) Geopotential height - automatic style from ecCharts (More examples of automatic styles can be found [in this notebook](Contours_automtatic.ipynb "Automatic contours"))Precipitation type - advanced symbol plotting (More examples of symbol plotting can be found in the [simple symbol](Symbol_simple.ipynb "Simple symbol plotting") and [advanced symbol](Symbol_advanced.ipynb "Advanced symbol plotting") notebooks)Brightness temperature - cell shading Total precipitation - polygon shading with a user-defined list of levels and colours
###Code
# Temperature shading
t_cont = magics.mcont(
legend = "on",
contour = "off",
contour_level_selection_type = "level_list",
contour_level_list = [-30.,-20.,0.,20.,30.],
contour_gradients_step_list = [5, 10 ,10 ,5],
contour_label = "off",
contour_shade = "on",
contour_shade_colour_method = "gradients",
contour_gradients_technique = "rgb",
contour_shade_method = "area_fill",
contour_gradients_colour_list = ["RGB(0.01961,0.251,0.4157)","greenish_blue","white",
"orangish_red","RGB(0.3756,0.06648,0.05582)"],
contour_gradients_waypoint_method = "ignore")
# Geopotential shading - default ECMWF style
ecmwf_cont = magics.mcont(
contour_automatic_setting = "ecmwf",
legend = "off")
# Symbol plotting for precipitation type
symbol_plotting = magics.msymb(
symbol_advanced_table_colour_method = "list",
symbol_advanced_table_height_method = "calculate",
symbol_advanced_table_height_min_value = 1.,
symbol_advanced_table_height_max_value = 1.,
symbol_type = "marker",
symbol_table_mode = "advanced",
symbol_marker_mode = "name",
symbol_advanced_table_selection_type = "list",
symbol_advanced_table_colour_list = ["green","red","blue","navy","turquoise","orange"],
symbol_advanced_table_level_list = [1.00,3.00,5.00,6.00,7.00,8.00,9.00],
symbol_advanced_table_marker_name_list = ["ww_60","ww_67","ww_70","ww_68","ww_69","ww_79"])
# Cell shading for Brightness temperature
ir_clouds = magics.mcont(
contour_shade_colour_direction = "anti_clockwise",
contour_shade_min_level_colour = "white",
contour_shade_max_level_colour = "black",
contour_shade_colour_method = "calculate",
legend = "on",
contour = "off",
contour_level_selection_type = "level_list",
contour_level_list = [170.,199.,201.,203.,205.,207,208,209,210,
211,212,213,214,215,216,217,218,219,220,
221,222,223,224,225,226,227,228,229,230,
231,232,233,234,235,236,237,238,239,240,
241,242,243,244,245,246,247,249,251,253,
255,257,259,261,263,265,267,269,271,273,
275,277,279,281,283,285,287,289,291,293,
295,297,299,301,303,305,307,310,315,320,350],
contour_shade = "on",
contour_shade_technique = "cell_shading",
contour_shade_cell_resolution = 80
)
# Precipitation shading
tp_cont = magics.mcont(
contour_level_selection_type = "level_list",
contour_level_list = [0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500],
contour_shade = "on",
contour_shade_method = "area_fill",
contour_shade_colour_method = "list",
contour_shade_colour_list = ['#ffffd9','#edf8b1','#c7e9b4',
'#7fcdbb','#41b6c4','#1d91c0',
'#225ea8','#253494','#081d58'],
contour = "off",
legend = "on"
)
###Output
_____no_output_____
###Markdown
LegendsLet's define legends for our plots. All but one will be on the right side; we will use the automatic box mode and let Magics put them in the right position. Only the symbol legend will be on top because it takes less space there.
###Code
# Defining legend for total precipitation
tp_legend = magics.mlegend (
legend_display_type = "continuous",
legend_automatic_position = "right",
legend_title = "on",
legend_title_text = "Total precipitation",
legend_text_font_size = "0.45",
legend_text_colour = "#2b619e",
legend_label_frequency = 1)
# Defining legend for temperature
temp_legend = magics.mlegend (
legend_display_type = "continuous",
legend_automatic_position = "right",
legend_title = "on",
legend_title_text = "Temperature at 850 hPa",
legend_text_font_size = "0.45",
legend_text_colour = "#2b619e",
legend_text_composition = "user_text_only",
legend_values_list = [-30.,-20.,-10.,0.,10.,20.,30.])
# Defining legend for simulated satellite image
bt_legend = magics.mlegend(
legend_display_type = "continuous",
legend_automatic_position = "right",
legend_title = "on",
legend_title_text = "Brightness temperature",
legend_text_font_size = "0.45",
legend_text_colour = "#2b619e",
legend_label_frequency = 4,
legend_user_minimum = "on",
legend_user_minimum_text = "< 200",
legend_user_maximum = "on",
legend_user_maximum_text = "> 320",
legend_entry_border = "off")
# Defining legend for precipitation type
symb_legend = magics.mlegend(
legend_user_lines = ["Rain","Freezing rain", "Snow", "Wet snow", "Sleet", "Ice pellets"],
legend_box_mode = "positional",
legend_text_composition = "user_text_only",
legend_text_colour = "#2b619e",
legend_text_font_size = 0.4,
legend_box_y_position = 11.1,
legend_box_x_position = -0.7,
legend_box_x_length = 15.00,
legend_box_y_length = 2.5)
###Output
_____no_output_____
###Markdown
Text definitionsThe first four texts are titles at automatic positions, and the last 2 are labels that explain the page and subpage concept. As you can see, you can add HTML symbols to a title too.
###Code
top_left_text = magics.mtext(text_lines = ["Temperature at 850 hPa and Geopotential height at 500 hPa"],
text_font_size = 0.55,
text_colour = "charcoal")
bottom_right_text = magics.mtext(text_lines = [" ☂ Total precipitation ☔ "],
text_colour = "charcoal",
text_font_size = 0.6)
bottom_left_text = magics.mtext(text_lines = ["Brightness temperature"],
text_colour = "charcoal",
text_font_size = 0.6)
top_right_text = magics.mtext(text_lines = [" ❄ Precipitation type ❅"],
text_colour = "charcoal",
text_font_size = 0.6)
additional_text_page = magics.mtext(
text_lines = ["← PAGE →"],
text_colour = "red",
text_font_size = 1.4,
text_mode = "positional",
text_box_x_position = 3.75,
text_box_y_position = 13.50)
additional_text_subpage = magics.mtext(
text_lines = ["← SUBPAGE →"],
text_colour = "red",
text_font_size = 1.2,
text_mode = "positional",
text_box_x_position = 3.25,
text_box_y_position = 4.50)
###Output
_____no_output_____
###Markdown
Finally, A bit of layoutIn order to put 4 maps on our final product we have to define the size of the superpage (**super_page_x_length / super_page_y_length**) and the positions and lengths of the 4 pages. The top left page will have a page frame in order to help you understand the superpage/page/subpage concept.
###Code
top_left = magics.page(
layout = 'positional',
page_x_length = 15.,
page_y_length = 15.,
page_frame = 'on',
page_id_line = 'off',
page_x_position = 0.,
page_y_position = 14.,
super_page_x_length = 30.00,
super_page_y_length = 30.00)
top_right = magics.page(
page_frame = 'off',
layout = 'positional',
page_x_length = 15.,
page_y_length = 15.,
page_id_line = 'off',
page_x_position = 14.7,
page_y_position = 14.
)
bottom_right = magics.page(
page_frame = 'off',
layout = 'positional',
page_x_length = 15.,
page_y_length = 15.,
page_id_line = 'off',
page_x_position = 14.7,
page_y_position = 0.5
)
bottom_left = magics.page(
page_frame = 'off',
layout = 'positional',
page_x_length = 15.,
page_y_length = 15.,
page_id_line = 'off',
page_x_position = 0.,
page_y_position = 0.5
)
###Output
_____no_output_____
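###Markdown
As a side note, the subpage_* parameters described at the top of this notebook position the drawing area inside its page in the same way; a minimal, illustrative sketch (the numbers are arbitrary and this object is not used in the plot below):
###Code
# Illustration only: place the drawing area explicitly inside its page.
projection_positioned = magics.mmap(
    subpage_x_position = 1.,   # cm from the left edge of the page
    subpage_y_position = 1.,   # cm from the bottom edge of the page
    subpage_x_length = 13.,    # width of the drawing area in cm
    subpage_y_length = 13.,    # height of the drawing area in cm
    subpage_map_library_area = "on",
    subpage_map_area_name = "central_europe",
    page_id_line = "off"
)
###Output
_____no_output_____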
###Markdown
The plot commandAll that is left is to put everything in the plot command. Each page has page definition, projection, data, shading for data, coastline, legend and text.
###Code
magics.plot(top_left, projection, temperature, t_cont, geopotential, ecmwf_cont, foreground, temp_legend,top_left_text,
additional_text_subpage, additional_text_page,
bottom_right, projection, light_background, precipitation, tp_cont, tp_legend, foreground, bottom_right_text,
bottom_left, projection, sat_ir, ir_clouds, foreground, bt_legend, bottom_left_text,
top_right, projection, dark_background, ptype, symbol_plotting, symb_legend, top_right_text)
###Output
_____no_output_____
###Markdown
Exploring arranging several pages in one layout in MagicsThis notebook will help you discover how to plot several maps on one page using Magics. Install MagicsIf you don't have Magics installed, run the next cell to install Magics using conda.
###Code
# Install Magics in the current Jupyter kernel
import sys
!conda install --yes --prefix {sys.prefix} Magics
###Output
_____no_output_____
###Markdown
The position of a page is set with the 4 following parameters- **page_x_position / page_y_position** : to position in cm the bottom left corner of the page in its parent- **page_x_length / page_y_length** : the dimension in cm. The drawing area (where the plotting is rendered) is called the subpage and can be positioned in its page using the 4 following parameters- **subpage_x_position / subpage_y_position** : to position in cm the bottom left corner of the drawing area (subpage) in its parent page.- **subpage_x_length / subpage_y_length** : the dimension in cm. Import Magics and define projection, dark background, light background, foreground and load dataWe will compose one image with 4 maps: - top left : temperature at 850 hPa and geopotential height at 500 hPa- top right : precipitation type- bottom left : simulated satellite image IR channel- bottom right : total precipitation. To start, let's import Magics and define the backgrounds and foreground and load the data for our maps.
###Code
import Magics.macro as magics
projection = magics.mmap(
subpage_clipping = "on",
subpage_map_library_area = "on",
subpage_map_area_name = "central_europe",
page_id_line = "off"
)
light_background = magics.mcoast(
map_coastline_sea_shade_colour = 'white',
map_coastline_land_shade_colour = 'cream',
map_grid = 'off',
map_coastline_land_shade = 'on',
map_coastline_sea_shade = 'on',
map_label = 'off',
map_coastline_colour = 'tan')
dark_background = magics.mcoast(
map_coastline_colour = "grey",
map_coastline_resolution = "medium",
map_coastline_thickness = 2,
map_coastline_land_shade = "on",
map_coastline_land_shade_colour = "RGB(0.25,0.25,0.25)",
map_coastline_sea_shade = "on",
map_coastline_sea_shade_colour = "black",
map_grid_line_style = "dash",
map_grid_colour = "grey"
)
foreground = magics.mcoast()
# Different meteorological parameter we will plot in this notebook
temperature = magics.mgrib(grib_input_file_name = "../../data/t850.grib")
geopotential = magics.mgrib(grib_input_file_name = "../../data/z500.grib")
precipitation = magics.mgrib(grib_input_file_name = "../../data/total_precipitation.grib")
sat_ir = magics.mgrib(grib_input_file_name = "../../data/ssd.grib",
grib_automatic_scaling = "off")
ptype = magics.mgrib(grib_input_file_name = "../../data/ptype.grib")
###Output
_____no_output_____
###Markdown
Shading and symbol definitionsNow we define contours for all fields. Temperature - gradients method for shading (More shading examples can be found [in this notebook](Contours_shading.ipynb "Shading contours")) Geopotential height - automatic style from ecCharts (More examples of automatic styles can be found [in this notebook](Contours_automtatic.ipynb "Automatic contours"))Precipitation type - advanced symbol plotting (More examples of symbol plotting can be found in the [simple symbol](Symbol_simple.ipynb "Simple symbol plotting") and [advanced symbol](Symbol_advanced.ipynb "Advanced symbol plotting") notebooks)Brightness temperature - cell shading Total precipitation - polygon shading with a user-defined list of levels and colours
###Code
# Temperature shading
t_cont = magics.mcont(
legend = "on",
contour = "off",
contour_level_selection_type = "level_list",
contour_level_list = [-30.,-20.,0.,20.,30.],
contour_gradients_step_list = [5, 10 ,10 ,5],
contour_label = "off",
contour_shade = "on",
contour_shade_colour_method = "gradients",
contour_gradients_technique = "rgb",
contour_shade_method = "area_fill",
contour_gradients_colour_list = ["RGB(0.01961,0.251,0.4157)","greenish_blue","white",
"orangish_red","RGB(0.3756,0.06648,0.05582)"],
contour_gradients_waypoint_method = "ignore")
# Geopotential shading - default ECMWF style
ecmwf_cont = magics.mcont(
contour_automatic_setting = "ecmwf",
legend = "off")
# Symbol plotting for precipitation type
symbol_plotting = magics.msymb(
symbol_advanced_table_colour_method = "list",
symbol_advanced_table_height_method = "calculate",
symbol_advanced_table_height_min_value = 1.,
symbol_advanced_table_height_max_value = 1.,
symbol_type = "marker",
symbol_table_mode = "advanced",
symbol_marker_mode = "name",
symbol_advanced_table_selection_type = "list",
symbol_advanced_table_colour_list = ["green","red","blue","navy","turquoise","orange"],
symbol_advanced_table_level_list = [1.00,3.00,5.00,6.00,7.00,8.00,9.00],
symbol_advanced_table_marker_name_list = ["ww_60","ww_67","ww_70","ww_68","ww_69","ww_79"])
# Cell shading for Brightness temperature
ir_clouds = magics.mcont(
contour_shade_colour_direction = "anti_clockwise",
contour_shade_min_level_colour = "white",
contour_shade_max_level_colour = "black",
contour_shade_colour_method = "calculate",
legend = "on",
contour = "off",
contour_level_selection_type = "level_list",
contour_level_list = [170.,199.,201.,203.,205.,207,208,209,210,
211,212,213,214,215,216,217,218,219,220,
221,222,223,224,225,226,227,228,229,230,
231,232,233,234,235,236,237,238,239,240,
241,242,243,244,245,246,247,249,251,253,
255,257,259,261,263,265,267,269,271,273,
275,277,279,281,283,285,287,289,291,293,
295,297,299,301,303,305,307,310,315,320,350],
contour_shade = "on",
contour_shade_technique = "cell_shading",
contour_shade_cell_resolution = 80
)
# Precipitation shading
tp_cont = magics.mcont(
contour_level_selection_type = "level_list",
contour_level_list = [0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500],
contour_shade = "on",
contour_shade_method = "area_fill",
contour_shade_colour_method = "list",
contour_shade_colour_list = ['#ffffd9','#edf8b1','#c7e9b4',
'#7fcdbb','#41b6c4','#1d91c0',
'#225ea8','#253494','#081d58'],
contour = "off",
legend = "on"
)
###Output
_____no_output_____
###Markdown
LegendsLet's define legends for our plots. All but one will be on the right side; we will use the automatic box mode and let Magics put them in the right position. Only the symbol legend will be on top because it takes less space there.
###Code
# Defining legend for total precipitation
tp_legend = magics.mlegend (
legend_display_type = "continuous",
legend_automatic_position = "right",
legend_title = "on",
legend_title_text = "Total precipitation",
legend_text_font_size = "0.45",
legend_text_colour = "#2b619e",
legend_label_frequency = 1)
# Defining legend for temperature
temp_legend = magics.mlegend (
legend_display_type = "continuous",
legend_automatic_position = "right",
legend_title = "on",
legend_title_text = "Temperature at 850 hPa",
legend_text_font_size = "0.45",
legend_text_colour = "#2b619e",
legend_text_composition = "user_text_only",
legend_values_list = [-30.,-20.,-10.,0.,10.,20.,30.])
# Defining legend for simulated satellite image
bt_legend = magics.mlegend(
legend_display_type = "continuous",
legend_automatic_position = "right",
legend_title = "on",
legend_title_text = "Brightness temperature",
legend_text_font_size = "0.45",
legend_text_colour = "#2b619e",
legend_label_frequency = 4,
legend_user_minimum = "on",
legend_user_minimum_text = "< 200",
legend_user_maximum = "on",
legend_user_maximum_text = "> 320",
legend_entry_border = "off")
# Defining legend for precipitation type
symb_legend = magics.mlegend(
legend_user_lines = ["Rain","Freezing rain", "Snow", "Wet snow", "Sleet", "Ice pellets"],
legend_box_mode = "positional",
legend_text_composition = "user_text_only",
legend_text_colour = "#2b619e",
legend_text_font_size = 0.4,
legend_box_y_position = 11.1,
legend_box_x_position = -0.7,
legend_box_x_length = 15.00,
legend_box_y_length = 2.5)
###Output
_____no_output_____
###Markdown
Text definitionsFirst four texts are titles at automatic positions, and the last 2 are there as lables for explanation of page and subpage. As you can see, you can add HTML symbols in title too.
###Code
top_left_text = magics.mtext(text_lines = ["Temperature at 850 hPa and Geopotential height at 500 hPa"],
text_font_size = 0.55,
text_colour = "charcoal")
bottom_right_text = magics.mtext(text_lines = [" ☂ Total precipitation ☔ "],
text_colour = "charcoal",
text_font_size = 0.6)
bottom_left_text = magics.mtext(text_lines = ["Brightness temperature"],
text_colour = "charcoal",
text_font_size = 0.6)
top_right_text = magics.mtext(text_lines = [" ❄ Precipitation type ❅"],
text_colour = "charcoal",
text_font_size = 0.6)
additional_text_page = magics.mtext(
text_lines = ["← PAGE →"],
text_colour = "red",
text_font_size = 1.4,
text_mode = "positional",
text_box_x_position = 3.75,
text_box_y_position = 13.50)
additional_text_subpage = magics.mtext(
text_lines = ["← SUBPAGE →"],
text_colour = "red",
text_font_size = 1.2,
text_mode = "positional",
text_box_x_position = 3.25,
text_box_y_position = 4.50)
###Output
_____no_output_____
###Markdown
Finally, A bit of layoutIn order to put 4 maps on our final product we have to define the size of the superpage (**super_page_x_length / super_page_y_length**) and the positions and lengths of the 4 pages. The top left page will have a page frame in order to help you understand the superpage/page/subpage concept.
###Code
top_left = magics.page(
layout = 'positional',
page_x_length = 15.,
page_y_length = 15.,
page_frame = 'on',
page_id_line = 'off',
page_x_position = 0.,
page_y_position = 14.,
super_page_x_length = 30.00,
super_page_y_length = 30.00)
top_right = magics.page(
page_frame = 'off',
layout = 'positional',
page_x_length = 15.,
page_y_length = 15.,
page_id_line = 'off',
page_x_position = 14.7,
page_y_position = 14.
)
bottom_right = magics.page(
page_frame = 'off',
layout = 'positional',
page_x_length = 15.,
page_y_length = 15.,
page_id_line = 'off',
page_x_position = 14.7,
page_y_position = 0.5
)
bottom_left = magics.page(
page_frame = 'off',
layout = 'positional',
page_x_length = 15.,
page_y_length = 15.,
page_id_line = 'off',
page_x_position = 0.,
page_y_position = 0.5
)
###Output
_____no_output_____
###Markdown
The plot commandAll that is left is to put everything in the plot command. Each page has page definition, projection, data, shading for data, coastline, legend and text.
###Code
magics.plot(top_left, projection, temperature, t_cont, geopotential, ecmwf_cont, foreground, temp_legend,top_left_text,
additional_text_subpage, additional_text_page,
bottom_right, projection, light_background, precipitation, tp_cont, tp_legend, foreground, bottom_right_text,
bottom_left, projection, sat_ir, ir_clouds, foreground, bt_legend, bottom_left_text,
top_right, projection, dark_background, ptype, symbol_plotting, symb_legend, top_right_text)
###Output
_____no_output_____ |
_jupyter/2019-11-15-test.ipynb | ###Markdown
Testing for github
###Code
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# reading data
df=pd.read_csv("customers data_UCI.csv")
df.head(2)
plt.scatter(df["Fresh"], df["Milk"])
###Output
_____no_output_____ |
assignments/3/tests/3_2_trend-and-mean-strategies.ipynb | ###Markdown
Mustafa Kutay Yabas - EC581 - 29.10.2016 Assignment 3We will test two trading strategies based on return runs.- Trend Following Strategy - Buy after n days of positive return - Sell after m days of negative return- Mean Reversion Strategy - Buy after n days of negative return - Sell after m days of positive return Steps1. Apply two different trading algorithms for these strategies in quantstrat2. Apply these strategies to BIST100 index data3. For each strategy, optimize n and m using a grid search over the set 1,2,...,1004. Compare optimized versions of these two strategies5. Is it better to be a trend-follower or a contrarian in BIST?
###Code
# load libraries
library(quantstrat)
library(Quandl)
# define instruments
currency("USD")
stock("BIST", currency="USD", multiplier=1)
# get data
date_from = "2005-08-01"
date_to = "2016-05-25"
BIST<-Quandl("GOOG/INDEXIST_XU100", type="xts", start_date = date_from, end_date = date_to)
BIST<-na.omit(BIST)
BIST<-xts(coredata(BIST), as.POSIXct(time(BIST)))
# define strategy component names
portfolio_name = "investiphi"
strategy_trend = "trend_following"
strategy_mean = "mean_reversion"
account_trend = "account_trend"
account_mean = "account_mean"
# remove if defined before
rm.strat(portfolio_name)
rm.strat(strategy_trend)
rm.strat(strategy_mean)
rm.strat(account_trend)
rm.strat(account_mean)
# create .blotter and .strategy environments
.blotter<-new.env()
.strategy<-new.env()
# init portfolio and accoiunt in .blotter
init_eq <- 100000 # 100k
init_date <- as.character(as.Date(date_from) - 1)
initPortf(portfolio_name, symbols="BIST", initDate=init_date, currency="USD")
initAcct(account_trend, portfolios=portfolio_name, initDate=init_date, currency="USD", initEq = init_eq)
initAcct(account_mean, portfolios=portfolio_name, initDate=init_date, currency="USD", initEq = init_eq)
initOrders(portfolio_name, initDate=init_date)
# init strategies
strategy(strategy_trend, store=TRUE)
strategy(strategy_mean, store=TRUE)
# you can see whats inside
temp <- get("USD", envir=FinancialInstrument:::.instrument)
summary(temp)
consecutive_days<-function(days_pos,days_neg, stock, posneg = TRUE) {
#days_pos <- 4
#days_neg <- 4
#n_day_signals <- data.frame(positive = logical(length(time(stock))), negative = logical(length(time(stock))))
n_day_signals <- data.frame(sigcol = logical(length(time(stock))))
n_day_signals <- xts( n_day_signals, as.POSIXct(time(stock)) )
n_day_signals[1,1] <- NA
#Signal <- xts(c("Positive", "Negative"), as.POSIXct(time(BIST)))
sign_counter <- 1
sign_last <- -1
for (i in 2:length(time(stock))) {
sign_temp <- sign( as.numeric ( as.numeric( stock[i,4]) - as.numeric( stock[i-1,4]) ) )
if (sign_temp == sign_last) {
sign_counter <- sign_counter + 1
} else {
sign_counter <- 1
sign_last <- sign_temp
}
if (posneg) {
if (sign_counter == days_pos && sign_last == 1) {
n_day_signals[i,1] <- TRUE
} else {
n_day_signals[i,1] <- NA
}
} else {
if (sign_counter == days_neg && sign_last == -1) {
n_day_signals[i,1] <- TRUE
} else {
n_day_signals[i,1] <- NA
}
}
}
if (posneg == TRUE) {
return( n_day_signals$sigcol)
} else {
return(n_day_signals$sigcol)
}
}
add.signal(strategy_mean, name="consecutive_days",
arguments = list(days_pos = 4, days_neg = 4, stock=BIST, posneg=TRUE),
label="short"
)
add.signal(strategy_mean, name="consecutive_days",
arguments = list(days_pos = 4, days_neg = 4, stock=BIST, posneg=FALSE),
label="long"
)
add.signal(strategy_trend, name="consecutive_days",
arguments = list(days_pos = 4, days_neg = 4, stock=BIST, posneg=TRUE),
label="tlong"
)
add.signal(strategy_trend, name="consecutive_days",
arguments = list(days_pos = 4, days_neg = 4, stock=BIST, posneg=FALSE),
label="tshort"
)
order_qty = 1
# strategy_mean rules
add.rule(strategy_mean, name='ruleSignal',
arguments=list(sigcol='sigcol.short',
sigval=1,
orderside='short',
ordertype='market',
orderqty=-order_qty,
TxnFees=0,
replace=FALSE),
type='enter',
label='EnterShort'
)
add.rule(strategy_mean, name='ruleSignal',
arguments=list(sigcol='sigcol.long',
sigval=1,
orderside='long',
ordertype='market',
orderqty='all',
TxnFees=0,
replace=TRUE),
type='exit',
label='Exit2Long'
)
add.rule(strategy_mean, name='ruleSignal',
arguments=list(sigcol='sigcol.long',
sigval=TRUE,
orderside='long',
ordertype='market',
orderqty=order_qty,
TxnFees=0,
replace=FALSE),
type='enter',
label='EnterLong'
)
add.rule(strategy_mean, name='ruleSignal',
arguments=list(sigcol='sigcol.short',
sigval=TRUE,
orderside='short',
ordertype='market',
orderqty='all',
TxnFees=0,
replace=TRUE),
type='exit',
label='Exit2Short'
)
# strategy_trend rules
add.rule(strategy_trend, name='ruleSignal',
arguments=list(sigcol='sigcol.tshort',
sigval=1,
orderside='short',
ordertype='market',
orderqty=-order_qty,
TxnFees=0,
replace=FALSE),
type='enter',
label='EnterShort'
)
add.rule(strategy_trend, name='ruleSignal',
arguments=list(sigcol='sigcol.tlong',
sigval=1,
orderside='long',
ordertype='market',
orderqty='all',
TxnFees=0,
replace=TRUE),
type='exit',
label='Exit2Long'
)
add.rule(strategy_trend, name='ruleSignal',
arguments=list(sigcol='sigcol.tlong',
sigval=TRUE,
orderside='long',
ordertype='market',
orderqty=order_qty,
TxnFees=0,
replace=FALSE),
type='enter',
label='EnterLong'
)
add.rule(strategy_trend, name='ruleSignal',
arguments=list(sigcol='sigcol.tshort',
sigval=TRUE,
orderside='short',
ordertype='market',
orderqty='all',
TxnFees=0,
replace=TRUE),
type='exit',
label='Exit2Short'
)
#summary(get("mean_reversion", envir=.strategy))
# apply strategy
applyStrategy(strategy_mean, portfolio_name)
updatePortf(portfolio_name)
updateAcct(account_mean)
updateEndEq(account_mean)
applyStrategy(strategy_trend, portfolio_name)
updatePortf(portfolio_name)
updateAcct(account_trend)
updateEndEq(account_trend)
getEndEq(account_mean, date_to)
getEndEq(account_trend, date_to)
chart.Posn(portfolio_name, "BIST")
###Output
_____no_output_____ |
notebooks/Fairness - 210420.ipynb | ###Markdown
Fairness over different groupsHow well does the model do on different subsets of the test data?- 360 giving data predictions- 42 data predictions
###Code
import json
import pandas as pd
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
cd ..
from nutrition_labels.evaluate import merge_grants
###Output
/Users/gallaghe/Code/nutrition-labels/build/virtualenv/lib/python3.7/site-packages/gensim/similarities/__init__.py:15: UserWarning: The gensim.similarities.levenshtein submodule is disabled, because the optional Levenshtein package <https://pypi.org/project/python-Levenshtein/> is unavailable. Install Levenhstein (e.g. `pip install python-Levenshtein`) to suppress this warning.
warnings.warn(msg)
2021-04-29 11:30:31 wellcomeml.logger WARNING: If you want to use hdbscan you need to runpip3 install hdbscan --no-cache-dir --no-binary :all: --no-build-isolation Read more https://github.com/wellcometrust/WellcomeML/issues/197
###Markdown
Get grants data (title, description, year, organisation)
###Code
grant_data = pd.read_csv('data/raw/wellcome-grants-awarded-2005-2019.csv')
grant_data.drop_duplicates(subset=['Internal ID'], inplace=True)
len(grant_data)
###Output
_____no_output_____
###Markdown
Get test data
###Code
model_date = '210402'
# To get ground truth:
model_dir = f'models/{model_date}'
model_name = f'bert_log_reg_{model_date}' # it doesn't actually matter which model you choose, since all ground truth is the same
training_info_file = f'{model_dir}/training_information.json'
with open(training_info_file, 'r') as file:
for line in file:
model_data = json.loads(line)
        # take the entry that matches the model selected above
        line_model_name = list(model_data.keys())[0]
        if line_model_name == model_name:
raw_test_data = [(grant_id, m['Truth']) for grant_id, m in model_data[model_name].items() if m['Test/train']=='Test']
raw_train_data = [(grant_id, m['Truth']) for grant_id, m in model_data[model_name].items() if m['Test/train']=='Train']
break
raw_test_data = pd.DataFrame(raw_test_data, columns = ['Reference', 'Truth'])
raw_train_data = pd.DataFrame(raw_train_data, columns = ['Reference', 'Truth'])
###Output
_____no_output_____
###Markdown
Merge with 360 giving predictions
###Code
# To get predictions:
predictions_date = '210406'
model_360_preds = pd.read_csv(f'data/processed/predictions/{predictions_date}/wellcome-grants-awarded-2005-2019_tagged.csv')
len(model_360_preds)
model_360_preds.rename({'Tech grant prediction': 'Tech grant 360 prediction', 'Grant ID': 'Grant ID 1'}, axis=1, inplace=True)
test_data = pd.merge(
raw_test_data,
model_360_preds.drop_duplicates(subset=['Grant ID 1']),
how="left",
left_on='Reference',
right_on='Grant ID 1'
)
test_data.head(2)
training_label_name = 'Truth'
test_data = merge_grants(
test_data,
grant_data,
'Grant ID 1',
'Internal ID',
training_label_name
)
len(test_data)
train_data = pd.merge(
raw_train_data,
model_360_preds.drop_duplicates(subset=['Grant ID 1']),
how="left",
left_on='Reference',
right_on='Grant ID 1'
)
print(len(train_data))
training_label_name = 'Truth'
train_data = merge_grants(
train_data,
grant_data,
'Grant ID 1',
'Internal ID',
training_label_name
)
print(len(train_data))
###Output
469
###Markdown
Merge with 42 predictions
###Code
# To get predictions:
predictions_date = '210403'
model_42_preds = pd.read_csv(f'data/processed/predictions/{predictions_date}/all_grants_fortytwo_info_210420_tagged.csv')
len(model_42_preds)
model_42_preds.rename({'Tech grant prediction': 'Tech grant 42 prediction', 'Grant ID': 'Grant ID 2'}, axis=1, inplace=True)
test_data = pd.merge(
test_data,
model_42_preds.drop_duplicates(subset=['Grant ID 2']),
how="left",
left_on='Reference',
right_on='Grant ID 2'
)
len(test_data)
all(test_data['Tech grant 360 prediction']== test_data['Tech grant 42 prediction'])
###Output
_____no_output_____
###Markdown
Evaluate fairnessAll the predictions are the same, so the fairness results will be the same for both
###Code
# Found by manually looking at the list in the test data (so might not be conclusive!)
golden_triangle = [
'University College London', 'Imperial College London', "King's College London",
'University of Oxford',
'University of Cambridge',
'Exeter College Oxford'
]
def group_data_cols(test_data, golden_triangle):
# Golden triangle or not
test_data['Recipient organisation'] = ['Golden triangle' if org in golden_triangle else 'Not golden triangle' for org in test_data['Recipient Org:Name']]
# Greater london, international or not
region_grouped = []
for region in test_data['Region']:
if region == 'Greater London':
region_grouped.append('Greater London')
elif region == 'International':
region_grouped.append('International')
else:
region_grouped.append('UK, not greater London')
test_data['Region grouped'] = region_grouped
test_data['Recipient Org:Country grouped'] = ['UK' if g=='United Kingdom' else 'Not UK' for g in test_data['Recipient Org:Country']]
test_data['Financial Year grouped'] = [
'<2010' if int(g[0:4])<2010 else (
'2010-2015' if int(g[0:4])<2015 else (
'2015-2017' if int(g[0:4])<2017 else '>=2017')
) for g in test_data['Financial Year']]
test_data['Description length'] = test_data['Description'].agg(lambda x: len(x))
bins = [0,1000, 1250,1500, 2000, 3000, 4000]
test_data['Description length binned'] = pd.cut(test_data['Description length'], bins)
test_data['Title length'] = test_data['Title'].agg(lambda x: len(x))
bins = [0,250, 500,750, 1000, 2000]
test_data['Title length binned'] = pd.cut(test_data['Title length'], bins)
test_data["Title plus Description"] = test_data["Title"] + ' ' + test_data["Description"]
test_data["Title plus Description length"] = test_data["Title plus Description"].agg(lambda x: len(x))
bins = [0,1000, 1500, 2000, 3000, max(test_data["Title plus Description length"])]
test_data['Title plus Description length binned'] = pd.cut(test_data['Title plus Description length'], bins)
return test_data
test_data = group_data_cols(test_data, golden_triangle)
train_data = group_data_cols(train_data, golden_triangle)
data_types = [
'Recipient organisation',
'Region grouped',
'Recipient Org:Country grouped',
'Financial Year grouped',
'Title plus Description length binned',
]
def evaluate_data(data, pred_col):
y = data['Truth'].tolist()
y_predict = data[pred_col].tolist()
scores = {
'Sample size': len(data),
'accuracy': accuracy_score(y, y_predict),
'f1': f1_score(y, y_predict, average='binary'),
'precision_score': precision_score(y, y_predict, zero_division=0, average='binary'),
'recall_score': recall_score(y, y_predict, zero_division=0, average='binary')}
return scores
fairness_results = []
for column in data_types:
for pred_col in ['Tech grant 360 prediction']:
result = test_data.groupby(column).apply(lambda x: evaluate_data(x, pred_col)).to_dict()
for column_type, type_results in result.items():
this_test_data = test_data[test_data[column]==column_type]
column_results = {
'Prediction type': pred_col,
'Data type': column,
'Type': column_type,
'Train proportion in this class': sum(train_data[column]==column_type)/len(train_data),
'Test proportion true': sum(this_test_data['Truth']==1)/len(this_test_data)
}
for metric, value in type_results.items():
column_results[metric] = value
fairness_results.append(column_results)
fairness_results_df = pd.DataFrame(fairness_results).round(3)
fairness_results_df
fairness_results_df.to_csv(f'data/processed/fairness/fairness_results_{model_date}.csv')
###Output
_____no_output_____ |
Chapter_11/5_NN_Deep.ipynb | ###Markdown
Training MNIST with a deep and wide NN. Import the modules we will use.
###Code
import tensorflow as tf
import matplotlib.pyplot as plt
import random
###Output
/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
###Markdown
Load the MNIST data
###Code
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
###Output
WARNING:tensorflow:From <ipython-input-2-5c81c5f4c9da>:3: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From /anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Please write your own downloading logic.
WARNING:tensorflow:From /anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting MNIST_data/train-images-idx3-ubyte.gz
WARNING:tensorflow:From /anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting MNIST_data/train-labels-idx1-ubyte.gz
WARNING:tensorflow:From /anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.one_hot on tensors.
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
WARNING:tensorflow:From /anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
###Markdown
Define the constants
###Code
learning_rate = 0.001
training_epochs = 15
batch_size = 100
###Output
_____no_output_____
###Markdown
Declare the input placeholders
###Code
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
###Output
_____no_output_____
###Markdown
Build a deep and wide Neural Network: use **Xavier Initialization** and stack 5 layers.
###Code
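# Added note (not from the original notebook): Xavier (Glorot) initialization
# scales the random starting weights by each layer's fan-in and fan-out so the
# activation variance stays roughly constant across layers; for a [n_in, n_out]
# weight matrix, Var(W) ~ 2 / (n_in + n_out), and the uniform variant used
# below samples from +/- sqrt(6 / (n_in + n_out)).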
W1 = tf.get_variable("W1", shape=[784, 512],
initializer=tf.contrib.layers.xavier_initializer())
b1 = tf.Variable(tf.random_normal([512]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
W2 = tf.get_variable("W2", shape=[512, 512],
initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.Variable(tf.random_normal([512]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
W3 = tf.get_variable("W3", shape=[512, 512],
initializer=tf.contrib.layers.xavier_initializer())
b3 = tf.Variable(tf.random_normal([512]))
L3 = tf.nn.relu(tf.matmul(L2, W3) + b3)
W4 = tf.get_variable("W4", shape=[512, 512],
initializer=tf.contrib.layers.xavier_initializer())
b4 = tf.Variable(tf.random_normal([512]))
L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)
W5 = tf.get_variable("W5", shape=[512, 10],
initializer=tf.contrib.layers.xavier_initializer())
b5 = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(L4, W5) + b5
###Output
_____no_output_____
###Markdown
Define the loss function and the optimization method
###Code
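# Added note (not from the original notebook): softmax_cross_entropy_with_logits_v2
# computes the per-example cross-entropy -sum_c y_c * log(softmax(z)_c) between the
# one-hot label y and the logits z; tf.reduce_mean averages it over the batch, and
# the Adam optimizer then minimizes this averaged cost.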
cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=hypothesis, labels=Y)
)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
###Output
_____no_output_____
###Markdown
Initialize the Session
###Code
sess = tf.Session()
sess.run(tf.global_variables_initializer())
###Output
_____no_output_____
###Markdown
Train the model
###Code
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feed_dict = {X: batch_xs, Y: batch_ys}
        c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
        avg_cost += c / total_batch
    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))
print('Learning Finished!')
###Output
Epoch: 0001 cost = 0.291983139
Epoch: 0002 cost = 0.104170327
Epoch: 0003 cost = 0.070643487
Epoch: 0004 cost = 0.050214080
Epoch: 0005 cost = 0.040219263
Epoch: 0006 cost = 0.034975592
Epoch: 0007 cost = 0.030978311
Epoch: 0008 cost = 0.025430245
Epoch: 0009 cost = 0.026338585
Epoch: 0010 cost = 0.020523844
Epoch: 0011 cost = 0.017850943
Epoch: 0012 cost = 0.016786734
Epoch: 0013 cost = 0.016248527
Epoch: 0014 cost = 0.017074123
Epoch: 0015 cost = 0.011885058
Learning Finished!
###Markdown
Test the model and check the accuracy
###Code
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(
'Accuracy:',
sess.run(
accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels}
)
)
###Output
Accuracy: 0.9731
###Markdown
Even though the **Neural Network** was made deeper using **Xavier Initialization**, the accuracy came out lower than in the earlier post. This is most likely a case of **Overfitting**. A technique called **Dropout** can be used to prevent **Overfitting**.
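The next cell is an illustrative sketch added to this write-up, not part of the original notebook: it assumes a new `keep_prob` placeholder and shows where `tf.nn.dropout` would be wired in between the hidden layers defined above; the layer definitions and the feed_dicts would have to be updated accordingly.
###Code
# Hypothetical sketch only -- keep_prob and L1_drop are added names, not from
# the original notebook.
keep_prob = tf.placeholder(tf.float32)
# The dropped-out tensor would replace L1 as the input to the W2 matmul;
# the same pattern would follow L2, L3 and L4:
L1_drop = tf.nn.dropout(L1, keep_prob=keep_prob)
# Feed keep_prob=0.7 while training and keep_prob=1.0 while evaluating, e.g.:
# sess.run([cost, optimizer], feed_dict={X: batch_xs, Y: batch_ys, keep_prob: 0.7})
# sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1.0})
###Output
_____no_output_____
###Markdown
Predicting a random digit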
###Code
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r+1], 1)))
print(
"Prediction: ",
sess.run(
tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r+1]}
)
)
###Output
Label: [3]
Prediction: [3]
###Markdown
Plot the predicted digit
###Code
plt.imshow(
mnist.test.images[r:r + 1].reshape(28, 28),
cmap='Greys', interpolation='nearest'
)
plt.show()
###Output
_____no_output_____ |
python/10_Data Manipulation with Pandas - Part 1.ipynb | ###Markdown
----------------------------------------------------------CHAPTER 2 Dataset I/O
###Code
import pandas as pd
# File CSV
df_csv = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/sample_csv.csv")
print(df_csv.head(3)) # Menampilkan 3 data teratas
# File TSV
df_tsv = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/sample_tsv.tsv", sep='\t')
print(df_tsv.head(3)) # Menampilkan 3 data teratas
import pandas as pd
# File JSON
url = "https://storage.googleapis.com/dqlab-dataset/covid2019-api-herokuapp-v2.json"
df_json = pd.read_json(url)
print(df_json.head(10)) # Menampilkan 10 data teratas
import pandas as pd
# Baca file sample_csv.csv
df = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/sample_csv.csv")
# Tampilkan 3 data teratas
print("Tiga data teratas:\n", df.head(3))
# Tampilkan 3 data terbawah
print("Tiga data terbawah:\n", df.tail(3))
###Output
Tiga data teratas:
order_id order_date customer_id city province product_id \
0 1612339 2019-01-01 18055 Jakarta Selatan DKI Jakarta P0648
1 1612339 2019-01-01 18055 Jakarta Selatan DKI Jakarta P3826
2 1612339 2019-01-01 18055 Jakarta Selatan DKI Jakarta P1508
brand quantity item_price
0 BRAND_C 4 1934000
1 BRAND_V 8 604000
2 BRAND_G 12 747000
Tiga data terbawah:
order_id order_date customer_id city province product_id \
98 1612390 2019-01-01 12681 Makassar Sulawesi Selatan P3354
99 1612390 2019-01-01 12681 Makassar Sulawesi Selatan P3357
100 1612390 2019-01-01 12681 Makassar Sulawesi Selatan P0422
brand quantity item_price
98 BRAND_S 24 450000
99 BRAND_S 24 450000
100 BRAND_B 4 1325000
###Markdown
-------------------------------------------------------CHAPTER 3 Indexing, Slicing, dan Transforming
###Code
import pandas as pd
# Baca file TSV sample_tsv.tsv
df = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/sample_tsv.tsv", sep="\t")
# Index dari df
print("Index:",df.index)
# Column dari df
print("Columns:",df.columns)
import pandas as pd
# Baca file TSV sample_tsv.tsv
df = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/sample_tsv.tsv", sep="\t")
# Set multi index df
df_x = df.set_index(['order_date', 'city', 'customer_id'])
# Print nama dan level dari multi index
for name, level in zip(df_x.index.names, df_x.index.levels):
    print(name,':',level)
import pandas as pd
# Baca file sample_tsv.tsv untuk 10 baris pertama saja
df = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/sample_tsv.tsv", sep="\t", nrows=10)
# Cetak data frame awal
print("Dataframe awal:\n", df)
# Set index baru
df.index = ["Pesanan ke-" + str(i) for i in range(1, 11)]
# Cetak data frame dengan index baru
print("Dataframe dengan index baru:\n", df)
import pandas as pd
# Baca file sample_tsv.tsv dan set lah index_col sesuai instruksi
df = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/sample_tsv.tsv", sep="\t", index_col=["order_date","order_id"])
# Cetak data frame untuk 8 data teratas
print("Dataframe:\n", df.head(8))
import pandas as pd
# Baca file sample_csv.csv
df = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/sample_csv.csv")
# Slice langsung berdasarkan kolom
df_slice = df.loc[(df["customer_id"] == "18055") &
(df["product_id"].isin(["P0029","P0040","P0041","P0116","P0117"]))
]
print("Slice langsung berdasarkan kolom:\n", df_slice)
import pandas as pd
# Baca file sample_csv.csv
df = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/sample_csv.csv")
# Set index dari df sesuai instruksi
df = df.set_index(["order_date","order_id","product_id"])
# Slice sesuai intruksi
df_slice = df.loc[("2019-01-01",1612339,["P2154","P2159"]),:]
print("Slice df:\n", df_slice)
import pandas as pd
# Baca file sample_csv.csv
df = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/sample_csv.csv")
# Tampilkan tipe data
print("Tipe data df:\n", df.dtypes)
# Ubah tipe data kolom order_date menjadi datetime
df["order_date"] = pd.to_datetime(df["order_date"])
# Tampilkan tipe data df setelah transformasi
print("\nTipe data df setelah transformasi:\n", df.dtypes)
import pandas as pd
# Baca file sample_csv.csv
df = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/sample_csv.csv")
# Tampilkan tipe data
print("Tipe data df:\n", df.dtypes)
# Ubah tipe data kolom quantity menjadi tipe data numerik float
df["quantity"] = pd.to_numeric(df["quantity"], downcast="float")
# Ubah tipe data kolom city menjadi tipe data category
df["city"] = df["city"].astype("category")
# Tampilkan tipe data df setelah transformasi
print("\nTipe data df setelah transformasi:\n", df.dtypes)
import pandas as pd
# Baca file sample_csv.csv
df = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/sample_csv.csv")
# Cetak 5 baris teratas kolom brand
print("Kolom brand awal:\n", df["brand"].head())
# Gunakan method apply untuk merubah isi kolom menjadi lower case
df["brand"] = df["brand"].apply(lambda x: x.lower())
# Cetak 5 baris teratas kolom brand
print("Kolom brand setelah apply:\n", df["brand"].head())
# Gunakan method map untuk mengambil kode brand yaitu karakter terakhirnya
df["brand"] = df["brand"].map(lambda x: x[-1])
# Cetak 5 baris teratas kolom brand
print("Kolom brand setelah map:\n", df["brand"].head())
import numpy as np
import pandas as pd
# number generator, set angka seed menjadi suatu angka, bisa semua angka, supaya hasil random nya selalu sama ketika kita run
np.random.seed(1234)
# create dataframe 3 baris dan 4 kolom dengan angka random
df_tr = pd.DataFrame(np.random.rand(3,4))
# Cetak dataframe
print("Dataframe:\n", df_tr)
# Cara 1 dengan tanpa define function awalnya, langsung pake fungsi anonymous lambda x
df_tr1 = df_tr.applymap(lambda x: x**2 + 3*x + 2)
print("\nDataframe - cara 1:\n", df_tr1)
# Cara 2 dengan define function
def qudratic_fun(x):
    return x**2 + 3*x + 2
df_tr2 = df_tr.applymap(qudratic_fun)
print("\nDataframe - cara 2:\n", df_tr2)
###Output
Dataframe:
0 1 2 3
0 0.191519 0.622109 0.437728 0.785359
1 0.779976 0.272593 0.276464 0.801872
2 0.958139 0.875933 0.357817 0.500995
Dataframe - cara 1:
0 1 2 3
0 2.611238 4.253346 3.504789 4.972864
1 4.948290 2.892085 2.905825 5.048616
2 5.792449 5.395056 3.201485 3.753981
Dataframe - cara 2:
0 1 2 3
0 2.611238 4.253346 3.504789 4.972864
1 4.948290 2.892085 2.905825 5.048616
2 5.792449 5.395056 3.201485 3.753981
###Markdown
-------------------------------------------------------CHAPTER 4 Handling Missing Values
###Code
import pandas as pd
# Baca file "public data covid19 jhu csse eu.csv"
df = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/CHAPTER%204%20-%20missing%20value%20-%20public%20data%20covid19%20.csv")
# Cetak info dari df
print(df.info())
# Cetak jumlah missing value di setiap kolom
mv = df.isna().sum()
print("\nJumlah missing value per kolom:\n", mv)
import pandas as pd
# Baca file "public data covid19 jhu csse eu.csv"
df = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/CHAPTER%204%20-%20missing%20value%20-%20public%20data%20covid19%20.csv")
# Cetak ukuran awal dataframe
print("Ukuran awal df: %d baris, %d kolom." % df.shape)
# Drop kolom yang seluruhnya missing value dan cetak ukurannya
df = df.dropna(axis=1, how="all")
print("Ukuran df setelah buang kolom dengan seluruh data missing: %d baris, %d kolom." % df.shape)
# Drop baris jika ada satu saja data yang missing dan cetak ukurannya
df = df.dropna(axis=0, how="any")
print("Ukuran df setelah dibuang baris yang memiliki sekurangnya 1 missing value: %d baris, %d kolom." % df.shape)
import pandas as pd
# Baca file "public data covid19 jhu csse eu.csv"
df = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/CHAPTER%204%20-%20missing%20value%20-%20public%20data%20covid19%20.csv")
# Cetak unique value pada kolom province_state
print("Unique value awal:\n", df["province_state"].unique())
# Ganti missing value dengan string "unknown_province_state"
df["province_state"] = df["province_state"].fillna("unknown_province_state")
# Cetak kembali unique value pada kolom province_state
print("Unique value setelah fillna:\n", df["province_state"].unique())
import pandas as pd
# Baca file "https://storage.googleapis.com/dqlab-dataset/CHAPTER%204%20-%20missing%20value%20-%20public%20data%20covid19%20.csv"
df = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/CHAPTER%204%20-%20missing%20value%20-%20public%20data%20covid19%20.csv")
# Cetak nilai mean dan median awal
print("Awal: mean = %f, median = %f." % (df["active"].mean(), df["active"].median()))
# Isi missing value kolom active dengan median
df_median = df["active"].fillna(df["active"].median())
# Cetak nilai mean dan median awal setelah diisi dengan median
print("Fillna median: mean = %f, median = %f." % (df_median.mean(), df_median.median()))
# Isi missing value kolom active dengan mean
df_mean = df["active"].fillna(df["active"].mean())
# Cetak nilai mean dan median awal setelah diisi dengan mean
print("Fillna mean: mean = %f, median = %f." % (df_mean.mean(), df_mean.median()))
import numpy as np
import pandas as pd
# Data
ts = pd.Series({
"2020-01-01":9,
"2020-01-02":np.nan,
"2020-01-05":np.nan,
"2020-01-07":24,
"2020-01-10":np.nan,
"2020-01-12":np.nan,
"2020-01-15":33,
"2020-01-17":np.nan,
"2020-01-16":40,
"2020-01-20":45,
"2020-01-22":52,
"2020-01-25":75,
"2020-01-28":np.nan,
"2020-01-30":np.nan
})
# Isi missing value menggunakan interpolasi linier
ts = ts.interpolate()
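# Added worked note: the default method='linear' interpolates on position, not
# on the date labels, so missing points are spaced evenly between their
# neighbours. Between 24 (2020-01-07) and 33 (2020-01-15) there are two missing
# entries, so the step is (33 - 24) / 3 = 3, giving the interpolated values 27
# and 30 shown below; trailing NaNs after the last valid value (75) are simply
# carried forward.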
# Cetak time series setelah interpolasi linier
print("Setelah diisi missing valuenya:\n", ts)
###Output
Setelah diisi missing valuenya:
2020-01-01 9.0
2020-01-02 14.0
2020-01-05 19.0
2020-01-07 24.0
2020-01-10 27.0
2020-01-12 30.0
2020-01-15 33.0
2020-01-17 36.5
2020-01-16 40.0
2020-01-20 45.0
2020-01-22 52.0
2020-01-25 75.0
2020-01-28 75.0
2020-01-30 75.0
dtype: float64
###Markdown
-------------------------------------------------------------CHAPTER 5
###Code
import pandas as pd
# 1. Baca dataset
print("[1] BACA DATASET")
df = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/retail_raw_test.csv", low_memory=False)
print(" Dataset:\n", df.head())
print(" Info:\n", df.info())
# 2. Ubah tipe data
print("\n[2] UBAH TIPE DATA")
df["customer_id"] = df["customer_id"].apply(lambda x: x.split("'")[1]).astype("int64")
df["quantity"] = df["quantity"].apply(lambda x: x.split("'")[1]).astype("int64")
df["item_price"] = df["item_price"].apply(lambda x: x.split("'")[1]).astype("int64")
print(" Tipe data:\n", df.dtypes)
# 3. Transform "product_value" supaya bentuknya seragam dengan format "PXXXX", assign ke kolom baru "product_id", dan drop kolom "product_value", jika terdapat nan gantilah dengan "unknown"
print("\n[3] TRANSFORM product_value MENJADI product_id")
# Buat fungsi
import math
def impute_product_value(val):
    if math.isnan(val):
        return "unknown"
    else:
        return 'P' + '{:0>4}'.format(str(val).split('.')[0])
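# Added explanatory note: '{:0>4}' right-aligns the numeric part and pads it
# with leading zeros to a width of 4 characters, so e.g. 29.0 becomes 'P0029'
# while 1374.0 stays 'P1374'.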
# Buat kolom "product_id"
df["product_id"] = df["product_value"].apply(lambda x: impute_product_value(x))
# Hapus kolom "product_value"
df.drop(["product_value"], axis=1, inplace=True)
# Cetak 5 data teratas
print(df.head())
# 4. Tranform order_date menjadi value dengan format "YYYY-mm-dd"
print("\n[4] TRANSFORM order_date MENJADI FORMAT YYYY-mm-dd")
months_dict = {
"Jan":"01",
"Feb":"02",
"Mar":"03",
"Apr":"04",
"May":"05",
"Jun":"06",
"Jul":"07",
"Aug":"08",
"Sep":"09",
"Oct":"10",
"Nov":"11",
"Dec":"12"
}
df["order_date"] = pd.to_datetime(df["order_date"].apply(lambda x: str(x)[-4:] + "-" + months_dict[str(x)[:3]] + "-" + str(x)[4:7]))
print(" Tipe data:\n", df.dtypes)
# 5. Mengatasi data yang hilang di beberapa kolom
print("\n[5] HANDLING MISSING VALUE")
# Kolom "city" dan "province" masih memiliki missing value, nilai yang hilang di kedua kolom ini diisi saja dengan "unknown"
df[["city","province"]] = df[["city","province"]].fillna("unknown")
# Kolom brand juga masih memiliki missing value, Ganti value NaN menjadi "no_brand"
df["brand"] = df["brand"].fillna("no_brand")
# Cek apakah masih terdapat missing value di seluruh kolom
print(" Info:\n", df.info())
# 6. Membuat kolom baru "city/province" dengan menggabungkan kolom "city" dan kolom "province" dan delete kolom asalnya
print("\n[6] MEMBUAT KOLOM BARU city/province")
df["city/province"] = df["city"] + "/" + df["province"]
# drop kolom "city" dan "province" karena telah digabungkan
df.drop(["city","province"], axis=1, inplace=True)
# Cetak 5 data teratas
print(df.head())
# 7. Membuat hierarchical index yang terdiri dari kolom "city/province", "order_date", "customer_id", "order_id", "product_id"
print("\n[7] MEMBUAT HIERACHICAL INDEX")
df = df.set_index(["city/province","order_date","customer_id","order_id","product_id"])
# urutkanlah berdasarkan index yang baru
df = df.sort_index()
# Cetak 5 data teratas
print(df.head())
# 8. Membuat kolom "total_price" yang formula nya perkalian antara kolom "quantity" dan kolom "item_price"
print("\n[8] MEMBUAT KOLOM total_price")
df["total_price"] = df["quantity"] * df["item_price"]
# Cetak 5 data teratas
print(df.head())
# 9. Slice dataset agar hanya terdapat data bulan Januari 2019
print("\n[9] SLICE DATASET UNTUK BULAN JANUARI 2019 SAJA")
idx = pd.IndexSlice
df_jan2019 = df.loc[idx[:, "2019-01-01":"2019-01-31"], :]
print("Dataset akhir:\n", df_jan2019)
# END OF PROJECT
###Output
[1] BACA DATASET
Dataset:
order_id order_date customer_id city province brand \
0 1730350 Dec 11, 2019 '13447 Surakarta Jawa Tengah BRAND_F
1 1677490 Jul 31, 2019 '0 NaN NaN BRAND_F
2 1704211 Oct 18, 2019 '16128 Jakarta Pusat DKI Jakarta BRAND_H
3 1679695 Aug 07, 2019 '16225 Yogyakarta Yogyakarta BRAND_H
4 1679080 Aug 05, 2019 '0 NaN NaN BRAND_E
quantity item_price product_value
0 '24 '113000 1374.0
1 '1 '1164000 1370.0
2 '12 '747000 1679.0
3 '6 '590000 1708.0
4 '2 '740000 1201.0
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5000 entries, 0 to 4999
Data columns (total 9 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 order_id 5000 non-null int64
1 order_date 5000 non-null object
2 customer_id 5000 non-null object
3 city 3802 non-null object
4 province 3802 non-null object
5 brand 4995 non-null object
6 quantity 5000 non-null object
7 item_price 5000 non-null object
8 product_value 4995 non-null float64
dtypes: float64(1), int64(1), object(7)
memory usage: 351.7+ KB
Info:
None
[2] UBAH TIPE DATA
Tipe data:
order_id int64
order_date object
customer_id int64
city object
province object
brand object
quantity int64
item_price int64
product_value float64
dtype: object
[3] TRANSFORM product_value MENJADI product_id
order_id order_date customer_id city province brand \
0 1730350 Dec 11, 2019 13447 Surakarta Jawa Tengah BRAND_F
1 1677490 Jul 31, 2019 0 NaN NaN BRAND_F
2 1704211 Oct 18, 2019 16128 Jakarta Pusat DKI Jakarta BRAND_H
3 1679695 Aug 07, 2019 16225 Yogyakarta Yogyakarta BRAND_H
4 1679080 Aug 05, 2019 0 NaN NaN BRAND_E
quantity item_price product_id
0 24 113000 P1374
1 1 1164000 P1370
2 12 747000 P1679
3 6 590000 P1708
4 2 740000 P1201
[4] TRANSFORM order_date MENJADI FORMAT YYYY-mm-dd
Tipe data:
order_id int64
order_date datetime64[ns]
customer_id int64
city object
province object
brand object
quantity int64
item_price int64
product_id object
dtype: object
[5] HANDLING MISSING VALUE
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5000 entries, 0 to 4999
Data columns (total 9 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 order_id 5000 non-null int64
1 order_date 5000 non-null datetime64[ns]
2 customer_id 5000 non-null int64
3 city 5000 non-null object
4 province 5000 non-null object
5 brand 5000 non-null object
6 quantity 5000 non-null int64
7 item_price 5000 non-null int64
8 product_id 5000 non-null object
dtypes: datetime64[ns](1), int64(4), object(4)
memory usage: 351.7+ KB
Info:
None
[6] MEMBUAT KOLOM BARU city/province
order_id order_date customer_id brand quantity item_price product_id \
0 1730350 2019-12-11 13447 BRAND_F 24 113000 P1374
1 1677490 2019-07-31 0 BRAND_F 1 1164000 P1370
2 1704211 2019-10-18 16128 BRAND_H 12 747000 P1679
3 1679695 2019-08-07 16225 BRAND_H 6 590000 P1708
4 1679080 2019-08-05 0 BRAND_E 2 740000 P1201
city/province
0 Surakarta/Jawa Tengah
1 unknown/unknown
2 Jakarta Pusat/DKI Jakarta
3 Yogyakarta/Yogyakarta
4 unknown/unknown
[7] MEMBUAT HIERACHICAL INDEX
brand \
city/province order_date customer_id order_id product_id
Banda Aceh/Aceh 2019-04-17 12818 1642480 P1936 BRAND_K
2019-11-12 12360 1715116 P0758 BRAND_C
P3042 BRAND_R
2019-12-09 12374 1729036 P1660 BRAND_G
Bandar Lampung/Lampung 2019-01-15 12515 1619257 P0628 BRAND_C
quantity \
city/province order_date customer_id order_id product_id
Banda Aceh/Aceh 2019-04-17 12818 1642480 P1936 24
2019-11-12 12360 1715116 P0758 8
P3042 12
2019-12-09 12374 1729036 P1660 4
Bandar Lampung/Lampung 2019-01-15 12515 1619257 P0628 12
item_price
city/province order_date customer_id order_id product_id
Banda Aceh/Aceh 2019-04-17 12818 1642480 P1936 450000
2019-11-12 12360 1715116 P0758 695000
P3042 310000
2019-12-09 12374 1729036 P1660 2795000
Bandar Lampung/Lampung 2019-01-15 12515 1619257 P0628 695000
[8] MEMBUAT KOLOM total_price
brand \
city/province order_date customer_id order_id product_id
Banda Aceh/Aceh 2019-04-17 12818 1642480 P1936 BRAND_K
2019-11-12 12360 1715116 P0758 BRAND_C
P3042 BRAND_R
2019-12-09 12374 1729036 P1660 BRAND_G
Bandar Lampung/Lampung 2019-01-15 12515 1619257 P0628 BRAND_C
quantity \
city/province order_date customer_id order_id product_id
Banda Aceh/Aceh 2019-04-17 12818 1642480 P1936 24
2019-11-12 12360 1715116 P0758 8
P3042 12
2019-12-09 12374 1729036 P1660 4
Bandar Lampung/Lampung 2019-01-15 12515 1619257 P0628 12
item_price \
city/province order_date customer_id order_id product_id
Banda Aceh/Aceh 2019-04-17 12818 1642480 P1936 450000
2019-11-12 12360 1715116 P0758 695000
P3042 310000
2019-12-09 12374 1729036 P1660 2795000
Bandar Lampung/Lampung 2019-01-15 12515 1619257 P0628 695000
total_price
city/province order_date customer_id order_id product_id
Banda Aceh/Aceh 2019-04-17 12818 1642480 P1936 10800000
2019-11-12 12360 1715116 P0758 5560000
P3042 3720000
2019-12-09 12374 1729036 P1660 11180000
Bandar Lampung/Lampung 2019-01-15 12515 1619257 P0628 8340000
[9] SLICE DATASET UNTUK BULAN JANUARI 2019 SAJA
|