## Iris Dataset
This is a well-known data set containing iris species and sepal and petal measurements.
Various plotting methods in Pandas: [visualization guide](http://pandas.pydata.org/pandas-docs/version/0.18.1/visualization.html)
```
import os
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
filepath = "datasets/iris_data.csv"
data = pd.read_csv(filepath)
data.head()
# Number of rows
print(f'Number of rows: {data.shape[0]}')
# Column names
print(f'\nColumn names: {data.columns.tolist()}')
# Data types
print(f'\nData types:\n{data.dtypes}')
# number of each species present
data.species.value_counts()
# mean, median, and quantiles and ranges (max-min) for each petal and sepal measurement
stats_df = data.describe()
stats_df.loc['range'] = stats_df.loc['max'] - stats_df.loc['min']
out_fields = ['mean','25%','50%','75%', 'range']
stats_df = stats_df.loc[out_fields]
stats_df.rename({'50%': 'median'}, inplace=True)
stats_df
# mean and median for each species
data.groupby('species').agg(['mean', 'median'])
# If certain fields need to be aggregated differently, we can do:
from pprint import pprint
agg_dict = {field: ['mean', 'median'] for field in data.columns if field != 'species'}
agg_dict['petal_length'] = 'max'
pprint(agg_dict)
data.groupby('species').agg(agg_dict)
import matplotlib.pyplot as plt
%matplotlib inline
# A simple scatter plot of sepal_length vs sepal_width
ax = plt.axes()
ax.scatter(data.sepal_length, data.sepal_width)
# Label the axes
ax.set(xlabel='Sepal Length (cm)',
ylabel='Sepal Width (cm)',
title='Sepal Length vs Width');
# Histogram of petal_length
ax = plt.axes()
ax.hist(data.petal_length, bins=25);
ax.set(xlabel='Petal Length (cm)',
ylabel='Frequency',
title='Distribution of Petal Lengths');
# Histogram of each feature together
import seaborn as sns
sns.set_context('notebook')
ax = data.plot.hist(bins=25, alpha=0.5)
ax.set_xlabel('Size (cm)')
ax.set_title('Distribution of each feature');
# Histogram of each feature separately
fig,axes=plt.subplots(figsize=(15,10))
axList = data.hist(bins=25,ax=axes)
for ax in axList.flatten():
    if ax.is_last_row():   # Adding x label to last row
        ax.set_xlabel('Size (cm)')
    if ax.is_first_col():  # Adding y label to first column
        ax.set_ylabel('Frequency')
plt.tight_layout()
# Boxplots of each feature separately
fig,ax=plt.subplots(figsize=(15,10))
data.boxplot(by='species',ax=ax)
plt.tight_layout();
# single boxplot where the features are separated in the x-axis and species are colored with different hues
plot_data = data.set_index('species').stack().to_frame().reset_index().rename(columns={0:'size', 'level_1':'measurement'})
print(plot_data.head())
sns.set_style('white')
sns.set_context('notebook')
sns.set_palette('dark')
f = plt.figure(figsize=(15,10))
sns.boxplot(x='measurement', y='size',
hue='species', data=plot_data);
# Pairplot to examine the correlation between each of the measurements
sns.set_context('talk')
sns.pairplot(data, hue='species');
```
# Machine Learning at Scale, Part II
For this tutorial, we'll dig deeper into BIDMach's learning architecture. The examples so far have used convenience functions which assembled a Data Source, Learner, Model, Updater and Mixin classes into a trainable model. This time we'll separate out those components and see how they can be customized.
The dataset is from UCI and comprises PubMed abstracts. It is about 7.3 GB in text form. We'll compute an LDA topic model for this dataset.
First, let's initialize BIDMach again.
```
import $exec.^.lib.bidmach_notebook_init
if (Mat.hasCUDA > 0) GPUmem
```
Check the GPU memory again, and make sure you don't have any dangling processes.
## Large-scale Topic Models
A **Topic model** is a representation of a Bag-Of-Words corpus as several factors or topics. Each topic should represent a theme that recurs in the corpus. Concretely, the output of the topic model will be an (ntopics x nfeatures) matrix we will call <code>tmodel</code>. Each row of that matrix represents a topic, and the elements of that row are word probabilities for the topic (i.e. the rows sum to 1). There is more about topic models <a href="http://en.wikipedia.org/wiki/Topic_model">here on wikipedia</a>.
The **element <code>tmodel(i,j)</code> holds the probability that word j belongs to topic i**. Later we will examine the topics directly and try to make sense of them.
Let's construct a learner with a files data source. Most model classes will accept a String argument and assume it is a pattern for accessing a collection of files. To create the learner, we pass this pattern, which is used as a format string (filled in with a file index i) to enumerate each filename.
```
val mdir = "../data/uci/pubmed_parts/";
val (nn, opts) = LDA.learner(mdir+"part%02d.smat.lz4")
```
Note that this dataset is quite large, and isn't one of the ones loaded by <code>getdata.sh</code> in the <code>scripts</code> directory. You need to run the script <code>getpubmed.sh</code> separately (and plan a long walk or bike ride while you wait...).
This datasource uses just this sequence of files, and each matrix has 141043 rows. A number of options that control the files datasource are listed below. Most of these don't need to be set (you'll notice they're just set to their default values), but it's useful to know about them for customizing data sources.
```
opts.nstart = 0; // Starting file number
opts.nend = 10; // Ending file number
opts.order = 0; // (0) sample order, 0=linear, 1=random
opts.lookahead = 2; // (2) number of prefetch threads
opts.featType = 1; // (1) feature type, 0=binary, 1=linear
// These are specific to SfilesSource:
opts.eltsPerSample = 400 // how many rows to allocate (non-zeros per sample)
```
We're ready to go. LDA is a popular topic model, described <a href="http://en.wikipedia.org/wiki/Latent_Dirichlet_allocation">here on wikipedia</a>.
We use a fast version of LDA which uses an incremental multiplicative update described by Hoffman, Blei and Bach
<a href="https://www.cs.princeton.edu/~blei/papers/HoffmanBleiBach2010b.pdf">here</a>
### Tuning Options
Add tuning options for minibatch size (say 100k), number of passes (4) and dimension (<code>dim = 256</code>).
```
opts.batchSize=20000
opts.npasses=2
opts.dim=256
```
You invoke the learner the same way as before. You can change the options above after each run to optimize performance.
```
nn.train
```
Each training run creates a <code>results</code> matrix which is essentially a graph of the log likelihood vs. the number of input samples. The first row is the likelihood values, the second is the corresponding number of input samples processed. We can plot the results here:
```
plot(nn.results(1,?), nn.results(0,?))
```
## Evaluation
To evaluate the model, we save the model matrix itself, and also load a dictionary of the terms in the corpus.
```
val tmodel = FMat(nn.modelmat)
val dict = Dict(loadSBMat(mdir+"../pubmed.term.sbmat.lz4"))
```
The dictionary allows us to look up terms by their index, e.g. <code>dict(1000)</code>, by their string representation <code>dict("book")</code>, and by matrices of these, e.g. <code>dict(ii)</code> where <code>ii</code> is an IMat. Try a few such queries to the dict here:
Next we evaluate the entropy of each dimension of the model. Recall that the entropy of a discrete probability distribution is $E = -\sum_{i=1}^n p_i \ln(p_i)$. The rows of the matrix are the topic probabilities.
Compute the entropies for each topic:
```
val ent = -(tmodel dotr ln(tmodel))
ent.t // put them in a horizontal line
```
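For readers who prefer NumPy, the same row-wise entropy computation can be sketched in Python (an illustration only, not BIDMach code; `tmodel_np` below is a small random stand-in for the model matrix):
```
import numpy as np

# Small random stand-in for the (ntopics x nfeatures) model matrix
rng = np.random.default_rng(0)
tmodel_np = rng.random((256, 1000))
tmodel_np /= tmodel_np.sum(axis=1, keepdims=True)   # rows sum to 1

# Row-wise entropy E_i = -sum_j p_ij * ln(p_ij),
# mirroring  val ent = -(tmodel dotr ln(tmodel))
ent = -np.sum(np.where(tmodel_np > 0, tmodel_np * np.log(tmodel_np), 0.0), axis=1)
print(ent.mean())   # should be positive
```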
Get the mean value (should be positive)
```
mean(ent)
```
Find the smallest and largest entropy topic indices (use maxi2 and mini2). Call them <code>elargest</code> and <code>esmallest</code>.
```
val (vlargest,elargest) = maxi2(ent)
val (vsmallest,esmallest) = mini2(ent)
```
Now we'll sort the probabilities within each topic to bring the highest probability terms to the beginning. We sort down (descending order) along dimension 2 (rows) to do this. <code>bestp</code> gets the sorted values and <code>besti</code> gets the sorted indices, which are the feature indices.
```
val (bestp, besti) = sortdown2(tmodel,2)
```
Now examine the 100 strongest terms in each topic:
```
dict(besti(elargest,0->100))
dict(besti(esmallest,0->100))
```
Do you notice any difference in the coherence of these two topics?
> TODO: Fill in your answer here
By sorting the entropies, find the 2nd and 3rd smallest entropy topics. Give the top 100 terms in each topic below:
```
val (sent, ient) = sort2(ent)
// words for 2nd lowest entropy topic
dict(besti(ient(1),0->100))
// words for 3rd lowest entropy topic
dict(besti(ient(2),0->100))
```
## Running more topics
What would you expect to happen to the average topic entropy if you run fewer topics?
> TODO: answer here
Change the opts.dim argument above and try it. First note the entropy at dim = 256 below. Then run again with <code>dim=64</code> and put the new value below:
| dim | mean entropy |
|-----|--------------|
| 64  | ...          |
| 256 | ...          |
This exercise will require you to pull some data from the Quandl API. Quandl is currently the most widely used aggregator of financial market data.
As a first step, you will need to register a free account on the http://www.quandl.com website.
After you register, you will be provided with a unique API key, that you should store:
```
# Store the API key as a string - according to PEP8, constants are always named in all upper case
API_KEY = ''
```
Quandl has a large number of data sources, but, unfortunately, most of them require a Premium subscription. Still, there are also a good number of free datasets.
For this mini project, we will focus on equities data from the Frankfurt Stock Exchange (FSE), which is available for free. We'll try and analyze the stock prices of a company called Carl Zeiss Meditec, which manufactures tools for eye examinations, as well as medical lasers for laser eye surgery: https://www.zeiss.com/meditec/int/home.html. The company is listed under the stock ticker AFX_X.
You can find the detailed Quandl API instructions here: https://docs.quandl.com/docs/time-series
While there is a dedicated Python package for connecting to the Quandl API, we would prefer that you use the *requests* package, which can be easily downloaded using *pip* or *conda*. You can find the documentation for the package here: http://docs.python-requests.org/en/master/
Finally, apart from the *requests* package, you are encouraged to not use any third party Python packages, such as *pandas*, and instead focus on what's available in the Python Standard Library (the *collections* module might come in handy: https://pymotw.com/3/collections/ ).
Also, since you won't have access to DataFrames, you are encouraged to use Python's native data structures - preferably dictionaries, though some questions can also be answered using lists.
You can read more on these data structures here: https://docs.python.org/3/tutorial/datastructures.html
Keep in mind that the JSON responses you will be getting from the API map almost one-to-one to Python's dictionaries. Unfortunately, they can be very nested, so make sure you read up on indexing dictionaries in the documentation provided above.
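As a small illustration of that nested indexing (a hedged sketch with a made-up, simplified response shape similar to the one unpacked later in this notebook, not the exact Quandl payload):
```
# Hypothetical, simplified shape of a Quandl time-series response
response = {
    'dataset_data': {
        'column_names': ['Date', 'Open', 'High', 'Low', 'Close'],
        'data': [['2017-01-02', 34.99, 35.94, 34.99, 35.80]],
    }
}

# Drill down level by level with successive keys and indices
columns = response['dataset_data']['column_names']
first_row = response['dataset_data']['data'][0]
open_price = first_row[columns.index('Open')]
print(columns, first_row, open_price)
```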
```
# First, import the relevant modules
import requests
# Now, call the Quandl API and pull out a small sample of the data (only one day) to get a glimpse
# into the JSON structure that will be returned
database_code = 'FSE'
dataset_code = 'AFX_X'
return_format = 'json'
url = f'https://www.quandl.com/api/v3/datasets/{database_code}/{dataset_code}/data.{return_format}'
params_1 = dict(api_key=API_KEY, start_date='2014-01-01', end_date='2014-01-02')
res = requests.get(url, params = params_1)
print(res.status_code)
# Inspect the JSON structure of the object you created, and take note of how nested it is,
# as well as the overall structure
json_data = res.json()
print(json_data)
```
These are your tasks for this mini project:
1. Collect data from the Frankfurt Stock Exchange, for the ticker AFX_X, for the whole year 2017 (keep in mind that the date format is YYYY-MM-DD).
2. Convert the returned JSON object into a Python dictionary.
3. Calculate what the highest and lowest opening prices were for the stock in this period.
4. What was the largest change in any one day (based on High and Low price)?
5. What was the largest change between any two days (based on Closing Price)?
6. What was the average daily trading volume during this year?
7. (Optional) What was the median trading volume during this year. (Note: you may need to implement your own function for calculating the median.)
## 1. Collect data from the Frankfurt Stock Exchange, for the ticker AFX_X, for the whole year 2017 (keep in mind that the date format is YYYY-MM-DD)
```
params_2017 = dict(api_key=API_KEY, start_date='2017-01-01', end_date='2017-12-31')
res = requests.get(url, params = params_2017)
json_2017 = res.json()
print(json_2017)
```
## 2. Convert the returned JSON object into a Python dictionary.
```
import json
dict_2017 = json.loads(res.text)
dict_2017 = dict_2017['dataset_data']
dict_2017
# Pull out the data rows and column names (pandas is not needed here)
data = dict_2017['data']
column_names = dict_2017['column_names']
print(list(enumerate(column_names)))
```
## 3. Calculate what the highest and lowest opening prices were for the stock in this period.
```
data[0][1]
highest_opening_price = data[0][1]
lowest_opening_price = data[0][1]
for row in data:
    open_price = row[1]
    if open_price:
        highest_opening_price = max(open_price, highest_opening_price)
        lowest_opening_price = min(open_price, lowest_opening_price)
print('The highest opening price for the stock was: $' + '{:,.2f}'.format(highest_opening_price))
print('The lowest opening price for the stock was: $' + '{:,.2f}'.format(lowest_opening_price))
```
## 4. What was the largest change in any one day (based on High and Low price)?
```
data[1]
#largest_change = 0
largest_change = data[0][2] - data[0][3]
for row in data:
    change = row[2] - row[3]
    if change >= largest_change:
        largest_change = change
print('The largest stock price change on any given day was: $' + '{:,.2f}'.format(largest_change))
```
## 5. What was the largest change between any two days (based on Closing Price)?
```
# Largest change between two consecutive trading days, based on Closing Price (index 4)
largest_change_close = 0
for i in range(1, len(data)):
    change_close = abs(data[i][4] - data[i-1][4])
    if change_close > largest_change_close:
        largest_change_close = change_close
print('The largest change in closing price between any two days was: $' + '{:,.2f}'.format(largest_change_close))
```
## 6. What was the average daily trading volume during this year?
```
n_rows = len(data)
#print(n_rows)
average_trading_volume = 0
for row in data:
    average_trading_volume += row[6]
print('The average daily trading volume during this year was: ' + '{:,.2f}'.format(average_trading_volume / n_rows))
```
## 7. (Optional) What was the median trading volume during this year. (Note: you may need to implement your own function for calculating the median.)
```
volume = []
for row in data:
    volume.append(row[6])

def median(data_list):
    """Return the median of a list of numbers."""
    sorted_data_list = sorted(data_list)
    n_rows = len(data_list)
    if n_rows % 2:  # odd number of values: take the middle one
        return sorted_data_list[n_rows // 2]
    else:           # even number of values: average the two middle ones
        index = (n_rows - 1) // 2
        return (sorted_data_list[index] + sorted_data_list[index + 1]) / 2.0

print('The median trading volume during this year was: ' + '{:,.2f}'.format(median(volume)))
```
```
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import pandas as pd
import scipy as sp
import seaborn as sns
pd.set_option('display.max_columns', None)
# Input Paths
# HEMNET_PREDICTIONS_PATH = Path('/gpfs1/scratch/90days/s4436005/TCGA/03_08_20_v2/Slide_Predictions.csv')
# TUMOUR_PURITY_PATH = Path('/gpfs1/scratch/90days/s4436005/TCGA/COAD_tumor_purity.txt')
# TP53_PATH = Path('/QRISdata/Q1139/ST_Projects/HEMnet/HEMnet_Data/TCGA/COAD/samples_with_TP53.txt'
CLINICAL_PATH = Path('/QRISdata/Q1139/ST_Projects/HEMnet/HEMnet_Data/TCGA/COAD/COADREAD_clin_all.proc.txt')
HEMNET_TCGA_PATH = Path('/QRISdata/Q1139/ST_Projects/HEMnet/Figure_Data/TCGA_Validation/hemnet_tcga_combined.csv')
OUTPUT_PATH = Path('/QRISdata/Q1139/ST_Projects/HEMnet/Figure_Data/TCGA_Validation/20210619')
VERBOSE = True
#Verbose functions
if VERBOSE:
verbose_print = lambda *args: print(*args)
verbose_save_img = lambda img, path, img_type: img.save(path, img_type)
verbose_save_fig = lambda fig, path, dpi=300: fig.savefig(path, dpi=dpi, bbox_inches = 'tight')
else:
verbose_print = lambda *args: None
verbose_save_img = lambda *args: None
verbose_save_fig = lambda *args: None
# Load HEMnet predictions and sequencing estimates of tumour purity
# hemnet_preds = pd.read_csv(HEMNET_PREDICTIONS_PATH, index_col = 0)
# tumour_purity = pd.read_csv(TUMOUR_PURITY_PATH, sep = '\t')
# tp53 = pd.read_csv(TP53_PATH, sep = '\t', header=None)
clinical = pd.read_csv(CLINICAL_PATH, sep='\t')
hemnet_tcga = pd.read_csv(HEMNET_TCGA_PATH, index_col=0)
hemnet_tcga
clinical
#Get sample ID
clinical['ID'] = clinical['sample'].str.replace(r'TCGA-', '')
clinical = clinical.reset_index()
clinical = clinical.set_index('ID')
clinical.head()
hemnet_tcga_clinical = pd.concat([hemnet_tcga, clinical], join='inner', axis=1)
hemnet_tcga_clinical
hemnet_tcga_clinical.to_csv(OUTPUT_PATH.joinpath('hemnet_tcga_clinical.csv'))
hemnet_tcga_clinical.groupby('ClinicalStage')['sample'].nunique()
hemnet_tcga_clinical.groupby('MSIstatus')['sample'].nunique()
hemnet_tcga_clinical.groupby('CMS-RFclassifier')['sample'].nunique()
hemnet_tcga_clinical[hemnet_tcga_clinical['ClinicalStage']==1.0]
combined = hemnet_tcga_clinical
# Label data points by Clinical Stage
fig, ax = plt.subplots(figsize = (7,7))
x_column = 'ABSOLUTE'
y_column = 'Cancer_Area_Proportion'
# combined_filtered = combined[[x_column, y_column, 'Mutation_Type']].dropna()
combined_filtered = combined[[x_column, y_column, 'ClinicalStage']].dropna()
x_values = combined_filtered[x_column].values
y_values = combined_filtered[y_column].values
# Classify mutation type impacts
# combined_filtered['ClinicalStage'] = combined_filtered['ClinicalStage'].replace({
# 1.0: 'Stage 1',
# 2.0: 'Stage 2',
# 3.0: 'Stage 3',
# 4.0: 'Stage 4'
# })
# sns.scatterplot(x='ABSOLUTE', y='Cancer_Area_Proportion', hue='ClinicalStage',
# hue_order=['Stage 1', 'Stage 2', 'Stage 3', 'Stage 4'],
# data = combined_filtered, s=75, legend='full', ax = ax)
sns.scatterplot(x='ABSOLUTE', y='Cancer_Area_Proportion', hue='ClinicalStage',
data = combined_filtered, s=75, legend='full', ax = ax)
ax.legend(title='Clinical Stage')
# plt.legend(bbox_to_anchor=(0.4, 0.91), borderaxespad=0)
plt.legend(bbox_to_anchor=(0.3, 0.91), borderaxespad=0)
linreg = sp.stats.linregress(x_values, y_values)
ax.plot(np.linspace(0,10, 50), linreg.intercept + linreg.slope * np.linspace(0,10, 50), color='grey')
ax.text(0.02 , 1.05 , f'Slope={linreg.slope :.2f} Intercept={linreg.intercept :.2f} $R^2$={linreg.rvalue :.2f} p={linreg.pvalue :.3f}'
, fontsize = 15)
ax.set_xlabel(f'Tumour Purity estimated by {x_column} method', fontsize = 15)
ax.set_ylabel('Cancer Area Proportion', fontsize = 15)
ax.set_xlim(0,1)
ax.set_ylim(0,1.10)
plt.rcParams['svg.fonttype'] = 'none'
verbose_save_fig(fig, OUTPUT_PATH.joinpath('HEMnet_vs_ABSOLUTE_scatterplot_clinical_stage.svg'))
# Label data points by MSI status
fig, ax = plt.subplots(figsize = (7,7))
x_column = 'ABSOLUTE'
y_column = 'Cancer_Area_Proportion'
# combined_filtered = combined[[x_column, y_column, 'Mutation_Type']].dropna()
combined_filtered = combined[[x_column, y_column, 'MSIstatus']].dropna()
x_values = combined_filtered[x_column].values
y_values = combined_filtered[y_column].values
# # Classify mutation type impacts
# combined_filtered['ClinicalStage'] = combined_filtered['ClinicalStage'].replace({
# 1.0: 'Stage 1',
# 2.0: 'Stage 2',
# 3.0: 'Stage 3',
# 4.0: 'Stage 4'
# })
sns.scatterplot(x='ABSOLUTE', y='Cancer_Area_Proportion', hue='MSIstatus',
data = combined_filtered, s=75, legend='full', ax = ax)
ax.legend(title='MSI Status')
# plt.legend(bbox_to_anchor=(0.4, 0.91), borderaxespad=0)
plt.legend(bbox_to_anchor=(0.3, 0.91), borderaxespad=0)
linreg = sp.stats.linregress(x_values, y_values)
ax.plot(np.linspace(0,10, 50), linreg.intercept + linreg.slope * np.linspace(0,10, 50), color='grey')
ax.text(0.02 , 1.05 , f'Slope={linreg.slope :.2f} Intercept={linreg.intercept :.2f} $R^2$={linreg.rvalue :.2f} p={linreg.pvalue :.3f}'
, fontsize = 15)
ax.set_xlabel(f'Tumour Purity estimated by {x_column} method', fontsize = 15)
ax.set_ylabel('Cancer Area Proportion', fontsize = 15)
ax.set_xlim(0,1)
ax.set_ylim(0,1.10)
plt.rcParams['svg.fonttype'] = 'none'
verbose_save_fig(fig, OUTPUT_PATH.joinpath('HEMnet_vs_ABSOLUTE_scatterplot_MSI_status.svg'))
# Label data points by CMS subtype (CMS-RFclassifier)
fig, ax = plt.subplots(figsize = (7,7))
x_column = 'ABSOLUTE'
y_column = 'Cancer_Area_Proportion'
# combined_filtered = combined[[x_column, y_column, 'Mutation_Type']].dropna()
combined_filtered = combined[[x_column, y_column, 'CMS-RFclassifier']].dropna()
x_values = combined_filtered[x_column].values
y_values = combined_filtered[y_column].values
# # Classify mutation type impacts
# combined_filtered['ClinicalStage'] = combined_filtered['ClinicalStage'].replace({
# 1.0: 'Stage 1',
# 2.0: 'Stage 2',
# 3.0: 'Stage 3',
# 4.0: 'Stage 4'
# })
sns.scatterplot(x='ABSOLUTE', y='Cancer_Area_Proportion', hue='CMS-RFclassifier',
hue_order=['CMS1', 'CMS2', 'CMS3', 'CMS4', 'NOLBL'],
data = combined_filtered, s=75, legend='full', ax = ax)
ax.legend(title='CMS-RFclassifier')
# plt.legend(bbox_to_anchor=(0.4, 0.91), borderaxespad=0)
plt.legend(bbox_to_anchor=(0.3, 0.91), borderaxespad=0)
linreg = sp.stats.linregress(x_values, y_values)
ax.plot(np.linspace(0,10, 50), linreg.intercept + linreg.slope * np.linspace(0,10, 50), color='grey')
ax.text(0.02 , 1.05 , f'Slope={linreg.slope :.2f} Intercept={linreg.intercept :.2f} $R^2$={linreg.rvalue :.2f} p={linreg.pvalue :.3f}'
, fontsize = 15)
ax.set_xlabel(f'Tumour Purity estimated by {x_column} method', fontsize = 15)
ax.set_ylabel('Cancer Area Proportion', fontsize = 15)
ax.set_xlim(0,1)
ax.set_ylim(0,1.10)
plt.rcParams['svg.fonttype'] = 'none'
verbose_save_fig(fig, OUTPUT_PATH.joinpath('HEMnet_vs_ABSOLUTE_scatterplot_CMS-RFclassifier.svg'))
```
## Methods 1 - A model setup
The main idea of the study is to estimate an upper bound of alkalinity generation in the Wadden Sea.
The calculation of alkalinity changes is based on the concept of "explicitly conservative form of total alkalinity" ($\text{TA}_{\text{ec}}$) ([Wolf-Gladrow et al., 2007]):
$\text{TA}_{\text{ec}} = \lbrack\text{Na}^{+}\rbrack + 2\lbrack\text{Mg}^{2 +}\rbrack + 2\lbrack\text{Ca}^{2 +}\rbrack + \lbrack \text{K}^{+}\rbrack + 2\lbrack\text{Sr}^{2 +}\rbrack + \text{TNH}_{3} - \lbrack\text{Cl}^{-}\rbrack - \lbrack\text{Br}^{-}\rbrack - \lbrack\text{NO}_{3}^{-}\rbrack - \text{TPO}_{4} - 2\text{TSO}_{4} - \text{THF} - \text{THNO}_{2}$
, where
$\text{TNH}_{3} = \lbrack\text{NH}_{3}\rbrack + \lbrack\text{NH}_{4}^{+}\rbrack$,
$\text{TPO}_{4} = \lbrack\text{H}_{3}\text{PO}_{4}\rbrack + \lbrack \text{H}_{2}\text{PO}_{4}^{-}\rbrack + \lbrack\text{HPO}_{4}^{2 -}\rbrack + \lbrack\text{PO}_{4}^{3 -}\rbrack$,
$\text{TSO}_{4} = \lbrack\text{SO}_{4}^{2 -}\rbrack + \lbrack\text{HSO}_{4}^{-}\rbrack$,
$\text{THF} = \lbrack \text{F}^{-}\rbrack + \lbrack\text{HF}\rbrack$, and
$\text{THNO}_{2} = \lbrack\text{NO}_{2}^{-}\rbrack + \lbrack\text{HNO}_{2}\rbrack$.
[Wolf-Gladrow et al., 2007]: https://doi.org/10.1016/j.marchem.2007.01.006
Increase or decrease of concentrations of any of the $\text{TA}_{\text{ec}}$ compounds will change alkalinity.
For example, an increase of concentration of $\lbrack\text{Ca}^{2 +}\rbrack$ by 1 mole will increase TA by 2 moles.
Or an increase of concentration of $\lbrack\text{NO}_{3}^{-}\rbrack$ by 1 mole will decrease TA by 1 mole.
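To make this bookkeeping concrete, here is a minimal Python sketch of the $\text{TA}_{\text{ec}}$ charge balance (an illustration of the expression above only; the function name and argument names are ours, and it is not part of the model code):
```
def ta_ec(Na=0, Mg=0, Ca=0, K=0, Sr=0, TNH3=0,
          Cl=0, Br=0, NO3=0, TPO4=0, TSO4=0, THF=0, THNO2=0):
    """Explicitly conservative total alkalinity (Wolf-Gladrow et al., 2007);
    all concentrations in the same units (e.g. mM m-3)."""
    return (Na + 2*Mg + 2*Ca + K + 2*Sr + TNH3
            - Cl - Br - NO3 - TPO4 - 2*TSO4 - THF - THNO2)

# Adding 1 mole of Ca2+ raises TA_ec by 2 moles;
# adding 1 mole of NO3- lowers it by 1 mole.
print(ta_ec(Ca=1), ta_ec(NO3=1))  # -> 2 -1
```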
These changes can be caused by biogeochemical reactions or other sources like freshwater fluxes, riverine inputs, fluxes to and from sediments (see for example [Zeebe and Wolf-Gladrow (2001)], [Follows et al. (2006)], [Wolf-Gladrow et al. (2007)]).
[Zeebe and Wolf-Gladrow (2001)]: https://www.elsevier.com/books/co2-in-seawater-equilibrium-kinetics-isotopes/zeebe/978-0-444-50946-8
[Follows et al. (2006)]: https://doi.org/10.1016/j.ocemod.2005.05.004
[Wolf-Gladrow et al. (2007)]: https://doi.org/10.1016/j.marchem.2007.01.006
In order to estimate alkalinity generation in the Wadden Sea, we should consider all processes going on in the Wadden Sea that can change the concentrations of species in $\text{TA}_{\text{ec}}$.
These processes are biogeochemical transformations listed in [Wolf-Gladrow et al. (2007)] and transport processes within and between water column and sediments of the Wadden Sea, advective exchange of the Wadden Sea with the surrounding areas.
The reason that we have to include sediments along with the water column is the following.
Some biogeochemical transformations in the coastal area, which can change TA, are active in the water column, while others are more active in the sediments.
For example, typically denitrification takes place in the sediments, in the absence of oxygen ([Libes, 2011]).
Primary production is often higher in the water column, where sunlight is more available ([Libes, 2011]).
Therefore, we should consider both the water column and sediments.
In this study, to calculate alkalinity, we consider both the water column and sediments of the Wadden Sea.
We use a vertically resolved 1-D box as a proxy of the Wadden Sea, we split this box into different layers (to resolve a vertical resolution), calculate the necessary biogeochemical reactions increments for each layer, and evaluate the mixing between these layers.
Also, we take into consideration the exchange of the water column of the 1-D box with an external pool (the Wadden Sea surrounding areas).
[Wolf-Gladrow et al. (2007)]: https://doi.org/10.1016/j.marchem.2007.01.006
[Libes, 2011]: https://www.elsevier.com/books/introduction-to-marine-biogeochemistry/libes/978-0-12-088530-5
A model setup for calculations consist of:
1. The 1-D Sympagic-Pelagic-Benthic transport Model, SPBM ([Yakubov et al., 2019], https://github.com/BottomRedoxModel/SPBM). It is a governing program resolving a transport equation (diffusive and vertical advective (sinking, burying) terms) between and within the water column and sediments. SPBM also parametrizes horizontal exchange with the external pool (the Wadden Sea surrounding areas).
2. A biogeochemical model (https://github.com/BottomRedoxModel/brom_niva_module/tree/dev-sham). It sends sources minus sinks terms to the transport model. The biogeochemical model is explained thoroughly in the Methods 2 section.
The software is written in Fortran 2003.
The SPBM and biogeochemical model are linked through the Framework for Aquatic Biogeochemical Models, FABM ([Bruggeman and Bolding, 2014]).
[Yakubov et al., 2019]: https://doi.org/10.3390/w11081582
[Bruggeman and Bolding, 2014]: https://doi.org/10.1016/j.envsoft.2014.04.002
### The grid
For calculations, to study alkalinity generation in the Wadden Sea we use the vertically resolved box (the modeling domain) containing the water column (the water domain) and sediments (the sediment domain).
This vertically resolved box is a proxy of the Wadden Sea.
Assuming a mean depth of the Wadden Sea of 2.5 m ([van Beusekom et al., 1999]), we split the water domain into two layers of 1.25 m and 1.15 m depth.
Near the bottom, we have a benthic boundary layer (BBL) consisting of 2 layers of 0.05 m depth each.
The BBL is a layer with eddy diffusion coefficients decreasing linearly to zero at the sediment-water interface (SWI).
The sediment domain has 40 layers of 0.01 m depth each.
[van Beusekom et al., 1999]: https://link.springer.com/article/10.1007/BF02764176

**Figure M1-1**. The model grid scheme.
Using the proposed grid, the transport program (SPBM) updates the concentrations of the state variables (listed in the Methods 2 section) in each layer at every time step (300 s), with contributions from diffusion, reaction (concentration increments from the biogeochemical model), advection, and horizontal exchange with the external pool.
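For orientation, the layer structure described above can be written out explicitly (a plain Python sketch of the grid spacing only, not an SPBM input file):
```
import numpy as np

# Layer thicknesses (m), top to bottom, as described above
water_layers    = [1.25, 1.15]      # water column
bbl_layers      = [0.05, 0.05]      # benthic boundary layer (BBL)
sediment_layers = [0.01] * 40       # sediments

dz = np.array(water_layers + bbl_layers + sediment_layers)
print(f'{len(dz)} layers; water column = {dz[:4].sum():.2f} m; sediments = {dz[4:].sum():.2f} m')
# -> 44 layers; water column = 2.50 m; sediments = 0.40 m
```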
### Forcing, initial and boundary conditions
The functioning of the transport and biogeochemical models needs some forcing (for example, to calculate sources minus sinks terms the biogeochemical model requires data of seawater temperature, salinity, and photosynthetically active radiation (PAR)).
Also, we have to establish state variables initial conditions and conditions on the boundaries of the modeling box.
The data for forcing (seawater temperature, salinity, density) and initial conditions are averaged to one year from the World Ocean Database (WOD) for the years 2000 - 2010 from a rectangular region (the Southern North Sea, 54.35-55.37$^{\circ}$N 6.65-8.53$^{\circ}$E) that is adjacent to the North Frisian Wadden Sea.
The data from WOD are stored in `wadden_sea.nc` file.
The data of Chlorophyll a are taken from [Loebl et al. (2007)].
The data of $\text{NH}_{4}^{+}$ are taken from [van Beusekom et al. (2009)].
Boundary conditions are set up for $\text{O}_{2}$ at the surface boundary as an exchange with the atmosphere as in ([Yakushev et al., 2017]).
For all other species, boundary conditions at the bottom and the surface interfaces of the model box are set to zero fluxes.
[Loebl et al. (2007)]: https://doi.org/10.1016/j.seares.2007.06.003
[van Beusekom et al. (2009)]: https://doi.org/10.1016/j.seares.2008.06.005
[Yakushev et al., 2017]: https://doi.org/10.5194/gmd-10-453-2017
For diffusive updates, SPBM needs to know the vertical diffusion coefficients in the water column and the dispersion coefficients in sediments (which are analogs to vertical diffusion coefficients in the water column).
The vertical diffusion coefficients in the water column are calculated according to the vertical density distributions following [Gargett (1984)].
Vertical advective updates in the water column (sinking of the particles) are calculated according to the sinking velocities of particles.
The dispersion coefficients in sediments and sinking velocities of particles are discussed in the Methods 3 section.
Vertical advective updates in the sediments (burying) are neglected (no burying).
[Gargett (1984)]: https://doi.org/10.1357/002224084788502756
SPBM calculates the horizontal exchange of some state variables between the modeling domain and the external pool.
To supply the model water domain with nutrients for the proper functioning of the phytoplankton model, we introduce horizontal exchange of concentration of phosphates, ammonium, nitrates, and silicates in the modeling domain with concentrations in the external pool.
The concentrations of variables in the external pool are considered to have a permanent seasonal profile.
The seasonal profiles of phosphates, nitrate, and silicates external pool concentrations are from the World Ocean Database from the same region as the data for forcing.
Ammonium external pool seasonal profile concentrations are from [van Beusekom et al. (2009)].
Horizontal exchange is controlled by the horizontal diffusivity coefficient$\ K_{h}$ ([Okubo, 1971], [Okubo, 1976]).
The value of the horizontal diffusivity coefficient is discussed in the Methods 3 section.
Along with concentrations of the corresponding elements, phosphate, ammonium and nitrate exchange also affects alkalinity according to $\text{TA}_{\text{ec}}$ expression.
Sulfate ($\text{SO}_{4}^{2 -}$) is a component of $\text{TA}_{\text{ec}}$, so its horizontal exchange is also taken into account when evaluating the alkalinity exchange between the model water domain and the external pool.
$\text{SO}_{4}^{2 -}$ affects TA according to the $\text{TA}_{\text{ec}}$ expression as well.
It is a major ion so its concentration in the external pool is approximated by a constant value of 25000 $\text{mM m}^{- 3}$.
The advective exchange of other state variables is not considered (concentrations of these state variables in the external pool are assumed to be similar to the concentrations in the modeling domain).
[van Beusekom et al. (2009)]: https://doi.org/10.1016/j.seares.2008.06.005
[Okubo, 1971]: https://doi.org/10.1016/0011-7471(71)90046-5
[Okubo, 1976]: https://doi.org/10.1016/0011-7471(76)90897-4
SPBM calculates the allochthonous organic matter influx to the modeling domain.
To reflect the heterotrophic nature of the Wadden Sea ([van Beusekom et al., 1999]) we add an additional advective influx of the particulate OM state variable ($\text{POM}$).
This $\text{POM}$ inflow is adopted from the value for the net import of OM to the Sylt-Rømø basin in the North Frisian Wadden Sea (110 $\text{g}\ \text{m}^{- 2}\ \text{year}^{- 1}$) reported in ([van Beusekom et al., 1999]) as a sinusoidal curve with a maximum in May ([Joint and Pomroy, 1993]; [de Beer et al., 2005]).
This value is also close to the Wadden Sea average OM input (100 $\text{g}\ \text{m}^{- 2}\ \text{year}^{- 1}$) from the North Sea ([van Beusekom et al., 1999]).
[van Beusekom et al., 1999]: https://doi.org/10.1007/BF02764176
[Joint and Pomroy, 1993]: https://www.int-res.com/articles/meps/99/m099p169.pdf
[de Beer et al., 2005]: https://doi.org/10.4319/lo.2005.50.1.0113
IPython notebook `s_1_generate_netcdf.ipynb` reads the data from WOD (`wadden_sea.nc`) and forms another NetCDF data file `wadden_sea_out.nc` which contains the data filtered and averaged to one year, calculated diffusion coefficients, calculated theoretical surface PAR values for the region of the Wadden Sea, and calculated OM influx.
The governing program SPBM uses `wadden_sea_out.nc` to get all the necessary information.
There is an IPython notebook to check the data written in `wadden_sea_out.nc` - `s_2_check_data.ipynb`
### Preliminary evaluations for the biogeochemical model construction
We have a tool to calculate the transport of the state variables in the multilayer box representing the Wadden Sea, but we are still missing the biogeochemical model to update the concentrations of the state variables due to biogeochemical reactions.
Here we provide the reasoning to include some reactions and skip others.
There are thirteen terms in $\text{TA}_{\text{ec}}$ expression and the most abundant biogeochemical processes in the coastal ocean change the concentrations of six of them:
$2\lbrack\text{Ca}^{2 +}\rbrack$, $\text{TNH}_{3}$, $\lbrack\text{NO}_{3}^{-}\rbrack$, $\text{TPO}_{4}$, $2\text{TSO}_{4}$, $\text{THNO}_{2}$.
Therefore, if the biogeochemical reactions change the concentration of certain terms, they also change TA.
The influence of the biogeochemical reactions on alkalinity directly follows from $\text{TA}_{\text{ec}}$ expression ([Wolf-Gladrow et al., 2007], see also the definition of $\text{TA}_{\text{ec}}$ in the beginning of this Section):
[Wolf-Gladrow et al., 2007]: https://doi.org/10.1016/j.marchem.2007.01.006
1. Nutrient assimilation by primary producers.
* Assimilation of one mole of $\text{NO}_{3}^{-}$ or $\text{NO}_{2}^{-}$ increases alkalinity by one mole, assimilation of one mole of $\text{NH}_{4}^{+}$ decrease alkalinity by one mole.
* Assimilation of one mole of phosphate increases alkalinity by one mole.
2. Organic matter degradation (the per-carbon alkalinity gains of these pathways are checked numerically in the sketch just after this list).
* Oxygen respiration increases alkalinity by 15 moles ($16\text{NH}_{3} - 1\text{H}_{3}\text{PO}_{4}$) per 106 moles of $\text{CH}_{2}\text{O}$ oxidized:
$(\text{CH}_{2}\text{O})_{106}(\text{NH}_{3})_{16}\text{H}_{3}\text{PO}_{4} + 106\text{O}_{2} \rightarrow 106\text{CO}_{2} + 16\text{NH}_{3} + \text{H}_{3}\text{PO}_{4} + 106\text{H}_{2}\text{O}$.
* Denitrification increases alkalinity by 99.8 moles ($84.8\text{HNO}_{3} + 16\text{NH}_{3} - 1\text{H}_{3}\text{PO}_{4}$) per 106 moles of $\text{CH}_{2}\text{O}$ oxidized:
$(\text{CH}_{2}\text{O})_{106}(\text{NH}_{3})_{16}\text{H}_{3}\text{PO}_{4} + 84.8\text{HNO}_{3} \rightarrow 106\text{CO}_{2} + 42.4\text{N}_{2} + 16\text{NH}_{3} + \text{H}_{3}\text{PO}_{4} + 148.4\text{H}_{2}\text{O}$.
* Sulfate reduction increases alkalinity by 121 moles ($2 \cdot 53\text{SO}_{4}^{2 -} + 16\text{NH}_{3} - 1\text{H}_{3}\text{PO}_{4}$) per 106 moles of $\text{CH}_{2}\text{O}$ oxidized:
$(\text{CH}_{2}\text{O})_{106}(\text{NH}_{3})_{16}\text{H}_{3}\text{PO}_{4} + 53\text{SO}_{4}^{2-} \rightarrow 106\text{HCO}_{3}^{-} + 16\text{NH}_{3} + \text{H}_{3}\text{PO}_{4} + 53\text{H}_{2}\text{S}$.
* Other OM degradation reactions.
3. Nitrification, which decreases alkalinity by two moles per mole of $\text{NH}_{4}^{+}$ oxidized:
$\text{NH}_{4}^{+} + 1.5\text{O}_{2} \rightarrow \text{NO}_{3}^{-} + 2\text{H}^{+} + \text{H}_{2}\text{O}$.
4. Calcium carbonate precipitation and dissolution.
* Precipitation of one mole of calcium carbonate decreases alkalinity by two moles:
$\text{Ca}^{2+} + 2\text{HCO}_{3}^{-} \rightarrow \text{CaCO}_{3} + \text{CO}_{2} + \text{H}_{2}\text{O}$
or
$\text{Ca}^{2+} + \text{CO}_{3}^{2-} \rightarrow \text{CaCO}_{3}$.
* Calcium carbonate dissolution increases alkalinity by two moles per one mole of calcium carbonate dissolved:
$\text{CaCO}_{3} + \text{CO}_{2} + \text{H}_{2}\text{O} \rightarrow \text{Ca}^{2 +} + 2\text{HCO}_{3}^{-}$.
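As a quick numerical check of the organic matter degradation stoichiometries above (a sketch based only on the coefficients quoted in list item 2, not on the model itself), the alkalinity gain per mole of organic carbon oxidized can be computed directly:
```
# TA change per 106 mol CH2O oxidized, from the reactions above:
# oxygen respiration:  +16 (NH3) - 1 (H3PO4)          = +15
# denitrification:     +84.8 (HNO3) + 16 (NH3) - 1    = +99.8
# sulfate reduction:   +2*53 (SO4^2-) + 16 (NH3) - 1  = +121
pathways = {
    'oxygen respiration': 16 - 1,
    'denitrification': 84.8 + 16 - 1,
    'sulfate reduction': 2*53 + 16 - 1,
}
for name, d_ta in pathways.items():
    print(f'{name}: {d_ta} mol TA per 106 mol CH2O ({d_ta/106:.2f} per mol C)')
```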
Now we can try to estimate which of these processes are the most important ones for alkalinity changes in the Wadden Sea.
At first, we write down the mean concentrations of alkalinity and the mentioned six compounds ($\lbrack\text{Ca}^{2 +}\rbrack$, $\text{TNH}_{3}$, $\lbrack\text{NO}_{3}^{-}\rbrack$, $\text{TPO}_{4}$, $\text{TSO}_{4}$, $\text{THNO}_{2}$; all concentrations are in $\text{mM m}^{- 3}$) in the Southern Wadden Sea.
We use these mean concentrations from the Southern Wadden Sea as the initial state of concentrations in the Wadden Sea before local biogeochemical transformations.
This way we can see the concentrations of the $\text{TA}_{\text{ec}}$ compounds that correspond to $\text{TA}_{\text{ec}}$; afterwards, we can track how biogeochemical transformations change alkalinity.
```
import src.fetch_data as fd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# get some data from the World Ocean Database (WOD)
par, temperature, no3, ammonium, po4, si, irradiance = fd.get_data()
f'NH4={ammonium.mean()}; NO3={no3.mean()}; PO4={po4.mean()}'
```
The nitrite concentration is negligibly small, so we skip it.
Also, we assume that the average TA concentration equals 2300 $\text{mM m}^{- 3}$.
$\text{Ca}^{2 +}$ and $\text{TSO}_{4}$ are the major ions of seawater with the following approximate concentrations (in $\text{mM m}^{- 3}$):
```
Ca, SO4 = (10000, 25000)
```
Initial concentrations of TA compound elements before local biogeochemical transformations:
|Parameter:|$$\lbrack\text{Ca}^{2+}\rbrack$$|$$\text{TNH}_{3}$$|$$\lbrack\text{NO}_{3}^{-}\rbrack$$|$$\text{TPO}_{4}$$|$$\text{TSO}_{4}$$|$$\text{THNO}_{2}$$|$$\text{TA}$$|
|:-|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
|Values, $\text{mM m}^{- 3}$:|10000|3.4|16.1|0.6|25000|0|2300|
These values correspond to each other; for example, a $\text{NO}_{3}^{-}$ concentration of 16 $\text{mM m}^{- 3}$ corresponds to a TA of 2300 $\text{mM m}^{- 3}$.
An increase of $\text{NO}_{3}^{-}$ by one mole will decrease TA by one mole (due to its negative charge).
Thus, we can track TA changes through changes of its compound ions.
To understand how biogeochemical reactions can affect TA we make a function calculating TA changes according to $\text{TA}_{\text{ec}}$ expression:
$$\delta [\text{TA}] = 2\delta [\text{Ca}^{2 +}]
- 2\delta [\text{TSO}_{4}]
+ \delta [\text{NH}_{4}^{+}]
- \delta [\text{NO}_{3}^{-}]
- \delta [\text{PO}_{4}^{-}]$$
For example, if $\text{NO}_{3}^{-}$ drops to zero (from 16 $\text{mM m}^{- 3}$), alkalinity will increase by 16 $\text{mM m}^{- 3}$.
Given a change of a particular compound, we can track the corresponding TA change.
```
def alk_change(TA, dCa=0, dSO4=0, dNH4=0, dNO3=0, dPO4=0):
    return TA + 2*dCa - 2*dSO4 + dNH4 - dNO3 - dPO4

def sinusoidal(max_value):
    """Creates a sinusoidal line with a period of 365 days,
    a minimum value of zero,
    and a maximum value of max_value"""
    day = np.arange(0, 365, 1)
    return (1/2)*max_value*(1 + np.sin(2*np.pi*((day - 90)/365)))
```
Let's test a TA change due to a change of $\text{Ca}^{2 +}$ concentration.
For example, calcifiers consume 100 $\text{mM m}^{- 3}$ of $\text{Ca}^{2 +}$ during a year, and then an equal amount of calcifiers' skeletons dissolves, restoring the concentration of $\text{Ca}^{2 +}$ at the end of the year.
```
dCa = -sinusoidal(100)
Ca_year = Ca + dCa
TA_year = alk_change(TA = 2300, dCa = dCa)
ox = np.arange(0,365,1)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(2, 1, 1) # row-col-num
ax1 = fig.add_subplot(2, 1, 2)
ax.plot(ox, Ca_year); ax1.plot(ox, TA_year)
ax.set_ylabel('Ca$^{2+}$'); ax1.set_ylabel('TA');
ax1.set_xlabel('day')
```
**Figure M1-2**. TA response to $\text{Ca}^{2 +}$ change.
We see that consuming 100 $\text{mM m}^{- 3}$ of $\text{Ca}^{2 +}$ decreases alkalinity by 200 $\text{mM m}^{- 3}$, which follows directly from the $\text{TA}_{\text{ec}}$ expression.
The local activities of calcifiers cannot increase alkalinity above 2300 $\text{mM m}^{- 3}$.
To increase TA we need an input of $\text{Ca}^{2 +}$, which can come in the form of $\text{Ca}^{2 +}$ or $\text{CaCO}_3$.
Additional $\text{Ca}^{2 +}$ can come with terrestrial inflow.
We do not consider it here since we are interested in biogeochemical transformation occurring in the Wadden Sea.
The supply of allochtonous $\text{CaCO}_3$ to the Wadden Sea has not yet been reported ([Thomas et al., 2009]).
Calcium carbonate related biogeochemical processes cannot increase alkalinity in the Wadden Sea.
As a first approximation, and in line with the goal of calculating the maximum alkalinity generation in the Wadden Sea due to biogeochemical processes, we can skip $\text{CaCO}_3$ precipitation/dissolution when preparing the biogeochemical model.
[Thomas et al., 2009]: https://doi.org/10.5194/bg-6-267-2009
Now let's assume that sulfate reduction decreases $\text{SO}_{4}^{2 -}$ by 100 $\text{mM m}^{- 3}$.
```
dSO4 = -sinusoidal(100)
SO4_year = SO4 + dSO4
TA_year = alk_change(TA = 2300, dSO4 = dSO4)
fig = plt.figure(figsize=(8, 6))
ax, ax1 = (fig.add_subplot(2, 1, 1), fig.add_subplot(2, 1, 2))
ax.plot(ox, SO4_year); ax1.plot(ox, TA_year)
ax.set_ylabel('SO$_4^{2-}$'); ax1.set_ylabel('TA')
ax1.set_xlabel('day')
```
**Figure M1-3**. TA response to $\text{SO}_{4}^{2 -}$ change.
The main difference from $\text{Ca}^{2 +}$ is that $\text{SO}_{4}^{2 -}$ is negatively charged.
So a decrease of $\text{SO}_{4}^{2 -}$ by 100 $\text{mM m}^{- 3}$ increases TA by 200 $\text{mM m}^{- 3}$.
Also, since $\text{SO}_{4}^{2 -}$ is a major ion and very abundant in seawater, sulfate reduction has tremendous potential to increase alkalinity.
Therefore, sulfate reduction can be the most important reaction while considering alkalinity generation due to biogeochemical processes in the coastal area.
We have another quite abundant negatively charged conservative ion in the coastal area - $\text{NO}_{3}^{-}$.
According to $\text{TA}_{\text{ec}}$ expression it changes TA by one mole per mole of $\text{NO}_{3}^{-}$ consumed/excreted.
The concentration of $\text{NO}_{3}^{-}$ is far lower compared to that of the $\text{SO}_{4}^{2 -}$ ion.
The average annual $\text{NO}_{3}^{-}$ concentration in the German Bight of 16 $\text{mM m}^{- 3}$ corresponds to TA of 2300 $\text{mM m}^{- 3}$, so consumption of 16 $\text{mM m}^{- 3}$ of $\text{NO}_{3}^{-}$ will increase alkalinity only by 16 $\text{mM m}^{- 3}$.
Thus, $\text{NO}_{3}^{-}$ related biogeochemical processes cannot explain TA concentrations in the German Bight higher than 2316 $\text{mM m}^{- 3}$.
If there is a constant flow of $\text{NO}_{3}^{-}$ from the North Sea to the Wadden Sea, according to $\text{TA}_{\text{ec}}$ it implies a continuous flow of TA from the Wadden Sea to the North Sea (the opposite direction).
Similarly, the constant supply of $\text{NO}_{3}^{-}$ from terrestrial sources would cause the opposite flow of TA.
Therefore, even though denitrification cannot be an explanation of high alkalinity values in the German Bight it still can be the most important variable to explain TA import from the Wadden Sea to the German Bight.
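As a quick illustration, here is a sketch reusing the `alk_change` and `sinusoidal` helpers defined above, with a hypothetical seasonal cycle in which the initial 16 $\text{mM m}^{- 3}$ of $\text{NO}_{3}^{-}$ is fully consumed and then restored:
```
NO3 = 16
dNO3 = -sinusoidal(NO3)                      # seasonal consumption and restoration of nitrate
NO3_year = NO3 + dNO3
TA_year = alk_change(TA = 2300, dNO3 = dNO3) # TA peaks at 2316 when NO3 reaches zero
fig = plt.figure(figsize=(8, 6))
ax, ax1 = (fig.add_subplot(2, 1, 1), fig.add_subplot(2, 1, 2))
ax.plot(ox, NO3_year); ax1.plot(ox, TA_year)
ax.set_ylabel('NO$_3^-$'); ax1.set_ylabel('TA')
ax1.set_xlabel('day')
```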
Denitrification and sulfate reduction are OM degradation reactions.
OM degradation reactions have an occurrence order due to their relative energetics (see Stumm, W. S., and J. J. Morgan (1981). Aquatic Chemistry, John Wiley & Sons, Inc., p. 460).
The most energetically valuable in this sequence is oxygen reduction (it is called oxygen respiration if OM is an electron donor).
$\text{MnO}_2$ and $\text{FeOOH}$ electron acceptors are energetically preferable to $\text{SO}_{4}^{2 -}$ for OM oxidation.
But in the biogeochemical model, we can omit $\text{MnO}_2$ and $\text{FeOOH}$ electron acceptors.
With OM oxidation by $\text{MnO}_2$ and $\text{FeOOH}$ (manganese and iron reduction), there will be less OM available for sulfate reduction. Since $\text{Fe}^{2+}$ and $\text{Mn}^{2+}$ (the products of OM oxidation with $\text{MnO}_2$ and $\text{FeOOH}$) do not appear in the $\text{TA}_{\text{ec}}$ expression, iron and manganese reduction produces less alkalinity than sulfate reduction.
Also, $\text{MnO}_2$ and $\text{FeOOH}$ can oxidize reduced sulfur compounds to sulfate, which will decrease TA according to the $\text{TA}_{\text{ec}}$ expression.
We can omit these variables since this assumption does not lead to an underestimation of TA generation, which is what matters for our goals.
Also, the contents of both $\text{FeOOH}$ and $\text{MnO}_2$ in the sediments of the Wadden Sea are quite low ([de Beer et al., 2005]).
[de Beer et al., 2005]: https://doi.org/10.4319/lo.2005.50.1.0113
According to the above reasoning and to the goal of the study (to estimate the maximum value of TA that can be generated in the Wadden Sea) we should include sulfate reduction and denitrification reactions into the biogeochemical model, but we can skip $\text{CaCO}_3$ precipitation/dissolution.
Also, we cannot skip oxygen respiration, since it is the most energetically advantageous pathway (it happens before denitrification and sulfate reduction).
Since the alkalinity generating reactions consume OM, we should implement the opposite processes supplying OM.
We have already taken into account the allochthonous OM coming from an external pool; we also need to take OM production into consideration.
Moreover, to balance sulfate reduction and denitrification we should add the opposite reactions from nitrogen and sulfur cycles: nitrification reactions and sulfides oxidation reactions.
We provide a thorough description of reactions included in the biogeochemical model in the Methods 2 section.
|
github_jupyter
|
import src.fetch_data as fd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# get some data from the World Ocean Database (WOD)
par, temperature, no3, ammonium, po4, si, irradiance = fd.get_data()
f'NH4={ammonium.mean()}; NO3={no3.mean()}; PO4={po4.mean()}'
Ca, SO4 = (10000, 25000)
def alk_change(TA, dCa=0, dSO4=0, dNH4=0, dNO3=0, dPO4=0):
return TA + 2*dCa - 2*dSO4 + dNH4 - dNO3 - dPO4
def sinusoidal(max_value):
"""Creates a sinusoidal line with a period of 365,
minimum value of zero,
and a maximum value of max_value"""
day=np.arange(0,365,1)
return (1/2)*max_value*(1+np.sin(2*np.pi*((day-90)/365)))
dCa = -sinusoidal(100)
Ca_year = Ca + dCa
TA_year = alk_change(TA = 2300, dCa = dCa)
ox = np.arange(0,365,1)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(2, 1, 1) # row-col-num
ax1 = fig.add_subplot(2, 1, 2)
ax.plot(ox, Ca_year); ax1.plot(ox, TA_year)
ax.set_ylabel('Ca$^{2+}$'); ax1.set_ylabel('TA');
ax1.set_xlabel('day')
dSO4 = -sinusoidal(100)
SO4_year = SO4 + dSO4
TA_year = alk_change(TA = 2300, dSO4 = dSO4)
fig = plt.figure(figsize=(8, 6))
ax, ax1 = (fig.add_subplot(2, 1, 1), fig.add_subplot(2, 1, 2))
ax.plot(ox, SO4_year); ax1.plot(ox, TA_year)
ax.set_ylabel('SO$_4^{2-}$'); ax1.set_ylabel('TA')
ax1.set_xlabel('day')
| 0.629775 | 0.908049 |
# 08-1 Working with Data Types
## Converting Data Types
Converting data types is one of the things you need to know well for data analysis. For example, the category type is frequently converted because converting it to a string often makes data analysis easier. Another example is phone numbers. Phone numbers are usually stored as numbers, but we almost never compute averages or sums with them; rather, they are more often treated like strings. In the following exercises, we will look at how to convert various data types to strings.
### Converting data types freely -- the astype method
#### 1.
The dataset we will use this time is the tips dataset from the seaborn library.
```
import pandas as pd
import seaborn as sns
tips=sns.load_dataset("tips")
```
#### 2. Converting various data types to strings
To convert a data type, use the astype method. The following uses the astype method to convert the data of the sex column to strings and store it in a new column called sex_str.
```
tips['sex_str']=tips['sex'].astype(str)
```
#### 3.
Shall we check whether the conversion to strings worked? You can see that the sex_str column, whose data type is string, has been added.
```
print(tips.dtypes)
```
#### 4. Restoring converted data to its original type
Can data whose type has been converted be turned back into the original type? The following converts the total_bill column to strings.
```
tips['total_bill']=tips['total_bill'].astype(str)
print(tips.dtypes)
```
#### 5.
Here the total_bill column, which was converted to strings in step 4, is converted back to floats.
```
tips['total_bill']=tips['total_bill'].astype(float)
print(tips.dtypes)
```
## Handling incorrectly entered data
This time we will look at how to deal with incorrectly entered data. What should we do if a string has been entered in a column that is supposed to contain numbers? We will look at how to solve this problem, together with the to_numeric method for converting data types.
### Handling incorrectly entered strings -- the to_numeric method
#### 1.
The following replaces the data in rows 1, 3, 5 and 7 of the total_bill column with 'missing' and stores the result in the variable tips_sub_miss.
```
tips_sub_miss=tips.head(10)
tips_sub_miss.loc[[1,3,5,7], 'total_bill']='missing'
print(tips_sub_miss)
```
#### 2.
If you check the data types of the dataframe, you can see that the total_bill column is now a string instead of a float. The string 'missing' is what caused this problem.
```
print(tips_sub_miss.dtypes)
```
#### 3.
Can the astype method solve this problem? If you try to convert the total_bill column to floats with the astype method, an error occurs, because pandas does not know how to convert the string 'missing' to a float.
```
tips_sub_miss['total_bill'].astype(float)
```
#### 4.
So we have to use a different approach. This time, let's try the to_numeric method. However, a similar error occurs with the to_numeric method as well.
```
pd.to_numeric(tips_sub_miss['total_bill'])
```
#### 5.
In fact, even the to_numeric method cannot convert such a string to a float. However, to_numeric lets you control the error behaviour to some extent by setting the errors argument to raise, coerce or ignore. For example, if the errors argument is set to raise, an error occurs only when there is a value that cannot be converted to a number. This kind of error is intended by the analyst and is useful because it tells you exactly where the problem occurred. The values that can be set for the errors argument are as follows.
#### Values that can be set for the errors argument
- raise : raise an error if there is a value that cannot be converted to a number
- coerce : set values that cannot be converted to a number to missing values (NaN)
- ignore : do nothing
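For example, with errors='raise' the invalid string immediately triggers a ValueError (a small stand-alone illustration, not part of the tips example):
```
# Stand-alone illustration of errors='raise': the non-numeric value raises a ValueError
import pandas as pd
s = pd.Series(['13.42', '3.07', 'missing'])
try:
    pd.to_numeric(s, errors='raise')
except ValueError as e:
    print('ValueError:', e)
```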
#### 6.
If the errors argument is set to ignore, no error occurs, but the data type does not change either. It literally just ignores the error. total_bill is still a string column.
```
tips_sub_miss['total_bill']=pd.to_numeric(tips_sub_miss['total_bill'],errors='ignore')
print(tips_sub_miss.dtypes)
```
#### 7.
This time, let's set the errors argument to coerce. Then 'missing' is replaced by a missing value. Checking the dataframe's dtypes shows that the data type of total_bill has changed to float.
```
tips_sub_miss['total_bill']=pd.to_numeric(tips_sub_miss['total_bill'],errors='coerce')
print(tips_sub_miss.dtypes)
```
#### 8.
Besides the errors argument, the to_numeric method also has a downcast argument. Downcasting is used to store data types such as integers and floats in a smaller form. The downcast argument accepts values such as integer, unsigned and float. The following downcasts the total_bill column; you can see that its data type changes from float64 to float32. float64 can represent a wider range of floating-point values than float32, but it takes up twice as much memory. If the expected range of the stored values is not large, downcasting is a good idea.
```
tips_sub_miss['total_bill']= pd.to_numeric(tips_sub_miss['total_bill'],errors='coerce', downcast='float')
print(tips_sub_miss.dtypes)
```
# 08-2 The Category Data Type
The pandas library has a special data type called category, which can only take a finite set of values. If, for example, a column stores the names of 10 kinds of fruit, using the category type is more efficient than the string type in terms of both memory and speed. The advantages and characteristics of the category type are as follows.
### Advantages and characteristics of the category data type
- Very efficient in terms of memory and speed.
- Mainly used when the data consists of the same strings repeated many times.
### Converting strings to category
#### 1.
The data in the sex column consists only of male or female values, which is why it is stored as a category. What happens if we convert the sex column to strings? After converting it to strings and checking the dataframe's memory usage with the info method, you can see that the dataframe takes up about 10.7 KB.
```
tips['sex']=tips['sex'].astype('str')
print(tips.info())
```
#### 2.
Let's convert the sex column back to category. Checking the dataframe's memory usage with the info method shows that it has dropped from 10.7+ KB to 9.1+ KB. As you can see, for data made up of repeated strings, using the category type is more efficient.
```
tips['sex']=tips['sex'].astype('category')
print(tips.info())
```
## Wrapping up
In this chapter we looked at how to work with data types. The category type in particular was explained separately. Category is a type that shows its true value as the data grows larger, so it is definitely worth knowing.
Source: "Do it! 데이터 분석을 위한 판다스 입문" (Do it! Introduction to Pandas for Data Analysis)
|
github_jupyter
|
import pandas as pd
import seaborn as sns
tips=sns.load_dataset("tips")
tips['sex_str']=tips['sex'].astype(str)
print(tips.dtypes)
tips['total_bill']=tips['total_bill'].astype(str)
print(tips.dtypes)
tips['total_bill']=tips['total_bill'].astype(float)
print(tips.dtypes)
tips_sub_miss=tips.head(10)
tips_sub_miss.loc[[1,3,5,7], 'total_bill']='missing'
print(tips_sub_miss)
print(tips_sub_miss.dtypes)
tips_sub_miss['total_bill'].astype(float)
pd.to_numeric(tips_sub_miss['total_bill'])
tips_sub_miss['total_bill']=pd.to_numeric(tips_sub_miss['total_bill'],errors='ignore')
print(tips_sub_miss.dtypes)
tips_sub_miss['total_bill']=pd.to_numeric(tips_sub_miss['total_bill'],errors='coerce')
print(tips_sub_miss.dtypes)
tips_sub_miss['total_bill']= pd.to_numeric(tips_sub_miss['total_bill'],errors='coerce', downcast='float')
print(tips_sub_miss.dtypes)
tips['sex']=tips['sex'].astype('str')
print(tips.info())
tips['sex']=tips['sex'].astype('category')
print(tips.info())
| 0.169234 | 0.989731 |
# AFV code demonstration
We demonstrate the code used in the paper *Efficient simulation of affine forward variance models*.
```
library(repr)
source("BlackFormula.R")
source("AFVsimulation.R")
source("GammaKernel.R")
source("roughHestonAdams.R")
source("roughHestonPade.R")
source("Lewis.R")
bl <- "royalblue"
rd <- "red2"
pk <- "hotpink1"
gr <- "green4"
```
### Parameters
We choose parameters similar to those found from a fit to SPX options as of May 19, 2017, the same data that was used in Roughening Heston.
```
params0 <- list(al=0.55,lam=0,eta=0.8,rho=-0.65, H=0.05,lam=0)
xi0 <- function(s){0.025+0*s} # The forward variance curve
```
### Simulation using the RSQE and HQE schemes
The option "all" returns a list with many variables of interest.
```
system.time(rsqe.sim <- RSQE.sim(params0, xi0)(paths=100000, steps=100, expiries=1,output="all"))
system.time(hqe.sim.100 <- HQE.sim(params0, xi0)(paths=100000, steps=100, expiries=1,output="all"))
system.time(hqe.sim.200 <- HQE.sim(params0, xi0)(paths=100000, steps=200, expiries=1,output="all"))
names(hqe.sim.100)
```
- v is terminal variance $v_T$
- x is terminal log-spot $X_T$
- y is terminal value of $Y_T = \int_0^T\,\sqrt{v_s}\,dW_s$.
- w is terminal value of quadratic variation $w_T = \int_0^T\,{v_s}\,ds$.
#### Parallelization
The code can be run in parallel with selected variables as output.
For example, here with $v_T$ as output.
```
library(foreach)
library(doParallel)
paths <- 1000000 # Note 1 million paths here!
steps <- 100
t0<-proc.time()
# Number of iterations
iters<- max(1,floor(paths/1000))
# Setup parallel backend to use all processors
(cl.num <- detectCores()) # Display number of processors on your machine
cl<-makeCluster(cl.num)
registerDoParallel(cl)
# Loop
ls <- foreach(icount(iters)) %dopar% {
HQE.sim(params0, xi0)(paths=1000, steps=steps, expiries=1,output="v")
}
stopCluster(cl)
hqe.sim.v <- do.call(cbind, ls) #Bind all of the submatrices into one big matrix
print(proc.time()- t0)
```
### Figure 7.6: Histograms of $v_T$
```
options(repr.plot.width=14,repr.plot.height=7,repr.plot.res=150)
vv <- hqe.sim.v
vvg <- vv[vv > 1e-9] # Restrict sample to values greater than tiny
par(mfrow=c(1,2))
hist(vv,breaks=200,xlab=expression(v[T]),main="",col=bl, border=bl, cex.lab=1.5)
par(new=F)
hist(vvg,breaks=200,xlab=expression(v[T]),main="",col=bl,border=bl, cex.lab=1.5)
par(mfrow=c(1,1))
```
### Histograms of terminal stock prices
```
spots.100 <- exp(hqe.sim.100$x)
spots.200 <- exp(hqe.sim.200$x)
hist(spots.100,breaks=200,xlab=expression(S[T]),main="",col=pk, border=pk, cex.lab=1.5)
```
Note the fat negative tail in the return distribution.
### Draw the 1 year smile with the above parameters
```
smile.100 <- function(k){ivS(spots.100, T=1, mean(spots.100)*exp(k))}
smile.200 <- function(k){ivS(spots.200, T=1, mean(spots.200)*exp(k))}
```
We select a vector of log-strikes.
```
kk <- seq(-.4,.4,.01)
smile.HQE.100 <- smile.100(kk)
smile.HQE.200 <- smile.200(kk)
options(repr.plot.width=10,repr.plot.height=7,repr.plot.res=150)
plot(kk,smile.HQE.100,col=rd,lwd=2,type="l",
xlab="Log-strike k", ylab = "Implied vol.",cex.lab=1.5)
lines(kk,smile.HQE.200,col=bl,lwd=2,lty=2)
legend("topright",c("HQE 100 steps","HQE 200 steps"), cex=1.5, inset=.05,
lty=c(1,2),col=c(rd,bl), lwd=2)
```
### Richardson extrapolated smile
```
smile.HQE.Richardson <- 2*smile.200(kk)-smile.100(kk)
plot(kk,smile.HQE.100,col=rd,lwd=2,type="l",
xlab="Log-strike k", ylab = "Implied vol.",cex.lab=1.5)
lines(kk,smile.HQE.200,col=bl,lwd=2,lty=2)
lines(kk,smile.HQE.Richardson,col=gr,lwd=2,lty=4)
legend("topright",c("HQE 100 steps","HQE 200 steps",
"HQE Richardson"), cex=1.5, inset=.05,
lty=c(1,2,4),col=c(rd,bl,gr), lwd=2)
```
### Parameter conversion
The QE simulation and the Adams/Padé approximations use different formulations of the rough Heston model.
In the QE case,
$$
d\xi_t(u) = \eta\,\sqrt{2 H}\,(u-t)^{\alpha-1}\,\sqrt{v_t}\,dW_t.
$$
In the Adams/Padé case,
$$
d\xi_t(u) = \frac{\nu}{\Gamma(\alpha)}\,(u-t)^{\alpha-1}\,\sqrt{v_t}\,dW_t.
$$
Thus
$$
\nu = \sqrt{2 H}\, \Gamma(\alpha)\,\eta.
$$
```
(params0$nu <- params0$eta*sqrt(2*params0$al-1)*gamma(params0$al))
```
### Comparison with Adams smile
- The Adams scheme code is originally due to Fabio Baschetti, Giacomo Bormetti, Pietro Rossi, and Silvia Romagnoli, University of Bologna (2020).
- The code for computing the cutoff in the Lewis formula is based on code originally by Omar El Euch, École Polytechnique Paris (2017).
Note that the Adams smile takes time to compute (two minutes on my machine).
```
system.time(vol.Adams.kk.1y.200 <- impliedVolRoughHeston(params0, xi0, nSteps=200)(kk,1))
plot(kk,smile.HQE.Richardson,col=rd,lwd=2,type="l",
xlab="Log-strike k", ylab = "Implied vol.",cex.lab=1.5)
lines(kk,vol.Adams.kk.1y.200,col=bl,lwd=2,lty=2)
legend("topright",c("HQE Richardson","Adams"), cex=1.5, inset=.05,
lty=c(1,2),col=c(rd,bl), lwd=2)
```
### Padé approximation
```
phi.Pade <- phiRoughHestonDhApprox(params0, xi0, dh.approx= d.h.Pade33, n=500)
system.time(vol.Pade.kk.1y <- impvol.phi(phi.Pade)(kk,1))
plot(kk,smile.HQE.Richardson,col=rd,lwd=2,type="l",
xlab="Log-strike k", ylab = "Implied vol.",cex.lab=1.5)
lines(kk,vol.Adams.kk.1y.200,col=bl,lwd=2,lty=2)
lines(kk,vol.Pade.kk.1y,col=gr,lwd=2,lty=4)
legend("topright",c("HQE Richardson","Adams","Padé"), cex=1.5, inset=.05,
lty=c(1,2,2),col=c(rd,bl,gr), lwd=2)
```
To get reliable agreement between these smiles, a greater number of paths (such as 1 million) is needed. Alternatively (and better), variance reduction can be implemented.
|
github_jupyter
|
library(repr)
source("BlackFormula.R")
source("AFVsimulation.R")
source("GammaKernel.R")
source("roughHestonAdams.R")
source("roughHestonPade.R")
source("Lewis.R")
bl <- "royalblue"
rd <- "red2"
pk <- "hotpink1"
gr <- "green4"
params0 <- list(al=0.55,lam=0,eta=0.8,rho=-0.65, H=0.05,lam=0)
xi0 <- function(s){0.025+0*s} # The forward variance curve
system.time(rsqe.sim <- RSQE.sim(params0, xi0)(paths=100000, steps=100, expiries=1,output="all"))
system.time(hqe.sim.100 <- HQE.sim(params0, xi0)(paths=100000, steps=100, expiries=1,output="all"))
system.time(hqe.sim.200 <- HQE.sim(params0, xi0)(paths=100000, steps=200, expiries=1,output="all"))
names(hqe.sim.100)
library(foreach)
library(doParallel)
paths <- 1000000 # Note 1 million paths here!
steps <- 100
t0<-proc.time()
# Number of iterations
iters<- max(1,floor(paths/1000))
# Setup parallel backend to use all processors
(cl.num <- detectCores()) # Display number of processors on your machine
cl<-makeCluster(cl.num)
registerDoParallel(cl)
# Loop
ls <- foreach(icount(iters)) %dopar% {
HQE.sim(params0, xi0)(paths=1000, steps=steps, expiries=1,output="v")
}
stopCluster(cl)
hqe.sim.v <- do.call(cbind, ls) #Bind all of the submatrices into one big matrix
print(proc.time()- t0)
options(repr.plot.width=14,repr.plot.height=7,repr.plot.res=150)
vv <- hqe.sim.v
vvg <- vv[vv > 1e-9] # Restrict sample to values greater than tiny
par(mfrow=c(1,2))
hist(vv,breaks=200,xlab=expression(v[T]),main="",col=bl, border=bl, cex.lab=1.5)
par(new=F)
hist(vvg,breaks=200,xlab=expression(v[T]),main="",col=bl,border=bl, cex.lab=1.5)
par(mfrow=c(1,1))
spots.100 <- exp(hqe.sim.100$x)
spots.200 <- exp(hqe.sim.200$x)
hist(spots.100,breaks=200,xlab=expression(S[T]),main="",col=pk, border=pk, cex.lab=1.5)
smile.100 <- function(k){ivS(spots.100, T=1, mean(spots.100)*exp(k))}
smile.200 <- function(k){ivS(spots.200, T=1, mean(spots.200)*exp(k))}
kk <- seq(-.4,.4,.01)
smile.HQE.100 <- smile.100(kk)
smile.HQE.200 <- smile.200(kk)
options(repr.plot.width=10,repr.plot.height=7,repr.plot.res=150)
plot(kk,smile.HQE.100,col=rd,lwd=2,type="l",
xlab="Log-strike k", ylab = "Implied vol.",cex.lab=1.5)
lines(kk,smile.HQE.200,col=bl,lwd=2,lty=2)
legend("topright",c("HQE 100 steps","HQE 200 steps"), cex=1.5, inset=.05,
lty=c(1,2),col=c(rd,bl), lwd=2)
smile.HQE.Richardson <- 2*smile.200(kk)-smile.100(kk)
plot(kk,smile.HQE.100,col=rd,lwd=2,type="l",
xlab="Log-strike k", ylab = "Implied vol.",cex.lab=1.5)
lines(kk,smile.HQE.200,col=bl,lwd=2,lty=2)
lines(kk,smile.HQE.Richardson,col=gr,lwd=2,lty=4)
legend("topright",c("HQE 100 steps","HQE 200 steps",
"HQE Richardson"), cex=1.5, inset=.05,
lty=c(1,2,4),col=c(rd,bl,gr), lwd=2)
(params0$nu <- params0$eta*sqrt(2*params0$al-1)*gamma(params0$al))
system.time(vol.Adams.kk.1y.200 <- impliedVolRoughHeston(params0, xi0, nSteps=200)(kk,1))
plot(kk,smile.HQE.Richardson,col=rd,lwd=2,type="l",
xlab="Log-strike k", ylab = "Implied vol.",cex.lab=1.5)
lines(kk,vol.Adams.kk.1y.200,col=bl,lwd=2,lty=2)
legend("topright",c("HQE Richardon","Adams"), cex=1.5, inset=.05,
lty=c(1,2),col=c(rd,bl), lwd=2)
phi.Pade <- phiRoughHestonDhApprox(params0, xi0, dh.approx= d.h.Pade33, n=500)
system.time(vol.Pade.kk.1y <- impvol.phi(phi.Pade)(kk,1))
plot(kk,smile.HQE.Richardson,col=rd,lwd=2,type="l",
xlab="Log-strike k", ylab = "Implied vol.",cex.lab=1.5)
lines(kk,vol.Adams.kk.1y.200,col=bl,lwd=2,lty=2)
lines(kk,vol.Pade.kk.1y,col=gr,lwd=2,lty=4)
legend("topright",c("HQE Richardson","Adams","Padé"), cex=1.5, inset=.05,
lty=c(1,2,2),col=c(rd,bl,gr), lwd=2)
| 0.380068 | 0.906736 |
```
import numpy as np
import matplotlib.pyplot as plt
p_mean = 3.0 # GeV/c
p_sigma = 0.3 # GeV/c
decay_length = 150.0 # m
location = 1000 # m
cross_sec = np.pi # m^2
m_pion = 0.13957 # GeV/c^2
m_muon = 0.10566 # GeV/c^2 (muon mass)
m_numu = 0 # GeV/c^2
c = 2.9979e8 # m/s
t_pion = 2.6033e-8 # s
def p4_hist(p4):
plt.hist(p4[:,0], bins = 50)
plt.show()
plt.hist(p4[:,1], bins = 50)
plt.show()
plt.hist(p4[:,2], bins = 50)
plt.show()
plt.hist(p4[:,3], bins = 50)
plt.show()
def two_body_decay(M,m1,m2, size = 1):
p4 = np.zeros((size, 4))
p4[:,0] = (M**2 + m1**2 - m2**2)/(2.0*M) #E1=(M**2+m1**2-m2**2)/(2*M)
P1 = np.sqrt(p4[:,0]**2 - m1**2)
phi = 2.0*np.pi*np.random.random(size) #random phi 0-2pi
cos_theta = -1.0 + 2.0*np.random.random(size) # random cos theta -1 to 1
sin_theta = np.sqrt(1. - cos_theta**2)
p4[:,3] = P1*cos_theta
p4[:,1] = P1*sin_theta*np.cos(phi)
p4[:,2] = P1*sin_theta*np.sin(phi)
return p4
def Lorentz_transform(p4_rest, gamma, beta):
    # Boost along the z axis: transverse components are unchanged, E and pz mix
    p4_lab = np.zeros((len(gamma), 4))
    p4_lab[:,1:3] = p4_rest[:,1:3]                          # px, py unchanged
    p4_lab[:,0] = gamma*(p4_rest[:,0] + beta*p4_rest[:,3])  # E'  = gamma*(E + beta*pz)
    p4_lab[:,3] = gamma*(beta*p4_rest[:,0] + p4_rest[:,3])  # pz' = gamma*(beta*E + pz)
    return p4_lab
n_pions = 100000 # number of generated pions (value assumed here; not specified in the original)
p4_pion = np.zeros((n_pions, 4))
p4_pion[:,3] = np.random.normal(loc = p_mean, scale = p_sigma, size = n_pions)
p4_pion[:,0] = np.sqrt(p4_pion[:,3]**2 + m_pion**2) # E = sqrt(p^2 + m^2)
#p4_hist(p4_pion)
length_p = p4_pion[:,3]/m_pion*c*t_pion
pi_decay_point = np.random.exponential(length_p)
# Take only the pions that decay inside the tunnel
decay_mask, = np.where(pi_decay_point < decay_length)
remaining_p4_pion = p4_pion[decay_mask,:]
remaining_pi_decay_point = pi_decay_point[decay_mask]
remaining_pions = len(remaining_pi_decay_point)
#p4_hist(remaining_p4_pion)
p4_neutrino_rest = two_body_decay(m_pion, m_numu, m_muon, size = remaining_pions)
#p4_hist(p4_neutrino_rest)
gamma = remaining_p4_pion[:,0]/m_pion # gamma = E/m
beta = remaining_p4_pion[:,3]/remaining_p4_pion[:,0] # beta = P/E in these units
p4_neutrino = Lorentz_transform(p4_neutrino_rest, gamma, beta)
#p4_hist(p4_neutrino)
max_detector_angle = np.arctan(1/(1000-remaining_pi_decay_point))
cos_theta = p4_neutrino[:,3]/np.sqrt(p4_neutrino[:,1]**2+p4_neutrino[:,2]**2+p4_neutrino[:,3]**2)
detector_hit_angle = np.arccos(cos_theta)
hit_mask, = np.where(np.abs(detector_hit_angle) < np.abs(max_detector_angle))
hit_p4_neutrino = p4_neutrino[hit_mask,:]
plt.hist(hit_p4_neutrino[:,0], bins = 100, density = True, histtype = 'step', align = 'left')
plt.hist(p4_neutrino[:,0], bins = 100, density = True, histtype = 'step', align = 'left')
plt.show()
```
|
github_jupyter
|
import numpy as np
import matplotlib.pyplot as plt
p_mean = 3.0 # GeV/c
p_sigma = 0.3 # GeV/c
decay_length = 150.0 # m
location = 1000 # m
cross_sec = np.pi # m^2
m_pion = 0.13957 # GeV/c^2
m_muon = 1.057e-5 # GeV/c^2
m_numu = 0 # GeV/c^2
c = 2.9979e8 # m/s
t_pion = 2.6033e-8 # s
def p4_hist(p4):
plt.hist(p4[:,0], bins = 50)
plt.show()
plt.hist(p4[:,1], bins = 50)
plt.show()
plt.hist(p4[:,2], bins = 50)
plt.show()
plt.hist(p4[:,3], bins = 50)
plt.show()
def two_body_decay(M,m1,m2, size = 1):
p4 = np.zeros((size, 4))
p4[:,0] = (M**2 + m1**2 - m2**2)/(2.0*M) #E1=(M**2+m1**2-m2**2)/(2*M)
P1 = np.sqrt(p4[:,0]**2 - m1**2)
phi = 2.0*np.pi*np.random.random(size) #random phi 0-2pi
cos_theta = -1.0 + 2.0*np.random.random(size) # random cos theta -1 to 1
sin_theta = np.sqrt(1. - cos_theta**2)
p4[:,3] = P1*cos_theta
p4[:,1] = P1*sin_theta*np.cos(phi)
p4[:,2] = P1*sin_theta*np.sin(phi)
return p4
def Lorentz_transform(p4_rest, gamma, beta):
p4_lab = np.zeros((len(gamma), 4))
p4_lab[:,1:2] = p4_rest[:,1:2]
p4_lab[:,0] = gamma*(p4_rest[:,0] + beta*p4_rest[:,3])
p4_lab[:,3] = gamma*(p4_rest[:,0]/beta + p4_rest[:,3])
return p4_lab
p4_pion = np.zeros((n_pions, 4))
p4_pion[:,3] = np.random.normal(loc = p_mean, scale = p_sigma, size = n_pions)
p4_pion[:,0] = p4_pion[:,3]**2 + (m_pion**2)
#p4_hist(p4_pion)
length_p = p4_pion[:,3]/m_pion*c*t_pion
pi_decay_point = np.random.exponential(length_p)
# Take only the pions that decay inside the tunnel
decay_mask, = np.where(pi_decay_point < decay_length)
remaining_p4_pion = p4_pion[decay_mask,:]
remaining_pi_decay_point = pi_decay_point[decay_mask]
remaining_pions = len(remaining_pi_decay_point)
#p4_hist(remaining_p4_pion)
p4_neutrino_rest = two_body_decay(m_pion, m_numu, m_muon, size = remaining_pions)
#p4_hist(p4_neutrino_rest)
gamma = remaining_p4_pion[:,0]/m_pion # gamma = E/m
beta = remaining_p4_pion[:,3]/remaining_p4_pion[:,0] # beta = P/E in these units
p4_neutrino = Lorentz_transform(p4_neutrino_rest, gamma, beta)
#p4_hist(p4_neutrino)
max_detector_angle = np.arctan(1/(1000-remaining_pi_decay_point))
cos_theta = p4_neutrino[:,3]/np.sqrt(p4_neutrino[:,1]**2+p4_neutrino[:,2]**2+p4_neutrino[:,3]**2)
detector_hit_angle = np.arccos(cos_theta)
hit_mask, = np.where(np.abs(detector_hit_angle) < np.abs(max_detector_angle))
hit_p4_neutrino = p4_neutrino[hit_mask,:]
plt.hist(hit_p4_neutrino[:,0], bins = 100, density = True, histtype = 'step', align = 'left')
plt.hist(p4_neutrino[:,0], bins = 100, density = True, histtype = 'step', align = 'left')
plt.show()
| 0.324556 | 0.711944 |
## Loading and using a trained model for Dsprites
Notebook demonstrating how to load a JointVAE model and use it for various things.
```
from utils.load_model import load
path_to_model_folder = './trained_models/dsprites/'
model = load(path_to_model_folder)
# Print the latent distribution info
print(model.latent_spec)
# Print model architecture
print(model)
from viz.visualize import Visualizer as Viz
# Create a Visualizer for the model
viz = Viz(model)
viz.save_images = False # Return tensors instead of saving images
%matplotlib inline
import matplotlib.pyplot as plt
samples = viz.samples()
plt.imshow(samples.numpy()[0, :, :], cmap='gray')
traversals = viz.all_latent_traversals()
plt.imshow(traversals.numpy()[0, :, :], cmap='gray')
# Traverse 3rd continuous latent dimension across columns and first
# discrete latent dimension across rows
traversals = viz.latent_traversal_grid(cont_idx=0, cont_axis=1, disc_idx=0, disc_axis=0, size=(3, 10))
plt.imshow(traversals.numpy()[0, :, :], cmap='gray')
```
#### Reorder the discrete latent dimensions
```
from viz.visualize import reorder_img
ordering = [1,0,2] # show discrete latent dimension 1 first, then 0, then 2
traversals = reorder_img(traversals, ordering, by_row=True)
plt.imshow(traversals.numpy()[0, :, :], cmap='gray')
traversal = viz.latent_traversal_line(cont_idx=1, size=12)
plt.imshow(traversal.numpy()[0, :, :], cmap='gray')
from utils.dataloaders import get_dsprites_dataloader
# Get dsprites test data
dataloader = get_dsprites_dataloader(batch_size=32)
# Extract a batch of data
for batch, labels in dataloader:
break
recon = viz.reconstructions(batch, size=(8, 8))
plt.imshow(recon.numpy()[0, :, :], cmap='gray')
```
### Encode data
The model can also be used to get encodings of the data
```
from torch.autograd import Variable
encodings = model.encode(Variable(batch))
# Continuous encodings for the first 5 examples
encodings['cont'][0][:5]
from utils.dataloaders import get_dsprites_dataloader
subsample = 1000
import numpy as np
path_to_data = '../data/dsprites-data/dsprites_data.npz'
state = np.random.get_state()
data = np.load(path_to_data)
img = data['imgs']
np.random.shuffle(img)
imgs = img[::subsample]
label = data['latents_values'][:, 1]
np.random.set_state(state)
np.random.shuffle(label)
labels = label[::subsample]-1 # subsample the labels the same way as the images; shift shape labels {1,2,3} to {0,1,2}
import torch
import torch
imgs = torch.tensor(imgs).float()
imgs = imgs.unsqueeze(1)
from torch.autograd import Variable
imgs = Variable(imgs)
import torch
latent_dist = model.encode(imgs)
_, predict_label = torch.max(latent_dist['disc'][0], dim=1)
confusion = torch.zeros(3, 3)
for i in range(738):
confusion[int(labels[i]),predict_label[i].item()] += 1
for i in range(3):
confusion[i] = confusion[i] / confusion[i].sum()
%matplotlib inline
import matplotlib.pyplot as plt
# confusion = np.array([[0.9,0.1,0.0],[0.0,0.8,0.2],[0.1,0.7,0.2]])
# confusion = torch.tensor(confusion)
from matplotlib import cm
plt.imshow(confusion,interpolation='nearest',cmap=cm.Blues,aspect='auto',vmin=0,vmax=1.0)
value, predict_label = torch.max(confusion, dim=1)
list_price_positoin_address = []
seen = []
for i in predict_label:
if i in seen:
pass
else:
seen.append(i)
address_index = [x for x in range(len(predict_label)) if predict_label[x] == i]
list_price_positoin_address.append([i, address_index])
dict_address = dict(list_price_positoin_address)
print(dict_address)
for keys in dict_address.keys():
if(len(dict_address[keys])>1):
acc = confusion[dict_address[keys],keys.item()]
_, predict_label = torch.min(acc, dim=0)
confusion[dict_address[keys][predict_label.item()],keys.item()] = 0.0
value[dict_address[keys][predict_label.item()]], p = torch.max(confusion[dict_address[keys][predict_label.item()],:],dim=0)
value.mean()
from utils.load_model import load_param
spec,img_size = load_param(path_to_model_folder)
spec
```
|
github_jupyter
|
from utils.load_model import load
path_to_model_folder = './trained_models/dsprites/'
model = load(path_to_model_folder)
# Print the latent distribution info
print(model.latent_spec)
# Print model architecture
print(model)
from viz.visualize import Visualizer as Viz
# Create a Visualizer for the model
viz = Viz(model)
viz.save_images = False # Return tensors instead of saving images
%matplotlib inline
import matplotlib.pyplot as plt
samples = viz.samples()
plt.imshow(samples.numpy()[0, :, :], cmap='gray')
traversals = viz.all_latent_traversals()
plt.imshow(traversals.numpy()[0, :, :], cmap='gray')
# Traverse 3rd continuous latent dimension across columns and first
# discrete latent dimension across rows
traversals = viz.latent_traversal_grid(cont_idx=0, cont_axis=1, disc_idx=0, disc_axis=0, size=(3, 10))
plt.imshow(traversals.numpy()[0, :, :], cmap='gray')
from viz.visualize import reorder_img
ordering = [1,0,2] # The 9th dimension corresponds to 0, the 3rd to 1 etc...
traversals = reorder_img(traversals, ordering, by_row=True)
plt.imshow(traversals.numpy()[0, :, :], cmap='gray')
traversal = viz.latent_traversal_line(cont_idx=1, size=12)
plt.imshow(traversal.numpy()[0, :, :], cmap='gray')
from utils.dataloaders import get_dsprites_dataloader
# Get MNIST test data
dataloader = get_dsprites_dataloader(batch_size=32)
# Extract a batch of data
for batch, labels in dataloader:
break
recon = viz.reconstructions(batch, size=(8, 8))
plt.imshow(recon.numpy()[0, :, :], cmap='gray')
from torch.autograd import Variable
encodings = model.encode(Variable(batch))
# Continuous encodings for the first 5 examples
encodings['cont'][0][:5]
from utils.dataloaders import get_dsprites_dataloader
subsample = 1000
import numpy as np
path_to_data = '../data/dsprites-data/dsprites_data.npz'
state = np.random.get_state()
data = np.load(path_to_data)
img = data['imgs']
np.random.shuffle(img)
imgs = img[::subsample]
label = data['latents_values'][:, 1]
np.random.set_state(state)
np.random.shuffle(label)
labels = label[:subsample]-1
# self.transform = transform
# Each image in the dataset has binary values so multiply by 255 to get
# pixel values
import torch
import torch
imgs = torch.tensor(imgs).float()
imgs = imgs.unsqueeze(1)
from torch.autograd import Variable
imgs = Variable(imgs)
import torch
latent_dist = model.encode(imgs)
_, predict_label = torch.max(latent_dist['disc'][0], dim=1)
confusion = torch.zeros(3, 3)
for i in range(738):
confusion[int(labels[i]),predict_label[i].item()] += 1
for i in range(3):
confusion[i] = confusion[i] / confusion[i].sum()
%matplotlib inline
import matplotlib.pyplot as plt
# confusion = np.array([[0.9,0.1,0.0],[0.0,0.8,0.2],[0.1,0.7,0.2]])
# confusion = torch.tensor(confusion)
from matplotlib import cm
plt.imshow(confusion,interpolation='nearest',cmap=cm.Blues,aspect='auto',vmin=0,vmax=1.0)
value, predict_label = torch.max(confusion, dim=1)
list_price_positoin_address = []
seen = []
for i in predict_label:
if i in seen:
pass
else:
seen.append(i)
address_index = [x for x in range(len(predict_label)) if predict_label[x] == i]
list_price_positoin_address.append([i, address_index])
dict_address = dict(list_price_positoin_address)
print(dict_address)
for keys in dict_address.keys():
if(len(dict_address[keys])>1):
acc = confusion[dict_address[keys],keys.item()]
_, predict_label = torch.min(acc, dim=0)
confusion[dict_address[keys][predict_label.item()],keys.item()] = 0.0
value[dict_address[keys][predict_label.item()]], p = torch.max(confusion[dict_address[keys][predict_label.item()],:],dim=0)
value.mean()
from utils.load_model import load_param
spec,img_size = load_param(path_to_model_folder)
spec
| 0.686265 | 0.947088 |
The Benjamini-Yekutieli (BY) procedure is a multiple testing procedure that can be used to control the accumulation of type 1 errors when testing multiple hypotheses at the same time.
In the tsfresh filtering, the BY procedure is used to decide which features to keep and which to discard.
The method is based on a line, the so-called rejection line, that is compared to the sequence of ordered p-values. In this notebook, we will visualize that rejection line.
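Concretely, if $m$ features are tested and the p-values are ordered as $p_{(1)} \le \dots \le p_{(m)}$, the BY procedure at FDR level $q$ rejects (i.e. keeps as relevant) all hypotheses up to the largest $k$ with
$$p_{(k)} \leq \frac{k}{m \, c(m)}\, q, \qquad c(m) = \sum_{i=1}^{m} \frac{1}{i},$$
where $c(m)$ reduces to $1$ if the hypotheses are assumed to be independent (the Benjamini-Hochberg case). The right-hand side as a function of $k$ is exactly the rejection line computed further below.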
```
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from tsfresh.examples.robot_execution_failures import download_robot_execution_failures, load_robot_execution_failures
from tsfresh import defaults, extract_features
from tsfresh.feature_selection.relevance import calculate_relevance_table
from tsfresh.utilities.dataframe_functions import impute
from tsfresh.feature_extraction import ComprehensiveFCParameters
matplotlib.rcParams["figure.figsize"] = [16, 6]
matplotlib.rcParams["font.size"] = 14
matplotlib.style.use('seaborn-darkgrid')
```
## Parameter setting
```
FDR_LEVEL = defaults.FDR_LEVEL
HYPOTHESES_INDEPENDENT = defaults.HYPOTHESES_INDEPENDENT
```
## Load robot data
```
download_robot_execution_failures()
df, y = load_robot_execution_failures()
df.head()
```
## Extract Features
```
X = extract_features(df,
column_id='id', column_sort='time',
default_fc_parameters=ComprehensiveFCParameters(),
impute_function=impute)
# drop constant features
print(X.shape)
X = X.loc[:, X.apply(pd.Series.nunique) != 1]
print(X.shape)
```
## Calculate p-values and Benjamini-Yekutieli Procedure
tsfresh implements two different feature significance tests, the Mann-Whitney U test and the Kolmogorov-Smirnov test. In the following, both of them are illustrated to document the feature selection process and to compare the two methods.
### Mann-Whitney-U
Run significance test with Mann-Whitney-U test. Returns the p-values of the features and whether they are rejected or not.
```
df_pvalues_mann = calculate_relevance_table(X, y, fdr_level=FDR_LEVEL, test_for_binary_target_real_feature='mann')
print("# total \t", len(df_pvalues_mann))
print("# relevant \t", (df_pvalues_mann["relevant"] == True).sum())
print("# irrelevant \t", (df_pvalues_mann["relevant"] == False).sum(),
"( # constant", (df_pvalues_mann["type"] == "const").sum(), ")")
df_pvalues_mann.head()
```
### Kolmogorov-Smirnov
Run significance test with Kolmogorov-Smirnov test. Returns the p-values of the features and whether they are rejected or not.
```
df_pvalues_smir = calculate_relevance_table(X, y, fdr_level=FDR_LEVEL, test_for_binary_target_real_feature='smir')
print("# total \t", len(df_pvalues_smir))
print("# relevant \t", (df_pvalues_smir["relevant"] == True).sum())
print("# irrelevant \t", (df_pvalues_smir["relevant"] == False).sum(),
"( # constant", (df_pvalues_smir["type"] == "const").sum(), ")")
df_pvalues_smir.head()
```
## Calculate rejection line
With the rejection line it is determined whether a feature is relevant or irrelevant.
```
def calc_rejection_line(df_pvalues, hypothesis_independent, fdr_level):
m = len(df_pvalues.loc[~(df_pvalues.type == "const")])
K = list(range(1, m + 1))
if hypothesis_independent:
C = [1] * m
else:
C = [sum([1.0 / k for k in K])] * m
return [fdr_level * k / m * 1.0 / c for k, c in zip(K, C)]
```
### Mann-Whitney-U
```
rejection_line_mann = calc_rejection_line(df_pvalues_mann, HYPOTHESES_INDEPENDENT, FDR_LEVEL)
```
### Kolmogorov-Smirnov
```
rejection_line_smir = calc_rejection_line(df_pvalues_smir, HYPOTHESES_INDEPENDENT, FDR_LEVEL)
```
## Plot ordered p-values and rejection line
In the plot, the p-values are ordered from low to high. Constant features (green points) are always irrelevant but are not considered for calculating the rejection line (red line).
For nice plotting, the p-values are divided into the three groups relevant, irrelevant and constant (which are also irrelevant).
### Mann-Whitney-U
```
df_pvalues_mann.index = pd.Series(range(0, len(df_pvalues_mann.index)))
df_pvalues_mann.p_value.where(df_pvalues_mann.relevant)\
.plot(style=".", label="relevant features")
df_pvalues_mann.p_value.where(~df_pvalues_mann.relevant & (df_pvalues_mann.type != "const"))\
.plot(style=".", label="irrelevant features")
df_pvalues_mann.p_value.fillna(1).where(df_pvalues_mann.type == "const")\
.plot(style=".", label="irrelevant (constant) features")
plt.plot(rejection_line_mann, label="rejection line (FDR = " + str(FDR_LEVEL) + ")")
plt.xlabel("Feature #")
plt.ylabel("p-value")
plt.title("Mann-Whitney-U")
plt.legend()
plt.plot()
```
### Kolmogorov-Smirnov
```
df_pvalues_smir.index = pd.Series(range(0, len(df_pvalues_smir.index)))
df_pvalues_smir.p_value.where(df_pvalues_smir.relevant)\
.plot(style=".", label="relevant features")
df_pvalues_smir.p_value.where(~df_pvalues_smir.relevant & (df_pvalues_smir.type != "const"))\
.plot(style=".", label="irrelevant features")
df_pvalues_smir.p_value.fillna(1).where(df_pvalues_smir.type == "const")\
.plot(style=".", label="irrelevant (constant) features")
plt.plot(rejection_line_smir, label="rejection line (FDR = " + str(FDR_LEVEL) + ")")
plt.xlabel("Feature #")
plt.ylabel("p-value")
plt.title("Kolmogorov-Smirnov")
plt.legend()
plt.plot()
```
## Plot zoomed ordered p-values and rejection line
Since the intersection of the ordered p-values and the rejection line is not clearly visible, a zoomed plot is provided.
### Mann-Whitney-U
```
last_rejected_index = (df_pvalues_mann["relevant"] == True).sum() - 1
margin = 20
a = max(last_rejected_index - margin, 0)
b = min(last_rejected_index + margin, len(df_pvalues_mann) - 1)
df_pvalues_mann[a:b].p_value.where(df_pvalues_mann[a:b].relevant)\
.plot(style=".", label="relevant features")
df_pvalues_mann[a:b].p_value.where(~df_pvalues_mann[a:b].relevant)\
.plot(style=".", label="irrelevant features")
plt.plot(np.arange(a, b), rejection_line_mann[a:b], label="rejection line (FDR = " + str(FDR_LEVEL) + ")")
plt.xlabel("Feature #")
plt.ylabel("p-value")
plt.title("Mann-Whitney-U")
plt.legend()
plt.plot()
```
### Kolmogorov-Smirnov
```
last_rejected_index = (df_pvalues_smir["relevant"] == True).sum() - 1
margin = 20
a = max(last_rejected_index - margin, 0)
b = min(last_rejected_index + margin, len(df_pvalues_smir) - 1)
df_pvalues_smir[a:b].p_value.where(df_pvalues_smir[a:b].relevant)\
.plot(style=".", label="relevant features")
df_pvalues_smir[a:b].p_value.where(~df_pvalues_smir[a:b].relevant)\
.plot(style=".", label="irrelevant features")
plt.plot(np.arange(a, b), rejection_line_smir[a:b], label="rejection line (FDR = " + str(FDR_LEVEL) + ")")
plt.xlabel("Feature #")
plt.ylabel("p-value")
plt.title("Kolmogorov-Smirnov")
plt.legend()
plt.plot()
```
|
github_jupyter
|
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from tsfresh.examples.robot_execution_failures import download_robot_execution_failures, load_robot_execution_failures
from tsfresh import defaults, extract_features
from tsfresh.feature_selection.relevance import calculate_relevance_table
from tsfresh.utilities.dataframe_functions import impute
from tsfresh.feature_extraction import ComprehensiveFCParameters
matplotlib.rcParams["figure.figsize"] = [16, 6]
matplotlib.rcParams["font.size"] = 14
matplotlib.style.use('seaborn-darkgrid')
FDR_LEVEL = defaults.FDR_LEVEL
HYPOTHESES_INDEPENDENT = defaults.HYPOTHESES_INDEPENDENT
download_robot_execution_failures()
df, y = load_robot_execution_failures()
df.head()
X = extract_features(df,
column_id='id', column_sort='time',
default_fc_parameters=ComprehensiveFCParameters(),
impute_function=impute)
# drop constant features
print(X.shape)
X = X.loc[:, X.apply(pd.Series.nunique) != 1]
print(X.shape)
df_pvalues_mann = calculate_relevance_table(X, y, fdr_level=FDR_LEVEL, test_for_binary_target_real_feature='mann')
print("# total \t", len(df_pvalues_mann))
print("# relevant \t", (df_pvalues_mann["relevant"] == True).sum())
print("# irrelevant \t", (df_pvalues_mann["relevant"] == False).sum(),
"( # constant", (df_pvalues_mann["type"] == "const").sum(), ")")
df_pvalues_mann.head()
df_pvalues_smir = calculate_relevance_table(X, y, fdr_level=FDR_LEVEL, test_for_binary_target_real_feature='smir')
print("# total \t", len(df_pvalues_smir))
print("# relevant \t", (df_pvalues_smir["relevant"] == True).sum())
print("# irrelevant \t", (df_pvalues_smir["relevant"] == False).sum(),
"( # constant", (df_pvalues_smir["type"] == "const").sum(), ")")
df_pvalues_smir.head()
def calc_rejection_line(df_pvalues, hypothesis_independent, fdr_level):
m = len(df_pvalues.loc[~(df_pvalues.type == "const")])
K = list(range(1, m + 1))
if hypothesis_independent:
C = [1] * m
else:
C = [sum([1.0 / k for k in K])] * m
return [fdr_level * k / m * 1.0 / c for k, c in zip(K, C)]
rejection_line_mann = calc_rejection_line(df_pvalues_mann, HYPOTHESES_INDEPENDENT, FDR_LEVEL)
rejection_line_smir = calc_rejection_line(df_pvalues_smir, HYPOTHESES_INDEPENDENT, FDR_LEVEL)
df_pvalues_mann.index = pd.Series(range(0, len(df_pvalues_mann.index)))
df_pvalues_mann.p_value.where(df_pvalues_mann.relevant)\
.plot(style=".", label="relevant features")
df_pvalues_mann.p_value.where(~df_pvalues_mann.relevant & (df_pvalues_mann.type != "const"))\
.plot(style=".", label="irrelevant features")
df_pvalues_mann.p_value.fillna(1).where(df_pvalues_mann.type == "const")\
.plot(style=".", label="irrelevant (constant) features")
plt.plot(rejection_line_mann, label="rejection line (FDR = " + str(FDR_LEVEL) + ")")
plt.xlabel("Feature #")
plt.ylabel("p-value")
plt.title("Mann-Whitney-U")
plt.legend()
plt.plot()
df_pvalues_smir.index = pd.Series(range(0, len(df_pvalues_smir.index)))
df_pvalues_smir.p_value.where(df_pvalues_smir.relevant)\
.plot(style=".", label="relevant features")
df_pvalues_smir.p_value.where(~df_pvalues_smir.relevant & (df_pvalues_smir.type != "const"))\
.plot(style=".", label="irrelevant features")
df_pvalues_smir.p_value.fillna(1).where(df_pvalues_smir.type == "const")\
.plot(style=".", label="irrelevant (constant) features")
plt.plot(rejection_line_smir, label="rejection line (FDR = " + str(FDR_LEVEL) + ")")
plt.xlabel("Feature #")
plt.ylabel("p-value")
plt.title("Kolmogorov-Smirnov")
plt.legend()
plt.plot()
last_rejected_index = (df_pvalues_mann["relevant"] == True).sum() - 1
margin = 20
a = max(last_rejected_index - margin, 0)
b = min(last_rejected_index + margin, len(df_pvalues_mann) - 1)
df_pvalues_mann[a:b].p_value.where(df_pvalues_mann[a:b].relevant)\
.plot(style=".", label="relevant features")
df_pvalues_mann[a:b].p_value.where(~df_pvalues_mann[a:b].relevant)\
.plot(style=".", label="irrelevant features")
plt.plot(np.arange(a, b), rejection_line_mann[a:b], label="rejection line (FDR = " + str(FDR_LEVEL) + ")")
plt.xlabel("Feature #")
plt.ylabel("p-value")
plt.title("Mann-Whitney-U")
plt.legend()
plt.plot()
last_rejected_index = (df_pvalues_smir["relevant"] == True).sum() - 1
margin = 20
a = max(last_rejected_index - margin, 0)
b = min(last_rejected_index + margin, len(df_pvalues_smir) - 1)
df_pvalues_smir[a:b].p_value.where(df_pvalues_smir[a:b].relevant)\
.plot(style=".", label="relevant features")
df_pvalues_smir[a:b].p_value.where(~df_pvalues_smir[a:b].relevant)\
.plot(style=".", label="irrelevant features")
plt.plot(np.arange(a, b), rejection_line_smir[a:b], label="rejection line (FDR = " + str(FDR_LEVEL) + ")")
plt.xlabel("Feature #")
plt.ylabel("p-value")
plt.title("Kolmogorov-Smirnov")
plt.legend()
plt.plot()
| 0.407569 | 0.977522 |
# Module 13: GUIs and Executables
March 26, 2021
Last time we dived deeper into objects and object-oriented programming (OOP) in Python. We saw how to create own classes, discuss the concept of inheritance, and had a quick look at UML class diagrams. Furthermore, we briefly talked about higher-order functions, which exploit that in Python also functions are objects.
Today we take a look at some additional practical topics: Building graphical user interfaces (GUIs) and executables with Python, which can be very useful for making functionality for third parties, especially when they can/must/should not deal with the code directly.
Next time we will discuss how to implement parallel behaviour in Python.
## Graphical User Interfaces with Tkinter
There is a large number of frameworks and toolkits available for building graphical user interfaces (GUIs) in Python (see e.g. the list at https://wiki.python.org/moin/GuiProgramming). We will focus here on how to create GUIs with Tkinter (see the official reference at https://docs.python.org/3/library/tkinter.html), which is the fairly easy-to-use standard GUI framework included in Python. The following introduction to Tkinter is largely based on the very elaborate “Thinking Tkinter” tutorial available online at http://thinkingtkinter.sourceforge.net/.
The simplest possible Tkinter program is probably the following:
```
import tkinter as tk
root = tk.Tk()
root.mainloop()
```
First the Tkinter library is imported under the name ```tk``` for easier reference. Then an instance of the class ```Tkinter.Tk``` is created, which creates a basic window object. This ```root``` object is the the highest-level GUI component in any Tkinter application, often also referred to as the “toplevel window”. Finally, the ```mainloop``` method of the root object is executed. As the name suggests, it starts the main loop of the application window. This loop runs continuously, waiting for events, handling them when they occur, and only stopping when the window is closed.

Obviously, what needs to happen from here is to add further components to the root window, and implement the functionality to handle events that occur from interaction of a user with the interface. We cannot cover all possibilities in the scope of this lecture, of course, but the following examples should give you an idea how it works.
Two kinds of GUI components are distinguished in Tkinter: containers and widgets. *Widgets* are all the things that are (usually) visible and do things, such as text fields, drop-down lists, buttons, etc. *Containers* are components that, well, contain other components, especially widgets. The most frequently used container class is ```Frame```.
The following example shows how to add a container and a “Say hello!” button to the empty window from above:
```
root = tk.Tk()
container = tk.Frame(root)
container.pack()
hwbutton = tk.Button(container)
hwbutton["text"] = "Say hello!"
hwbutton.pack()
root.mainloop()
```
We create a ```root``` object as before. Then we add a frame ```container``` to the base window. This establishes a logical relationship between the ```container``` and ```root```. Furthermore, the ```pack``` method needs to be called to invoke a “geometry manager” and establish a visual relationship between the object and its parent, to actually make it visible. Similarly, we add a button to the container, set its text and ```pack``` it. Finally, the main loop of the application is started. We get a window with a button that we can click, but nothing else happens, simply because we have not defined yet what should happen (but we will get to that).

When GUI applications get larger, it is usually advisable to follow the object-oriented programming style rather than the procedure-oriented one, and organize the code in classes. For the example from above, that could look as follows:
```
class HelloWorldApp:
def __init__(self,parent):
self.container = tk.Frame(parent)
self.container.pack()
self.hwbutton = tk.Button(self.container)
self.hwbutton["text"] = "Say hello!"
self.hwbutton.pack()
root = tk.Tk()
hwapp = HelloWorldApp(root)
root.mainloop()
```
Adding additional widgets to the application can be done in the same way as adding the button. The following example shows that instead of configuring widgets by using their dictionaries, this can also be done with the ```configure``` method or directly during their instantiation:
```
class HelloWorldApp:
def __init__(self,parent):
self.container = tk.Frame(parent)
self.container.pack()
self.hwbutton = tk.Button(self.container)
self.hwbutton["text"] = "Say hello!"
self.hwbutton.pack()
self.hwtext = tk.Label(self.container)
self.hwtext.configure(text="",background="white")
self.hwtext.pack()
self.gbbutton = tk.Button(self.container, text="Goodbye!", \
background="red")
self.gbbutton.pack()
root = tk.Tk()
hwapp = HelloWorldApp(root)
root.mainloop()
```

Note: If you are on Mac OS X, you might have to run the following code instead to see the button in red:
```
# importing tkmacosx
import tkmacosx as tkmac
class HelloWorldApp:
def __init__(self,parent):
self.container = tk.Frame(parent)
self.container.pack()
self.hwbutton = tk.Button(self.container)
self.hwbutton["text"] = "Say hello!"
self.hwbutton.pack()
self.hwtext = tk.Label(self.container)
self.hwtext.configure(text="",background="white")
self.hwtext.pack()
# using Button from tkmacosx
self.gbbutton = tkmac.Button(self.container, text="Goodbye!", \
background="red")
self.gbbutton.pack()
root = tk.Tk()
hwapp = HelloWorldApp(root)
root.mainloop()
```
The standard way of placing widgets within the container is on top of each other. That is because the default value of the ```side``` parameter of the ```pack``` method is in fact ```top```. By using “bottom”, “left” or “right” alternatively, the orientation can be changed. To avoid unpredictable behavior when e.g. resizing the application window, it is advisable to use the same orientation for all widgets in a container. Change the code above to use ```pack``` always with parameter ```side=”left”``` and see what happens.
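For reference, a minimal stand-alone sketch of that variation (the same three widgets as above, all packed with ```side="left"```) could look like this:
```
import tkinter as tk

root = tk.Tk()
container = tk.Frame(root)
container.pack()
# all widgets in one container, now lined up horizontally
hwtext = tk.Label(container, text="", background="white")
hwtext.pack(side="left")
hwbutton = tk.Button(container, text="Say hello!")
hwbutton.pack(side="left")
gbbutton = tk.Button(container, text="Goodbye!", background="red")
gbbutton.pack(side="left")
root.mainloop()
```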
If we would like, for example, to place the text field above the two buttons, we can easily do that by using two containers (placed on top of each other), of which one contains the text field and the other the two buttons (next to each other):
```
class HelloWorldApp:
def __init__(self,parent):
self.container1 = tk.Frame(parent)
self.container1.pack()
self.hwtext = tk.Label(self.container1)
self.hwtext.configure(text="",background="white")
self.hwtext.pack(side="left")
self.container2 = tk.Frame(parent)
self.container2.pack()
self.hwbutton = tk.Button(self.container2)
self.hwbutton["text"] = "Say hello!"
self.hwbutton.pack(side="left")
self.gbbutton = tk.Button(self.container2, text="Goodbye!", \
background="red")
self.gbbutton.pack(side="left")
root = tk.Tk()
hwapp = HelloWorldApp(root)
root.mainloop()
```

So far for creating a tk application window and adding and arranging GUI elements. Of course we also want the buttons to actually do something when we click on them. To achieve this, we need to do two things: 1) write event handler routines that do the intended things, and 2) bind these routines to the respective widgets and events.
For example, if we want a click on the “Say hello!” button to cause the text “Hello World!” to appear in the text box, and a click on the “Goodbye!” button to cause the window to close, we could define the following two methods in our application class:
```
def hwbuttonClick(self,event):
self.hwtext.configure(text="Hello World!")
def gbbuttonClick(self,event):
self.parent.destroy()
```
The first method simply changes the text in the text field, the second one calls the ```destroy``` method of the root object and thus closes the window. Furthermore, we need to register the methods at the respective buttons with the ```bind``` method. The first parameter of ```bind``` is the event that we want to handle (a click with the left mouse button is called ```“<Button-1>”```) and the function that is to be called. See the complete example with event binding below:
```
class HelloWorldApp:
def __init__(self,parent):
self.parent = parent
self.container1 = tk.Frame(parent)
self.container1.pack()
self.hwtext = tk.Label(self.container1)
self.hwtext.configure(text="",background="white")
self.hwtext.pack(side="left")
self.container2 = tk.Frame(parent)
self.container2.pack()
self.hwbutton = tk.Button(self.container2)
self.hwbutton["text"] = "Say hello!"
self.hwbutton.pack(side="left")
self.hwbutton.bind("<Button-1>", self.hwbuttonClick)
self.gbbutton = tk.Button(self.container2,
text="Goodbye!", background="red")
self.gbbutton.pack(side="left")
self.gbbutton.bind("<Button-1>", self.gbbuttonClick)
def hwbuttonClick(self,event):
self.hwtext.configure(text="Hello World!")
def gbbuttonClick(self,event):
self.parent.destroy()
root = tk.Tk()
hwapp = HelloWorldApp(root)
root.mainloop()
```

Besides the Frame container, the Canvas container is often useful. Basically, it allows for including all kinds of graphics – self-drawn, generated or imported. Here is a small example added to our HelloWorldApp:
```
class HelloWorldApp:
def __init__(self,parent):
self.parent = parent
self.container0 = tk.Canvas(parent, width=100, height=100)
self.container0.create_oval(0,0,100,100,fill="yellow")
self.container0.create_oval(45,45,55,55,fill="red")
self.container0.create_oval(25,25,35,35,fill="blue")
self.container0.create_oval(65,25,75,35,fill="blue")
self.container0.create_arc(25,55,75,80,fill="red",
style="arc",start=180,extent=180)
self.container0.pack()
self.container1 = tk.Frame(parent)
self.container1.pack()
self.hwtext = tk.Label(self.container1)
self.hwtext.configure(text="",background="white")
self.hwtext.pack(side="left")
self.container2 = tk.Frame(parent)
self.container2.pack()
self.hwbutton = tk.Button(self.container2)
self.hwbutton["text"] = "Say hello!"
self.hwbutton.pack(side="left")
self.hwbutton.bind("<Button-1>", self.hwbuttonClick)
self.gbbutton = tk.Button(self.container2,
text="Goodbye!", background="red")
self.gbbutton.pack(side="left")
self.gbbutton.bind("<Button-1>", self.gbbuttonClick)
def hwbuttonClick(self,event):
self.hwtext.configure(text="Hello World!")
def gbbuttonClick(self,event):
self.parent.destroy()
root = tk.Tk()
hwapp = HelloWorldApp(root)
root.mainloop()
```
The code now adds another container to the GUI frame, namely a Canvas container on top of the two previously defined containers. We create it with ```width = 100``` and ```height = 100```, meaning that the canvas will have a size of 100x100 pixels. Onto that canvas, we draw a big yellow circle, one red and two blue circles as well as an arc, which together create a nice smiley face. :-) Note that the coordinate system of the canvas starts in the upper left corner, so x,y=0,0 is the coordinate in the upper left, 100,100 the one in the lower right, etc. The drawing functions expect a “bounding box”, i.e. two coordinate pairs that define the rectangle in which the figure is drawn, thus always 4 numbers as parameters of the methods creating the shapes.

Finally, note that it is also possible to display the name of the app in the window frame, instead of “tk”. All that is required is to add one more line to the main program:
```
root = tk.Tk()
root.title('Hello!')
hwapp = HelloWorldApp(root)
root.mainloop()
```
So much about the basic principles of creating graphical user interfaces in Python with the Tkinter framework. There are a lot more widgets, events and configuration options to be explored, but they follow the same ideas. More advanced information can also be found in the online documentation and tutorials referenced above.
As mentioned in the beginning, several (other) frameworks and toolkits for building GUIs with Python exist. Using toolkits for GUI design often makes it easier to create more “beautiful” interfaces, but on the other hand the code that they generate automatically can be more difficult to understand. If you are interested in more GUI programming, it might nevertheless be worth investigating them further.
## Creating Executables with PyInstaller
In the scientific community, people often like to share their Python programs as plain source code, or as Jupyter notebooks, so that they can easily make changes to the program to adapt it to their own data analysis problems. Also, open-sourcing code of computational experiments is in line with reproducibility standards, etc. In other areas, for example commercial software development, the situation is often different. Customers should use the developed software, but not see the code. They should not need a development environment, but just be able to run a stand-alone version of the program (for historical reasons usually called “executable”, although that is not a very intuitive term for interpreted languages like Python, which you can in principle always directly execute).
The tricky thing about executables (in Python, but also with many other languages) is that they are platform-specific. That is, they can be made for either Windows, Mac OS or Linux platforms, so several versions are needed to make all potential users happy. Furthermore, you can usually only build executables for a specific platform on a machine with the same (kind of) operating system, so professional development teams run different (virtual) machines to be able to do that.
As with many things in the Python ecosystem, there are different frameworks available to “freeze” (generate executables for) Python programs. PyInstaller (https://www.pyinstaller.org/) is one of the few that support all major platforms (Windows, Mac OS and Linux) and most of the recent Python versions, so it is often a good choice, definitely for a course like this one.
My laptop runs on Linux (Ubuntu 16.04) so we will see how it works there. Generally the process on Windows and Mac OS platforms is the same, but in detail it might differ a bit. The PyInstaller manual at https://pyinstaller.readthedocs.io/en/stable/ provides quite elaborate instructions for all platforms (on the process, but also regarding requirements and common errors), so please refer to that when you try to build executables for your Python programs.
So, let’s assume I have already checked that my system meets the listed requirements. If PyInstaller is not installed already, I can simply do that with the command ```pip install pyinstaller``` in the terminal. Then it’s best to run PyInstaller directly from the directory where the (main) Python program file is located, so I go there first:

In principle, all I have to do now is to call PyInstaller with the (main) .py file of the program that I want to turn into an executable, e.g. ```pyinstaller helloworldapp.py```. Let’s try:
 ... 
A lot of output, but basically it informs us that it has successfully created the executable. It is in the ```dist/``` directory that it has created in the current directory:

The actual executable file is the one just named `helloworldapp`, executable from the command line by entering `./helloworldapp` (might be slightly different on other platforms, e.g. a `helloworldapp.exe` file on Windows that can be executed through double-clicking on it). Apparently, there are a lot of other files, too. They are there because the whole required runtime environment (Python interpreter, libraries used, …) needs to be put into the executable, too, so that it is really stand-alone. You don’t want your users/customers to deal with a development environment, worry about dependencies, etc.
With the option ```--onefile``` PyInstaller will pack everything into one single file. That can be easier for users (a single file might look more trustworthy than a large collection of strange-looking files), but in case the program includes related files like a README or License information, they would have to be distributed separately.

Note that PyInstaller will automatically detect which modules are imported by the (main) .py file from which the executable is generated, and include them in the executable as well. In case further files are to be included, such as README or other files with additional information about the program, or sample input data files, PyInstaller needs to be told about them (e.g. via the command line). And of course there are many more options and advanced features that can be relevant especially when creating executables for larger, more complex programs, but the PyInstaller website and community should be able to help with that, too.
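For example, on Linux a call along the lines of ```pyinstaller --onefile --add-data "README.md:." helloworldapp.py``` would include a README file in the bundle (on Windows, the separator in the ```--add-data``` argument is a semicolon instead of a colon). This is only meant to illustrate the option; the exact invocation depends on your program and files.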
## Exercises
Please use Quarterfall to submit and check your answers.
### 1. Number Guessing with GUI (★★★★☆)
In an earlier lecture, we programmed a little command-line number-guessing game. The program would generate a random number between 1 and 10, and then ask the user to guess a number until they hit the right one. When the guess is wrong, it would display a message saying whether it is too large or too small.
Now create a simple GUI for playing the number guessing game. It should look something like:
  
That is, it has a text field (`Label`) to display the different messages. Below this, there is an input field (`Entry`) for the user to enter their guess, a button to check the guess (also triggering the text field to change), and a button to start a new round (generating a new random number and resetting the text and input field).
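The following is only a sketch of the widget layout, not a solution. The game logic and the button callbacks are deliberately left out, and all names are my own:
```
import tkinter as tk

root = tk.Tk()
root.title("Number Guessing")
message = tk.Label(root, text="Guess a number between 1 and 10!")
message.pack()
guess_entry = tk.Entry(root)
guess_entry.pack()
buttons = tk.Frame(root)
buttons.pack()
check_button = tk.Button(buttons, text="Check guess")    # bind the checking logic to this button
check_button.pack(side="left")
new_round_button = tk.Button(buttons, text="New round")  # bind the reset logic to this button
new_round_button.pack(side="left")
root.mainloop()
```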
Optionally, create an executable for this program using pyinstaller.
### 2. QR Code Generator with GUI (★★★★☆)
In a previous exercise you implemented a command-line client for a QR code generation web service. (If you did not complete the exercise, you can use the code from the sample solution as a basis.) Now use the Tkinter framework to build a graphical user interface (GUI) for the functionality. The user should be able to enter a text and the RGB codes for foreground and background color. After a click on a button the QR code is generated and displayed. (The image does not have to be saved as a file, and for simplicity you can always create and process the image in the same format, for example png).
The GUI should look something like (feel free to make a prettier one):

Hint: For displaying the (png) image obtained from the web service on the canvas, the `tkinter.PhotoImage` class and the `tkinter.Canvas.create_image` method might be useful.
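A minimal sketch of that display step (assuming the PNG returned by the web service has been saved as `qrcode.png`; with Tk 8.6 or newer, `PhotoImage` reads PNG files directly):
```
import tkinter as tk

root = tk.Tk()
canvas = tk.Canvas(root, width=200, height=200)
canvas.pack()
qr_image = tk.PhotoImage(file="qrcode.png")             # keep a reference; tkinter does not hold one itself
canvas.create_image(0, 0, anchor="nw", image=qr_image)  # top-left corner of the image at (0, 0)
root.mainloop()
```
`PhotoImage` also accepts a base64-encoded `data` argument, which avoids writing the image to a file at all.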
Optionally, create an executable for this program using pyinstaller.
|
github_jupyter
|
import tkinter as tk
root = tk.Tk()
root.mainloop()
root = tk.Tk()
container = tk.Frame(root)
container.pack()
hwbutton = tk.Button(container)
hwbutton["text"] = "Say hello!"
hwbutton.pack()
root.mainloop()
class HelloWorldApp:
def __init__(self,parent):
self.container = tk.Frame(parent)
self.container.pack()
self.hwbutton = tk.Button(self.container)
self.hwbutton["text"] = "Say hello!"
self.hwbutton.pack()
root = tk.Tk()
hwapp = HelloWorldApp(root)
root.mainloop()
class HelloWorldApp:
def __init__(self,parent):
self.container = tk.Frame(parent)
self.container.pack()
self.hwbutton = tk.Button(self.container)
self.hwbutton["text"] = "Say hello!"
self.hwbutton.pack()
self.hwtext = tk.Label(self.container)
self.hwtext.configure(text="",background="white")
self.hwtext.pack()
self.gbbutton = tk.Button(self.container, text="Goodbye!", \
background="red")
self.gbbutton.pack()
root = tk.Tk()
hwapp = HelloWorldApp(root)
root.mainloop()
# importing tkmacosx
import tkmacosx as tkmac
class HelloWorldApp:
def __init__(self,parent):
self.container = tk.Frame(parent)
self.container.pack()
self.hwbutton = tk.Button(self.container)
self.hwbutton["text"] = "Say hello!"
self.hwbutton.pack()
self.hwtext = tk.Label(self.container)
self.hwtext.configure(text="",background="white")
self.hwtext.pack()
# using Button from tkmacosx
self.gbbutton = tkmac.Button(self.container, text="Goodbye!", \
background="red")
self.gbbutton.pack()
root = tk.Tk()
hwapp = HelloWorldApp(root)
root.mainloop()
class HelloWorldApp:
def __init__(self,parent):
self.container1 = tk.Frame(parent)
self.container1.pack()
self.hwtext = tk.Label(self.container1)
self.hwtext.configure(text="",background="white")
self.hwtext.pack(side="left")
self.container2 = tk.Frame(parent)
self.container2.pack()
self.hwbutton = tk.Button(self.container2)
self.hwbutton["text"] = "Say hello!"
self.hwbutton.pack(side="left")
self.gbbutton = tk.Button(self.container2, text="Goodbye!", \
background="red")
self.gbbutton.pack(side="left")
root = tk.Tk()
hwapp = HelloWorldApp(root)
root.mainloop()
def hwbuttonClick(self,event):
self.hwtext.configure(text="Hello World!")
def gbbuttonClick(self,event):
self.parent.destroy()
class HelloWorldApp:
def __init__(self,parent):
self.parent = parent
self.container1 = tk.Frame(parent)
self.container1.pack()
self.hwtext = tk.Label(self.container1)
self.hwtext.configure(text="",background="white")
self.hwtext.pack(side="left")
self.container2 = tk.Frame(parent)
self.container2.pack()
self.hwbutton = tk.Button(self.container2)
self.hwbutton["text"] = "Say hello!"
self.hwbutton.pack(side="left")
self.hwbutton.bind("<Button-1>", self.hwbuttonClick)
self.gbbutton = tk.Button(self.container2,
text="Goodbye!", background="red")
self.gbbutton.pack(side="left")
self.gbbutton.bind("<Button-1>", self.gbbuttonClick)
def hwbuttonClick(self,event):
self.hwtext.configure(text="Hello World!")
def gbbuttonClick(self,event):
self.parent.destroy()
root = tk.Tk()
hwapp = HelloWorldApp(root)
root.mainloop()
class HelloWorldApp:
def __init__(self,parent):
self.parent = parent
self.container0 = tk.Canvas(parent, width=100, height=100)
self.container0.create_oval(0,0,100,100,fill="yellow")
self.container0.create_oval(45,45,55,55,fill="red")
self.container0.create_oval(25,25,35,35,fill="blue")
self.container0.create_oval(65,25,75,35,fill="blue")
self.container0.create_arc(25,55,75,80,fill="red",
style="arc",start=180,extent=180)
self.container0.pack()
self.container1 = tk.Frame(parent)
self.container1.pack()
self.hwtext = tk.Label(self.container1)
self.hwtext.configure(text="",background="white")
self.hwtext.pack(side="left")
self.container2 = tk.Frame(parent)
self.container2.pack()
self.hwbutton = tk.Button(self.container2)
self.hwbutton["text"] = "Say hello!"
self.hwbutton.pack(side="left")
self.hwbutton.bind("<Button-1>", self.hwbuttonClick)
self.gbbutton = tk.Button(self.container2,
text="Goodbye!", background="red")
self.gbbutton.pack(side="left")
self.gbbutton.bind("<Button-1>", self.gbbuttonClick)
def hwbuttonClick(self,event):
self.hwtext.configure(text="Hello World!")
def gbbuttonClick(self,event):
self.parent.destroy()
root = tk.Tk()
hwapp = HelloWorldApp(root)
root.mainloop()
root = tk.Tk()
root.title('Hello!')
hwapp = HelloWorldApp(root)
root.mainloop()
| 0.339828 | 0.961606 |
```
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
#load data & preview the first image
DATADIR = "C:/Users/Rdi/Desktop/data"
CATEGORIES = ["trash","bottle","can","paper"]
for category in CATEGORIES:
path = os.path.join(DATADIR, category)
for img in os.listdir(path):
img_array = cv2.imread(os.path.join(path,img))
plt.imshow(img_array)
plt.show()
break
break
#show first image
IMG_SIZE = 100
new_array = cv2.resize(img_array, (IMG_SIZE,IMG_SIZE))
plt.imshow(new_array)
plt.show()
training_data = []
#create & resize image to training data
def create_training_data():
for category in CATEGORIES:
path = os.path.join(DATADIR, category)
class_num = CATEGORIES.index(category)
for img in os.listdir(path):
img_array = cv2.imread(os.path.join(path,img))
new_array = cv2.resize(img_array, (IMG_SIZE,IMG_SIZE))
training_data.append([new_array, class_num])
create_training_data()
#total data
print(len(training_data))
import random
#random data for training
random.shuffle(training_data)
#random result
for sample in training_data[:10]:
print(sample[1])
X = []
y = []
#convert to numpy array and reshape
for features, label in training_data:
X.append(features)
y.append(label)
# 100*100 image & 3 color = RGB
X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
import pickle
#data -> pickle
pickle_out = open("X.pickle","wb")
pickle.dump(X, pickle_out)
pickle_out.close()
pickle_out = open("y.pickle","wb")
pickle.dump(y, pickle_out)
pickle_out.close()
pickle_in = open("X.pickle", "rb")
X = pickle.load(pickle_in)
#example numpy array
X[1]
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
from tensorflow import keras
import pickle
#load data from pickle
X = pickle.load(open("X.pickle","rb"))
y = pickle.load(open("y.pickle","rb"))
# 255 = 0xFF is the maximum pixel value; dividing by 255 scales the inputs to the range [0, 1]
X = X/255.0
#neural network
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=(100,100,3))) # 1st convolutional layer
model.add(Conv2D(64, (3, 3), activation='relu')) # 2nd convolutional layer
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu')) # hidden layer
model.add(Dropout(0.5))
model.add(Dense(4, activation='softmax')) #output layer
#model compile
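# sparse_categorical_crossentropy works with the integer class labels (0-3) stored in y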
model.compile(loss='sparse_categorical_crossentropy',optimizer='Adam',metrics=['accuracy'])
#model fit & epochs for 100times
model.fit(X, y, epochs=100)
#save model
model.save('trash.model')
#print stat
test_loss, test_acc = model.evaluate(X, y, verbose=2)
print('\nTest accuracy:', test_acc)
```
|
github_jupyter
|
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
#load data & preview the first image
DATADIR = "C:/Users/Rdi/Desktop/data"
CATEGORIES = ["trash","bottle","can","paper"]
for category in CATEGORIES:
path = os.path.join(DATADIR, category)
for img in os.listdir(path):
img_array = cv2.imread(os.path.join(path,img))
plt.imshow(img_array)
plt.show()
break
break
#show first image
IMG_SIZE = 100
new_array = cv2.resize(img_array, (IMG_SIZE,IMG_SIZE))
plt.imshow(new_array)
plt.show()
training_data = []
#create & resize image to training data
def create_training_data():
for category in CATEGORIES:
path = os.path.join(DATADIR, category)
class_num = CATEGORIES.index(category)
for img in os.listdir(path):
img_array = cv2.imread(os.path.join(path,img))
new_array = cv2.resize(img_array, (IMG_SIZE,IMG_SIZE))
training_data.append([new_array, class_num])
create_training_data()
#total data
print(len(training_data))
import random
#random data for training
random.shuffle(training_data)
#random result
for sample in training_data[:10]:
print(sample[1])
X = []
y = []
#convert to numpy array and reshape
for features, label in training_data:
X.append(features)
y.append(label)
# 100*100 image & 3 color = RGB
X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
import pickle
#data -> pickle
pickle_out = open("X.pickle","wb")
pickle.dump(X, pickle_out)
pickle_out.close()
pickle_out = open("y.pickle","wb")
pickle.dump(y, pickle_out)
pickle_out.close()
pickle_in = open("X.pickle", "rb")
X = pickle.load(pickle_in)
#example numpy array
X[1]
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
from tensorflow import keras
import pickle
#load data from pickle
X = pickle.load(open("X.pickle","rb"))
y = pickle.load(open("y.pickle","rb"))
# 255 = 0xFF is the maximum pixel value; dividing by 255 scales the inputs to the range [0, 1]
X = X/255.0
#neural network
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=(100,100,3))) # 1st convolutional layer
model.add(Conv2D(64, (3, 3), activation='relu')) # 2nd convolutional layer
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu')) # hidden layer
model.add(Dropout(0.5))
model.add(Dense(4, activation='softmax')) #output layer
#model compile
model.compile(loss='sparse_categorical_crossentropy',optimizer='Adam',metrics=['accuracy'])
#model fit & epochs for 100times
model.fit(X, y, epochs=100)
#save model
model.save('trash.model')
#print stat
test_loss, test_acc = model.evaluate(X, y, verbose=2)
print('\nTest accuracy:', test_acc)
| 0.438304 | 0.383382 |
```
import os, sys
import pandas as pd
import numpy as np
import glob
import pyarrow as pa
import pyarrow.parquet as pq
import pyarrow.dataset as ds
import statsmodels.formula.api as smf
import statsmodels.api as sm
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
import itertools
from tqdm.auto import tqdm
from multiprocessing import Pool
from stargazer.stargazer import Stargazer
from stargazer_mod import Stargazer as S_alt
from adjustText import adjust_text
import ipywidgets as widgets
import itertools
import copy
sns.set_context("paper", font_scale=1.7)
```
# Load Data
```
# Data for just SPY
daily_df = pd.read_feather('../../data/proc/SPY_daily.feather')
# Sort chronologically
daily_df = daily_df.sort_values(by = ['datetime'])
# Check
daily_df.assign(cumret = daily_df['log_return'].cumsum()).plot('datetime', 'cumret')
```
# Analyze
```
## Add regression variables
# Construct SJ_vol: difference between positive and negative realized (semi)volatility
daily_df['SJ_vol'] = daily_df['real_vol_pos'] - daily_df['real_vol_neg']
# Differences
daily_df = daily_df.sort_values(by = 'datetime')
for col in ['real_var', 'real_var_pos', 'real_var_neg', 'SJ', 'real_vol', 'real_vol_pos', 'real_vol_neg', 'SJ_vol']:
daily_df[f'{col}_diff'] = daily_df[f'{col}'].diff(1)
```
## Daily Regressions (Main)
```
## Regression functions
def add_smooth_return(daily_df, window):
daily_df['log_return_rolling'] = daily_df.groupby(['permno'])['log_return'].transform(
lambda x: x.rolling(window, min_periods = window).mean())
daily_df['log_return_rolling_lead'] = daily_df.groupby(['permno'])['log_return_rolling'].transform(
lambda x: x.shift(-window))
return daily_df
def add_smooth_regressor(daily_df, window, regressor):
daily_df[f'{regressor}_rolling_mean'] = daily_df.groupby(['permno'])[regressor].transform(
lambda x: x.rolling(window, min_periods = window).mean())
return daily_df
def run_regression(daily_df, regressors, window, sample_period):
# Define sample for regression and add necessary RHS vars
reg_df = add_smooth_return(daily_df.copy(), window)
for regressor in regressors:
reg_df = add_smooth_regressor(reg_df, window, regressor)
# Filter by sample period
reg_df = reg_df.loc[reg_df['datetime'].between(*sample_period)]
# Fit
T = len(reg_df['log_return_rolling_lead'].dropna())
fit = smf.ols('log_return_rolling_lead ~ ' + ' + '.join(regressors), reg_df).fit(
cov_type="HAC", cov_kwds={"maxlags": int(0.75 * T ** (1 / 3))}
)
# Fit - alt for t-stats
T = len(reg_df[f'{regressors[0]}_rolling_mean'].dropna())
fit_alt = smf.ols('log_return_lead ~ ' + ' + '.join([x + '_rolling_mean' for x in regressors]), reg_df).fit(
cov_type="HAC", cov_kwds={"maxlags": int(0.75 * T ** (1 / 3))}
)
# Save results
result = [sample_period, regressors, window, fit.params, fit.tvalues, fit_alt.tvalues, fit]
return result
# Get regression results
date_param_list = [('2002', '2021'), ('2002', '2020'), ('2020', '2021')]
for date_param in date_param_list:
print(r'(' + date_param[0] + '-' + str(int(date_param[1])-1) + ')')
results_list = []
for window in (1,5,21):
for regressors in [['real_vol'], ['real_vol_pos', 'real_vol_neg']]:
result = run_regression(daily_df, regressors, window, date_param)
results_list.append(result)
## Format finished results
results_df = pd.DataFrame(results_list, columns = ['sample_period', 'regressors', 'window', 'beta', 't_stats', 't_stats_alt', 'fit'])
# Get fits and replace t-stats
fit_list_alt = []
for i in range(len(results_df)):
fit_alt = copy.deepcopy(results_df.iloc[i,:]['fit'])
t_stats = results_df.iloc[i]['t_stats_alt'].copy()
t_stats.index = fit_alt.tvalues.index
fit_alt.tvalues = t_stats
fit_list_alt.append(fit_alt)
sg = S_alt(fit_list_alt)
sg.cov_map = {'real_vol': r'$\sqrt{RV}$', 'real_vol_pos': r'$\sqrt{RV^+}$', 'real_vol_neg': r'$\sqrt{RV^-}$'}
sg.show_stars = False
sg.show_tstats = True
sg.show_residual_std_err = False
sg.show_f_statistic = False
sg.show_adj_r2 = False
print(sg.render_latex())
print(' '*100)
```
### With SJ vol (scratch tables)
```
# Get regression results
date_param_list = [('2002', '2021'), ('2002', '2020'), ('2020', '2021')]
for date_param in date_param_list:
print(r'\begin{landscape}')
print(r'\subsubsection{' + date_param[0] + '-' + str(int(date_param[1])-1) + '}')
results_list = []
for window in (1,5,21):
for regressors in [['real_vol'], ['real_vol_pos', 'real_vol_neg'], ['SJ_vol']]:
result = run_regression(daily_df, regressors, window, date_param)
results_list.append(result)
## Format finished results
results_df = pd.DataFrame(results_list, columns = ['sample_period', 'regressors', 'window', 'beta', 't_stats', 't_stats_alt', 'fit'])
# Get fits and replace t-stats
fit_list_alt = []
for i in range(len(results_df)):
fit_alt = copy.deepcopy(results_df.iloc[i,:]['fit'])
fit_alt.tvalues = results_df.iloc[i]['t_stats_alt']
fit_alt.tvalues.index = results_df.iloc[i]['t_stats'].index
fit_list_alt.append(fit_alt)
sg = S_alt(fit_list_alt)
sg.cov_map = {'real_vol': r'$\sqrt{RV}$', 'real_vol_pos': r'$\sqrt{RV^+}$',
'real_vol_neg': r'$\sqrt{RV^-}$', 'SJ_vol': r'$\sqrt{RV^+} - \sqrt{RV^-}$'}
sg.show_stars = False
sg.show_tstats = True
sg.show_residual_std_err = False
sg.show_f_statistic = False
sg.show_adj_r2 = False
sg.column_labels = ['Daily'] + ['Weekly'] + ['Monthly']
sg.column_separators = [3,3,3]
print(sg.render_latex())
print(' '*100)
print(r'\end{landscape}')
```
|
github_jupyter
|
import os, sys
import pandas as pd
import numpy as np
import glob
import pyarrow as pa
import pyarrow.parquet as pq
import pyarrow.dataset as ds
import statsmodels.formula.api as smf
import statsmodels.api as sm
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
import itertools
from tqdm.auto import tqdm
from multiprocessing import Pool
from stargazer.stargazer import Stargazer
from stargazer_mod import Stargazer as S_alt
from adjustText import adjust_text
import ipywidgets as widgets
import itertools
import copy
sns.set_context("paper", font_scale=1.7)
# Data for just SPY
daily_df = pd.read_feather('../../data/proc/SPY_daily.feather')
# Sort chronologically
daily_df = daily_df.sort_values(by = ['datetime'])
# Check
daily_df.assign(cumret = daily_df['log_return'].cumsum()).plot('datetime', 'cumret')
## Add regression variables
# Construct SJ_vol: difference between positive and negative realized (semi)volatility
daily_df['SJ_vol'] = daily_df['real_vol_pos'] - daily_df['real_vol_neg']
# Differences
daily_df = daily_df.sort_values(by = 'datetime')
for col in ['real_var', 'real_var_pos', 'real_var_neg', 'SJ', 'real_vol', 'real_vol_pos', 'real_vol_neg', 'SJ_vol']:
daily_df[f'{col}_diff'] = daily_df[f'{col}'].diff(1)
## Regression functions
def add_smooth_return(daily_df, window):
daily_df['log_return_rolling'] = daily_df.groupby(['permno'])['log_return'].transform(
lambda x: x.rolling(window, min_periods = window).mean())
daily_df['log_return_rolling_lead'] = daily_df.groupby(['permno'])['log_return_rolling'].transform(
lambda x: x.shift(-window))
return daily_df
def add_smooth_regressor(daily_df, window, regressor):
daily_df[f'{regressor}_rolling_mean'] = daily_df.groupby(['permno'])[regressor].transform(
lambda x: x.rolling(window, min_periods = window).mean())
return daily_df
def run_regression(daily_df, regressors, window, sample_period):
# Define sample for regression and add necessary RHS vars
reg_df = add_smooth_return(daily_df.copy(), window)
for regressor in regressors:
reg_df = add_smooth_regressor(reg_df, window, regressor)
# Filter by sample period
reg_df = reg_df.loc[reg_df['datetime'].between(*sample_period)]
# Fit
T = len(reg_df['log_return_rolling_lead'].dropna())
fit = smf.ols('log_return_rolling_lead ~ ' + ' + '.join(regressors), reg_df).fit(
cov_type="HAC", cov_kwds={"maxlags": int(0.75 * T ** (1 / 3))}
)
# Fit - alt for t-stats
T = len(reg_df[f'{regressors[0]}_rolling_mean'].dropna())
fit_alt = smf.ols('log_return_lead ~ ' + ' + '.join([x + '_rolling_mean' for x in regressors]), reg_df).fit(
cov_type="HAC", cov_kwds={"maxlags": int(0.75 * T ** (1 / 3))}
)
# Save results
result = [sample_period, regressors, window, fit.params, fit.tvalues, fit_alt.tvalues, fit]
return result
# Get regression results
date_param_list = [('2002', '2021'), ('2002', '2020'), ('2020', '2021')]
for date_param in date_param_list:
print(r'(' + date_param[0] + '-' + str(int(date_param[1])-1) + ')')
results_list = []
for window in (1,5,21):
for regressors in [['real_vol'], ['real_vol_pos', 'real_vol_neg']]:
result = run_regression(daily_df, regressors, window, date_param)
results_list.append(result)
## Format finished results
results_df = pd.DataFrame(results_list, columns = ['sample_period', 'regressors', 'window', 'beta', 't_stats', 't_stats_alt', 'fit'])
# Get fits and replace t-stats
fit_list_alt = []
for i in range(len(results_df)):
fit_alt = copy.deepcopy(results_df.iloc[i,:]['fit'])
t_stats = results_df.iloc[i]['t_stats_alt'].copy()
t_stats.index = fit_alt.tvalues.index
fit_alt.tvalues = t_stats
fit_list_alt.append(fit_alt)
sg = S_alt(fit_list_alt)
sg.cov_map = {'real_vol': r'$\sqrt{RV}$', 'real_vol_pos': r'$\sqrt{RV^+}$', 'real_vol_neg': r'$\sqrt{RV^-}$'}
sg.show_stars = False
sg.show_tstats = True
sg.show_residual_std_err = False
sg.show_f_statistic = False
sg.show_adj_r2 = False
print(sg.render_latex())
print(' '*100)
# Get regression results
date_param_list = [('2002', '2021'), ('2002', '2020'), ('2020', '2021')]
for date_param in date_param_list:
print(r'\begin{landscape}')
print(r'\subsubsection{' + date_param[0] + '-' + str(int(date_param[1])-1) + '}')
results_list = []
for window in (1,5,21):
for regressors in [['real_vol'], ['real_vol_pos', 'real_vol_neg'], ['SJ_vol']]:
result = run_regression(daily_df, regressors, window, date_param)
results_list.append(result)
## Format finished results
results_df = pd.DataFrame(results_list, columns = ['sample_period', 'regressors', 'window', 'beta', 't_stats', 't_stats_alt', 'fit'])
# Get fits and replace t-stats
fit_list_alt = []
for i in range(len(results_df)):
fit_alt = copy.deepcopy(results_df.iloc[i,:]['fit'])
fit_alt.tvalues = results_df.iloc[i]['t_stats_alt']
fit_alt.tvalues.index = results_df.iloc[i]['t_stats'].index
fit_list_alt.append(fit_alt)
sg = S_alt(fit_list_alt)
sg.cov_map = {'real_vol': r'$\sqrt{RV}$', 'real_vol_pos': r'$\sqrt{RV^+}$',
'real_vol_neg': r'$\sqrt{RV^-}$', 'SJ_vol': r'$\sqrt{RV^+} - \sqrt{RV^-}$'}
sg.show_stars = False
sg.show_tstats = True
sg.show_residual_std_err = False
sg.show_f_statistic = False
sg.show_adj_r2 = False
sg.column_labels = ['Daily'] + ['Weekly'] + ['Monthly']
sg.column_separators = [3,3,3]
print(sg.render_latex())
print(' '*100)
print(r'\end{landscape}')
| 0.311951 | 0.650994 |
# Exercise 1: Basic Annotations
This exercise provides an introduction to the basic ruta types and how simple annotations are created.
#### Defining the document text
First, we define some input text for the following examples. In UIMA, this document text is also called **S**ubject **of** **A**nalysis (**SOFA**).
```
%%documentText
The dog barked at the cat.
Dogs, cats and mice are mammals.
Zander and tuna are fishes.
```
### Types
A central component in UIMA is the `TypeSystem`. A TypeSystem contains a list of `Types`. Each Type has a distinct name and optionally a list of features. This determines how the information is stored.
#### Ruta Basic Types
Ruta provides some initial annotations that are automatically generated for each document. Important Ruta Basic Types are:
* `ANY`: Any single Token, e.g. “hello” or “123”
* `W`: Any word, e.g. “hello”
* `NUM`: Any number, e.g. “123”
* `SPECIAL`: Any special character, e.g. “-”
* `COMMA` (,) `COLON` (:) `SEMICOLON` (;) `PERIOD` (.) `EXCLAMATION` (!) `QUESTION` (?)
#### Declaring a new Type
We can also declare new types using the `DECLARE` command. In the following, we define a new type `Animal`. With that, we can create annotations that will contain information about mentions of animals in the text.
```
DECLARE Animal;
// Highlight Animal annotation in the following output
COLOR(Animal, "lightgreen");
```
### Creating annotations
In the following, we present different options that can be used to create new annotations of type Animal.
#### Option 1: Direct string matching
The following line creates a new annotation of type `Animal` on all occurrences of "dog" in the document. Please note that this literal string matching may be inefficient if it is used repeatedly and for large documents.
```
"dog" {-> Animal};
```
#### Option 2: General approach using a condition-action block
While the simple string matching in option 1 may be useful for quickly annotating simple keywords, Ruta provides a more powerful logic for complex annotations. The following line illustrates the most basic form of a condition-action.
```
W{REGEXP("Dogs|cats") -> Animal};
```
**Explanation**: The rule starts with the Ruta basic type `W` that iterates over all words in the document. For each word, it is checked whether the condition `REGEXP("Dogs|cats")` is satisfied. This condition is a regular expression that matches if the word is "Dogs" or "cats" (case sensitive). If the condition is satisfied, then the action is executed. In that case, the action is to create a new annotation of type `Animal`. You will see more complex conditions and actions in Exercise 4.
*Hint*: Please note that "dog" is still highlighted as the annotations are kept across cells.
An example for a slightly different action block is given below. It matches on any word (W) and references it with the label "w". Then it checks whether its covered text (ct) is "mice" in the condition, and if yes, then it creates a new Animal annotation.
```
w:W{w.ct == "mice" -> Animal};
```
#### Option 3: Using a wordlist
If many terms should be annotated, it is useful to place the words in a wordlist. The following snippet shows how we can annotate mentions of fishes by using a wordlist `fishes.txt`, a simple external dictionary file.
```
WORDLIST fishList = "resources/fishes.txt";
// Perform lookup for fishes and annotate them with the type Animal
// The third parameter specifies whether the lookup should be case insensitive.
MARKFAST(Animal, fishList, true);
```
|
github_jupyter
|
%%documentText
The dog barked at the cat.
Dogs, cats and mice are mammals.
Zander and tuna are fishes.
DECLARE Animal;
// Highlight Animal annotation in the following output
COLOR(Animal, "lightgreen");
"dog" {-> Animal};
W{REGEXP("Dogs|cats") -> Animal};
w:W{w.ct == "mice" -> Animal};
WORDLIST fishList = "resources/fishes.txt";
// Perform lookup for fishes and annotate them with the type Animal
// The third parameter specifies whether the lookup should be case insensitive.
MARKFAST(Animal, fishList, true);
| 0.318061 | 0.989119 |
#### Erin Orbits, Hmk 3
For this homework, your job is to assist in determining how to do end-of-day adjustments in the number of bikes at stations so that all stations will have enough bikes for the next day of operation (as estimated by the weekday average for the station for the year).
Your assistance will help in constructing a plan for each day of the week that specifies how many bikes should be moved from each station and how many bikes must be delivered to each station.
Your assignment is to construct plots of the differences between 'from' and 'to' counts for each station by day of the week. Do this as a set of 7 subplots. You should use at least one function to construct your plots.
```
import pandas as pd
import numpy as np
import scipy
import seaborn
import matplotlib.pyplot as plt
# The following ensures that the plots are in the notebook
%matplotlib inline
```
**Imported the Data**
```
df_original = pd.read_csv("2015_trip_data.csv")
```
#### Created four new columns with the days of the week and dates extracted from the starttime and stoptime for each row.
Note: I was not sure whether to delete the rows that mention the "Pronto shop" from the original data frame, but given the goal and the fact that there were only a dozen or so such mentions in the data, I decided to go ahead and delete the rows with trips to and from the "Pronto shop."
```
df_original = df_original[df_original.from_station_name != "Pronto shop"]
df_original = df_original[df_original.to_station_name != "Pronto shop"]
```
Add "day of week" and "date" columns to the original data frame to correspond with the "To" and "From" groups using the days of the week and the dates from the starttime and stoptime columns.
```
df_original["from_day_of_week"] = pd.DatetimeIndex(df_original['starttime']).dayofweek
df_original["to_day_of_week"] = pd.DatetimeIndex(df_original['stoptime']).dayofweek
df_original["from_date"] = pd.DatetimeIndex(df_original['starttime']).date
df_original["to_date"] = pd.DatetimeIndex(df_original['stoptime']).date
```
Check that the four new columns were added to the data frame.
```
df_original.columns
```
#### Counted the number of bikes going to and from each station on each day of the week, and counted the number of days in order to take the average.
With groupby you can group by multiple values and use aggregation functions like mean, median, sum, minimum, maximum, standard deviation, or count: <br>
> `<data object>.groupby(<grouping values>).<aggregate>()`
```
# Create a groupby variable that groups stations by day of week
# and counts the days of the week
toStation_day_count = df_original.groupby(["to_station_id", "to_day_of_week"])["to_day_of_week"].count()
toStation_day_count.head(3)
fromStation_day_count = df_original.groupby(["from_station_id", "from_day_of_week"])["from_day_of_week"].count()
fromStation_day_count.head(3)
to_day_count = df_original.groupby(['to_day_of_week','to_date']).size().reset_index()
to_day_count = to_day_count.groupby(['to_day_of_week'])['to_date'].count()
from_day_count = df_original.groupby(['from_day_of_week','from_date']).size().reset_index()
from_day_count = from_day_count.groupby(['from_day_of_week'])['from_date'].count()
to_avg_bikes = toStation_day_count / to_day_count
from_avg_bikes = fromStation_day_count / from_day_count
deltaBikesPerStation = to_avg_bikes.unstack() - from_avg_bikes.unstack()
deltaBikesPerStation.head(4)
```
**Plotted the average difference in bike counts per station per day of the week**
```
def plot_bar1(df, column, opts):
"""
Does a bar plot for a single column.
:param pd.DataFrame df:
:param str column: name of the column to plot
:param dict opts: key is plot attribute
"""
n_groups = len(df.index)
index = np.arange(n_groups) # The "raw" x-axis of the bar plot
rects1 = plt.bar(index, df[column])
plt.subplots_adjust(left=None, bottom=None, right=None,
top=None, wspace=None, hspace=0.4)
if 'xlabel' in opts:
plt.xlabel(opts['xlabel'])
if 'ylabel' in opts:
plt.ylabel(opts['ylabel'])
    if opts.get('xticks', False):  # only draw station labels when requested
plt.xticks(index, df.index) # Convert "raw" x-axis into labels
_, labels = plt.xticks() # Get the new labels of the plot
plt.setp(labels, rotation=90) # Rotate labels to make them readable
else:
labels = ['' for x in df.index]
plt.xticks(index, labels)
if 'ylim' in opts:
plt.ylim(opts['ylim'])
if 'title' in opts:
plt.title(opts['title'])
def plotNbar(df, columns, opts):
"""
    Does a bar plot for each column, one subplot per column.
    :param pd.DataFrame df:
    :param list-of-str columns: names of the columns to plot
:param dict opts: key is plot attribute
"""
num_columns = len(columns)
days = ["Monday", "Tuesday", "Wednesday",
"Thursday", "Friday", "Saturday", "Sunday"]
opts['title'] = days
    local_opts = dict(opts)  # Make a (shallow) copy so per-subplot settings don't overwrite opts
local_opts['title'] = days
idx = 0
for column in columns:
idx += 1
local_opts['xticks'] = False
local_opts['xlabel'] = ''
local_opts['title'] = opts['title'][column]
if idx == num_columns:
local_opts['xticks'] = True
local_opts['xlabel'] = opts['xlabel']
plt.subplot(num_columns, 1, idx)
# calls plot_bar1 to plot each column
plot_bar1(df, column, local_opts)
seaborn.axes_style()
current_palette = seaborn.color_palette()
seaborn.palplot(seaborn.color_palette("GnBu_d"))
with seaborn.color_palette("RdBu_r", 7):
seaborn.set_style("whitegrid", {"axes.facecolor": ".9"})
fig = plt.figure(figsize=(20, 40)) # Controls global properties of the bar plot
opts = {'xlabel': 'Stations',
'ylabel': 'Average Daily Bike Counts', 'ylim': [-12, 10]}
plotNbar(deltaBikesPerStation, [0, 1, 2, 3, 4, 5, 6], opts)
```
|
github_jupyter
|
import pandas as pd
import numpy as np
import scipy
import seaborn
import matplotlib.pyplot as plt
# The following ensures that the plots are in the notebook
%matplotlib inline
df_original = pd.read_csv("2015_trip_data.csv")
df_original = df_original[df_original.from_station_name != "Pronto shop"]
df_original = df_original[df_original.to_station_name != "Pronto shop"]
df_original["from_day_of_week"] = pd.DatetimeIndex(df_original['starttime']).dayofweek
df_original["to_day_of_week"] = pd.DatetimeIndex(df_original['stoptime']).dayofweek
df_original["from_date"] = pd.DatetimeIndex(df_original['starttime']).date
df_original["to_date"] = pd.DatetimeIndex(df_original['stoptime']).date
df_original.columns
# Create a groupby variable that groups stations by day of week
# and counts the days of the week
toStation_day_count = df_original.groupby(["to_station_id", "to_day_of_week"])["to_day_of_week"].count()
toStation_day_count.head(3)
fromStation_day_count = df_original.groupby(["from_station_id", "from_day_of_week"])["from_day_of_week"].count()
fromStation_day_count.head(3)
to_day_count = df_original.groupby(['to_day_of_week','to_date']).size().reset_index()
to_day_count = to_day_count.groupby(['to_day_of_week'])['to_date'].count()
from_day_count = df_original.groupby(['from_day_of_week','from_date']).size().reset_index()
from_day_count = from_day_count.groupby(['from_day_of_week'])['from_date'].count()
to_avg_bikes = toStation_day_count / to_day_count
from_avg_bikes = fromStation_day_count / from_day_count
deltaBikesPerStation = to_avg_bikes.unstack() - from_avg_bikes.unstack()
deltaBikesPerStation.head(4)
def plot_bar1(df, column, opts):
"""
Does a bar plot for a single column.
:param pd.DataFrame df:
:param str column: name of the column to plot
:param dict opts: key is plot attribute
"""
n_groups = len(df.index)
index = np.arange(n_groups) # The "raw" x-axis of the bar plot
rects1 = plt.bar(index, df[column])
plt.subplots_adjust(left=None, bottom=None, right=None,
top=None, wspace=None, hspace=0.4)
if 'xlabel' in opts:
plt.xlabel(opts['xlabel'])
if 'ylabel' in opts:
plt.ylabel(opts['ylabel'])
    if opts.get('xticks', False):  # only draw station labels when requested
plt.xticks(index, df.index) # Convert "raw" x-axis into labels
_, labels = plt.xticks() # Get the new labels of the plot
plt.setp(labels, rotation=90) # Rotate labels to make them readable
else:
labels = ['' for x in df.index]
plt.xticks(index, labels)
if 'ylim' in opts:
plt.ylim(opts['ylim'])
if 'title' in opts:
plt.title(opts['title'])
def plotNbar(df, columns, opts):
"""
    Does a bar plot for each column, one subplot per column.
    :param pd.DataFrame df:
    :param list-of-str columns: names of the columns to plot
:param dict opts: key is plot attribute
"""
num_columns = len(columns)
days = ["Monday", "Tuesday", "Wednesday",
"Thursday", "Friday", "Saturday", "Sunday"]
opts['title'] = days
    local_opts = dict(opts)  # Make a (shallow) copy so per-subplot settings don't overwrite opts
local_opts['title'] = days
idx = 0
for column in columns:
idx += 1
local_opts['xticks'] = False
local_opts['xlabel'] = ''
local_opts['title'] = opts['title'][column]
if idx == num_columns:
local_opts['xticks'] = True
local_opts['xlabel'] = opts['xlabel']
plt.subplot(num_columns, 1, idx)
# calls plot_bar1 to plot each column
plot_bar1(df, column, local_opts)
seaborn.axes_style()
current_palette = seaborn.color_palette()
seaborn.palplot(seaborn.color_palette("GnBu_d"))
with seaborn.color_palette("RdBu_r", 7):
seaborn.set_style("whitegrid", {"axes.facecolor": ".9"})
fig = plt.figure(figsize=(20, 40)) # Controls global properties of the bar plot
opts = {'xlabel': 'Stations',
'ylabel': 'Average Daily Bike Counts', 'ylim': [-12, 10]}
plotNbar(deltaBikesPerStation, [0, 1, 2, 3, 4, 5, 6], opts)
| 0.603815 | 0.967808 |
```
'''
Adaptation of the Hilbert-CNN at https://openreview.net/forum?id=HJvvRoe0W
Works by encoding each nucleotide as a one-hot vector,
then fitting the sequence onto an image-like grid using a Hilbert curve,
so that each 'pixel' is a 1-mer represented by a length-4 one-hot vector
'''
from keras.layers import Conv2D, BatchNormalization, AveragePooling2D, Dense, Dropout, SeparableConv2D, Add
from keras.layers import Activation, Input, Concatenate, Flatten, MaxPooling2D, Reshape, GaussianNoise
from keras.models import Model, load_model
from keras.optimizers import SGD
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, CSVLogger, LearningRateScheduler
import image
from keras import backend as K
import numpy as np
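# --- Illustrative sketch only (not used below): how a one-hot encoded ACGT sequence could be
# --- laid out on a 32x32 Hilbert-curve grid. The .npy grids read by the generators were
# --- produced upstream; the function and variable names here are my own, not from the paper.
def hilbert_d2xy(order, d):
    """Map a 1-D index d to (x, y) coordinates on a Hilbert curve with side length 2**order."""
    x = y = 0
    t = d
    s = 1
    while s < 2 ** order:
        rx = 1 & (t // 2)
        ry = 1 & (t ^ rx)
        if ry == 0:                      # rotate the quadrant when needed
            if rx == 1:
                x, y = s - 1 - x, s - 1 - y
            x, y = y, x
        x += s * rx
        y += s * ry
        t //= 4
        s *= 2
    return x, y

def sequence_to_hilbert_grid(seq, order=5):
    """One-hot encode a DNA sequence and place base i at the i-th cell of the Hilbert curve."""
    channel = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    side = 2 ** order                    # order 5 -> a 32x32x4 grid, matching the target size below
    grid = np.zeros((side, side, 4), dtype=np.float32)
    for d, base in enumerate(seq[:side * side]):
        x, y = hilbert_d2xy(order, d)
        grid[y, x, channel[base]] = 1.0
    return grid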
start_target_size = (32, 32, 4)
batch_size = 16
train_path = 'D:/Projects/iSynPro/iSynPro/HilbertCNN/train_val_npys/6count/1mer/train'
test_path = 'D:/Projects/iSynPro/iSynPro/HilbertCNN/train_val_npys/6count/1mer/test'
# define generators
train_datagen = image.ImageDataGenerator()
test_datagen = image.ImageDataGenerator()
train_generator = train_datagen.flow_np_from_directory(train_path,
target_size= start_target_size,
batch_size=batch_size,
class_mode='binary',
seed=42)
validation_generator = test_datagen.flow_np_from_directory(test_path,
target_size= start_target_size,
batch_size=batch_size,
class_mode='binary',
seed=42)
# reset the Keras/TF session; drop any previously built model (ignore if none exists yet)
K.clear_session()
try:
    del model
except NameError:
    pass
# original implementation of Hilbert-CNN
# https://openreview.net/forum?id=HJvvRoe0W
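# Each computation block runs two parallel two-layer convolution branches and concatenates
# their outputs with the block input (a dense/residual-style skip connection),
# then applies batch normalization and a ReLU.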
def computation_block(in_layer, n_filters, filtersize_a, filtersize_b, filtersize_c, filtersize_d):
# residual 1
p1 = Conv2D(n_filters, (filtersize_a, filtersize_a), strides=(1, 1), padding='same')(in_layer)
p1 = BatchNormalization()(p1)
p1 = Activation('relu')(p1)
p1 = Conv2D(n_filters, (filtersize_b, filtersize_b), strides=(1, 1), padding='same')(p1)
p1 = BatchNormalization()(p1)
# residual 2
p2 = Conv2D(n_filters, (filtersize_c, filtersize_c), strides=(1, 1), padding='same')(in_layer)
p2 = BatchNormalization()(p2)
p2 = Activation('relu')(p2)
p2 = Conv2D(n_filters, (filtersize_d, filtersize_d), strides=(1, 1), padding='same')(p2)
p2 = BatchNormalization()(p2)
x = Concatenate()([in_layer, p1, p2])
x = BatchNormalization()(x)
x = Activation('relu')(x)
return x
# stem
inputs = Input(shape=[32, 32, 4])
x = GaussianNoise(0.2)(inputs)
x = Conv2D(64, (7, 7), strides=(1, 1), padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (5, 5), strides=(1, 1), padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = computation_block(x, 4, 8, 4, 4, 3)
x = computation_block(x, 4, 3, 3, 3, 3)
# mid-stem
x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = computation_block(x, 4, 2, 4, 4, 3)
x = computation_block(x, 4, 2, 2, 2, 2)
x = computation_block(x, 4, 3, 2, 2, 3)
# exit stem
x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x) # final avgpool kept here; some variants below omit it to retain dimensionality
x = Flatten()(x)
# FC layers
x = Dense(1024, activation='relu')(x)
x = Dense(1024, activation='relu')(x)
#x = Dropout(0.2)(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer= SGD(lr= 0.01, momentum=0.9),
loss= 'binary_crossentropy',
metrics=[ 'binary_accuracy'])
model.summary()
train_size = 17588
test_size = 1955
learning_rate = 1e-3
learning_decay = 0.94
batch_size = 16
#our callbacks
lr_descent = ReduceLROnPlateau(monitor='val_loss',
factor=0.5,
patience=5,
verbose=1,
mode='auto',
epsilon=0.0001,
cooldown=1,
min_lr=0)
root_path = 'D:/Projects/Github/SyntheticPromoter/HilbertCNN/weights/1mer/vanilla_hilbertcnn'
save_model = ModelCheckpoint(root_path + '/weights-{epoch:02d}-{val_loss:.2f}.hdf5',
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
csv_path = '{}/training_history.csv'.format(root_path)
csv_logger = CSVLogger(csv_path, separator=',', append=False)
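# Step-wise exponential decay: the learning rate is multiplied by learning_decay every second
# epoch; the LearningRateScheduler below applies this schedule, overriding the optimizer's initial lr.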
def incep_resnet_schedule(epoch):
if epoch % 2 == 0:
return learning_rate*(learning_decay**(epoch))
else:
return learning_rate*(learning_decay**((epoch)-1.0))
lr_scheduler = LearningRateScheduler(incep_resnet_schedule)
#tracking = keras.callbacks.ProgbarLogger(count_mode='samples')
#train the model
model.fit_generator(train_generator,
steps_per_epoch= train_size // batch_size,
epochs=30,
validation_data= validation_generator,
validation_steps= test_size // batch_size,
verbose=2,
callbacks = [save_model, csv_logger, lr_scheduler])
# Modified version of Hilbert-CNN
# batchnorm after activation
def computation_block(in_layer, n_filters, filtersize_a, filtersize_b, filtersize_c, filtersize_d):
# residual 1
p1 = Conv2D(n_filters, (filtersize_a, filtersize_a), strides=(1, 1), padding='same', activation='relu')(in_layer)
p1 = BatchNormalization()(p1)
p1 = Conv2D(n_filters, (filtersize_b, filtersize_b), strides=(1, 1), padding='same', activation='relu')(p1)
p1 = BatchNormalization()(p1)
# residual 2
p2 = Conv2D(n_filters, (filtersize_c, filtersize_c), strides=(1, 1), padding='same', activation='relu')(in_layer)
p2 = BatchNormalization()(p2)
p2 = Conv2D(n_filters, (filtersize_d, filtersize_d), strides=(1, 1), padding='same', activation='relu')(p2)
p2 = BatchNormalization()(p2)
x = Concatenate()([in_layer, p1, p2])
#x = Activation('relu')(x)
#x = BatchNormalization()(x)
return x
# stem
inputs = Input(shape=[32, 32, 4])
x = GaussianNoise(0.3)(inputs)
x = Conv2D(64, (7, 7), strides=(1, 1), padding='same', activation='relu')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (5, 5), strides=(1, 1), padding='same', activation='relu')(x)
x = BatchNormalization()(x)
x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = computation_block(x, 4, 8, 4, 4, 3)
x = computation_block(x, 4, 3, 3, 3, 3)
# mid-stem
x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = computation_block(x, 4, 2, 4, 4, 3)
x = computation_block(x, 4, 2, 2, 2, 2)
x = computation_block(x, 4, 3, 2, 2, 3)
# exit stem
x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
#x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x) #we omit this last avgpool to retain dimensionality
x = Flatten()(x)
# FC layers
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer= SGD(lr= 1e-2, momentum=0.9),
loss= 'binary_crossentropy',
metrics=[ 'binary_accuracy'])
train_size = 17588
test_size = 1955
learning_rate = 1e-3
learning_decay = 0.94
batch_size = 16
#our callbacks
root_path = 'D:/Projects/Github/SyntheticPromoter/HilbertCNN/weights/1mer/modified_hilbertcnn'
save_model = ModelCheckpoint(root_path + '/weights-{epoch:02d}-{val_loss:.2f}.hdf5',
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
csv_path = '{}/training_history.csv'.format(root_path)
csv_logger = CSVLogger(csv_path, separator=',', append=False)
def incep_resnet_schedule(epoch):
if epoch % 2 == 0:
return learning_rate*(learning_decay**(epoch))
else:
return learning_rate*(learning_decay**((epoch)-1.0))
lr_scheduler = LearningRateScheduler(incep_resnet_schedule)
#train the model
model.fit_generator(train_generator,
steps_per_epoch= train_size // batch_size,
epochs=30,
validation_data= validation_generator,
validation_steps= test_size // batch_size,
verbose=2,
callbacks = [save_model, csv_logger, lr_scheduler])
model.summary()
# Modified version of Hilbert-CNN
# batchnorm after activation
def computation_block(in_layer, n_filters, filtersize_a, filtersize_b, filtersize_c, filtersize_d):
# residual 1
p1 = Conv2D(n_filters, (filtersize_a, filtersize_a), strides=(1, 1), padding='same', activation='relu')(in_layer)
p1 = BatchNormalization()(p1)
p1 = Conv2D(n_filters, (filtersize_b, filtersize_b), strides=(1, 1), padding='same')(p1)
#p1 = BatchNormalization()(p1)
# residual 2
p2 = Conv2D(n_filters, (filtersize_c, filtersize_c), strides=(1, 1), padding='same', activation='relu')(in_layer)
p2 = BatchNormalization()(p2)
p2 = Conv2D(n_filters, (filtersize_d, filtersize_d), strides=(1, 1), padding='same')(p2)
#p2 = BatchNormalization()(p2)
x = Concatenate()([in_layer, p1, p2])
x = Activation('relu')(x)
x = BatchNormalization()(x)
return x
# stem
inputs = Input(shape=[32, 32, 4])
x = GaussianNoise(0.3)(inputs)
x = Conv2D(64, (7, 7), strides=(1, 1), padding='same', activation='relu')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (5, 5), strides=(1, 1), padding='same', activation='relu')(x)
x = BatchNormalization()(x)
x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = computation_block(x, 4, 8, 4, 4, 3)
x = computation_block(x, 4, 3, 3, 3, 3)
# mid-stem
x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = computation_block(x, 4, 2, 4, 4, 3)
x = computation_block(x, 4, 2, 2, 2, 2)
x = computation_block(x, 4, 3, 2, 2, 3)
# exit stem
x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
#x = BatchNormalization()(x)
#x = Activation('relu')(x)
#x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x) #we omit this last avgpool to retain dimensionality
x = Flatten()(x)
# FC layers
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer= SGD(lr= 1e-2, momentum=0.9),
loss= 'binary_crossentropy',
metrics=[ 'binary_accuracy'])
train_size = 17588
test_size = 1955
learning_rate = 1e-3
learning_decay = 0.94
batch_size = 16
#our callbacks
root_path = 'D:/Projects/Github/SyntheticPromoter/HilbertCNN/weights/1mer/modified_close_hilbertcnn'
save_model = ModelCheckpoint(root_path + '/weights-{epoch:02d}-{val_loss:.2f}.hdf5',
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
csv_path = '{}/training_history.csv'.format(root_path)
csv_logger = CSVLogger(csv_path, separator=',', append=False)
def incep_resnet_schedule(epoch):
if epoch % 2 == 0:
return learning_rate*(learning_decay**(epoch))
else:
return learning_rate*(learning_decay**((epoch)-1.0))
lr_scheduler = LearningRateScheduler(incep_resnet_schedule)
#train the model
model.fit_generator(train_generator,
steps_per_epoch= train_size // batch_size,
epochs=30,
validation_data= validation_generator,
validation_steps= test_size // batch_size,
verbose=2,
callbacks = [save_model, csv_logger, lr_scheduler])
# Modified version of Hilbert-CNN
# batchnorm after activation
# wide network
# Max pooling
def computation_block(in_layer, n_filters, filtersize_a, filtersize_b, filtersize_c, filtersize_d):
# residual 1
p1 = Conv2D(n_filters, (filtersize_a, filtersize_a), strides=(1, 1), padding='same', activation='relu')(in_layer)
p1 = BatchNormalization()(p1)
p1 = Conv2D(n_filters, (filtersize_b, filtersize_b), strides=(1, 1), padding='same')(p1)
#p1 = BatchNormalization()(p1)
# residual 2
p2 = Conv2D(n_filters, (filtersize_c, filtersize_c), strides=(1, 1), padding='same', activation='relu')(in_layer)
p2 = BatchNormalization()(p2)
p2 = Conv2D(n_filters, (filtersize_d, filtersize_d), strides=(1, 1), padding='same')(p2)
#p2 = BatchNormalization()(p2)
x = Concatenate()([in_layer, p1, p2])
x = Activation('relu')(x)
x = BatchNormalization()(x)
return x
# stem
inputs = Input(shape=[32, 32, 4])
x = GaussianNoise(0.3)(inputs)
x = Conv2D(256, (7, 7), strides=(1, 1), padding='same', activation='relu')(x)
x = BatchNormalization()(x)
x = Conv2D(256, (5, 5), strides=(1, 1), padding='same', activation='relu')(x)
x = BatchNormalization()(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = computation_block(x, 32, 8, 4, 4, 3)
x = computation_block(x, 32, 3, 3, 3, 3)
# mid-stem
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = computation_block(x, 32, 2, 4, 4, 3)
x = computation_block(x, 32, 2, 2, 2, 2)
x = computation_block(x, 32, 3, 2, 2, 3)
# exit stem
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = Flatten()(x)
# FC layers
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer= SGD(lr= 1e-2, momentum=0.9),
loss= 'binary_crossentropy',
metrics=[ 'binary_accuracy'])
train_size = 17588
test_size = 1955
learning_rate = 1e-3
learning_decay = 0.94
batch_size = 16
#our callbacks
root_path = 'D:/Projects/Github/SyntheticPromoter/HilbertCNN/weights/1mer/modified_wide_hilbertcnn'
save_model = ModelCheckpoint(root_path + '/weights-{epoch:02d}-{val_loss:.2f}.hdf5',
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
csv_path = '{}/training_history.csv'.format(root_path)
csv_logger = CSVLogger(csv_path, separator=',', append=False)
def incep_resnet_schedule(epoch):
if epoch % 2 == 0:
return learning_rate*(learning_decay**(epoch))
else:
return learning_rate*(learning_decay**((epoch)-1.0))
lr_scheduler = LearningRateScheduler(incep_resnet_schedule)
#train the model
model.fit_generator(train_generator,
steps_per_epoch= train_size // batch_size,
epochs=30,
validation_data= validation_generator,
validation_steps= test_size // batch_size,
verbose=2,
callbacks = [save_model, csv_logger, lr_scheduler])
model.summary()
# original implementation of Hilbert-CNN
# https://openreview.net/forum?id=HJvvRoe0W
def computation_block(in_layer, n_filters, filtersize_a, filtersize_b, filtersize_c, filtersize_d):
# residual 1
p1 = Conv2D(n_filters, (filtersize_a, filtersize_a), strides=(1, 1), padding='same')(in_layer)
p1 = BatchNormalization()(p1)
p1 = Activation('relu')(p1)
p1 = Conv2D(n_filters, (filtersize_b, filtersize_b), strides=(1, 1), padding='same')(p1)
p1 = BatchNormalization()(p1)
# residual 2
p2 = Conv2D(n_filters, (filtersize_c, filtersize_c), strides=(1, 1), padding='same')(in_layer)
p2 = BatchNormalization()(p2)
p2 = Activation('relu')(p2)
p2 = Conv2D(n_filters, (filtersize_d, filtersize_d), strides=(1, 1), padding='same')(p2)
p2 = BatchNormalization()(p2)
x = Concatenate()([in_layer, p1, p2])
x = BatchNormalization()(x)
x = Activation('relu')(x)
return x
# stem
inputs = Input(shape=[32, 32, 4])
x = GaussianNoise(0.2)(inputs)
x = Conv2D(64, (7, 7), strides=(1, 1), padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (5, 5), strides=(1, 1), padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = computation_block(x, 4, 8, 4, 4, 3)
x = computation_block(x, 4, 3, 3, 3, 3)
# mid-stem
x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = computation_block(x, 4, 2, 4, 4, 3)
x = computation_block(x, 4, 2, 2, 2, 2)
x = computation_block(x, 4, 3, 2, 2, 3)
# exit stem
x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
#x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x) #we omit this last avgpool to retain dimensionality
x = Flatten()(x)
# FC layers
x = Dense(1024, activation='relu')(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer= SGD(lr= 0.01, momentum=0.9),
loss= 'binary_crossentropy',
metrics=[ 'binary_accuracy'])
train_size = 17588
test_size = 1955
learning_rate = 1e-3
learning_decay = 0.94
batch_size = 16
#our callbacks
root_path = 'D:/Projects/Github/SyntheticPromoter/HilbertCNN/weights/1mer/highdimdropout_hilbertcnn'
save_model = ModelCheckpoint(root_path + '/weights-{epoch:02d}-{val_loss:.2f}.hdf5',
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
csv_path = '{}/training_history.csv'.format(root_path)
csv_logger = CSVLogger(csv_path, separator=',', append=False)
def incep_resnet_schedule(epoch):
if epoch % 2 == 0:
return learning_rate*(learning_decay**(epoch))
else:
return learning_rate*(learning_decay**((epoch)-1.0))
lr_scheduler = LearningRateScheduler(incep_resnet_schedule)
#train the model
model.fit_generator(train_generator,
steps_per_epoch= train_size // batch_size,
epochs=30,
validation_data= validation_generator,
validation_steps= test_size // batch_size,
verbose=2,
callbacks = [save_model, csv_logger, lr_scheduler])
# original implementation of Hilbert-CNN
# https://openreview.net/forum?id=HJvvRoe0W
def computation_block(in_layer, n_filters, filtersize_a, filtersize_b, filtersize_c, filtersize_d):
# residual 1
p1 = Conv2D(n_filters, (filtersize_a, filtersize_a), strides=(1, 1), padding='same')(in_layer)
p1 = BatchNormalization()(p1)
p1 = Activation('relu')(p1)
p1 = Conv2D(n_filters, (filtersize_b, filtersize_b), strides=(1, 1), padding='same')(p1)
p1 = BatchNormalization()(p1)
# residual 2
p2 = Conv2D(n_filters, (filtersize_c, filtersize_c), strides=(1, 1), padding='same')(in_layer)
p2 = BatchNormalization()(p2)
p2 = Activation('relu')(p2)
p2 = Conv2D(n_filters, (filtersize_d, filtersize_d), strides=(1, 1), padding='same')(p2)
p2 = BatchNormalization()(p2)
x = Concatenate()([in_layer, p1, p2])
x = BatchNormalization()(x)
x = Activation('relu')(x)
return x
# stem
inputs = Input(shape=[32, 32, 4])
x = GaussianNoise(0.2)(inputs)
x = Conv2D(64, (7, 7), strides=(1, 1), padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (5, 5), strides=(1, 1), padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = computation_block(x, 4, 8, 4, 4, 3)
x = computation_block(x, 4, 3, 3, 3, 3)
# mid-stem
x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = computation_block(x, 4, 2, 4, 4, 3)
x = computation_block(x, 4, 2, 2, 2, 2)
x = computation_block(x, 4, 3, 2, 2, 3)
# exit stem
x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = Flatten()(x)
# FC layers
x = Dense(1024, activation='relu')(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer= SGD(lr= 0.01, momentum=0.9),
loss= 'binary_crossentropy',
metrics=[ 'binary_accuracy'])
train_size = 17588
test_size = 1955
learning_rate = 1e-3
learning_decay = 0.94
batch_size = 16
#our callbacks
root_path = 'D:/Projects/Github/SyntheticPromoter/HilbertCNN/weights/1mer/lowdimdropout_hilbertcnn'
save_model = ModelCheckpoint(root_path + '/weights-{epoch:02d}-{val_loss:.2f}.hdf5',
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
csv_path = '{}/training_history.csv'.format(root_path)
csv_logger = CSVLogger(csv_path, separator=',', append=False)
def incep_resnet_schedule(epoch):
if epoch % 2 == 0:
return learning_rate*(learning_decay**(epoch))
else:
return learning_rate*(learning_decay**((epoch)-1.0))
lr_scheduler = LearningRateScheduler(incep_resnet_schedule)
#train the model
model.fit_generator(train_generator,
steps_per_epoch= train_size // batch_size,
epochs=30,
validation_data= validation_generator,
validation_steps= test_size // batch_size,
verbose=2,
callbacks = [save_model, csv_logger, lr_scheduler])
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score, roc_curve
import os
high_path = 'D:/Projects/iSynPro/iSynPro/HilbertCNN/train_val_npys/6count/1mer/test/high'
low_path = 'D:/Projects/iSynPro/iSynPro/HilbertCNN/train_val_npys/6count/1mer/test/low'
high_xcomp = []
high_ycomp = []
for root, subdir, files in os.walk(high_path):
for file in files:
high_xcomp.append(np.load(os.path.join(root, file)))
high_ycomp.append(0)
low_xcomp = []
low_ycomp = []
for root, subdir, files in os.walk(low_path):
for file in files:
low_xcomp.append(np.load(os.path.join(root, file)))
low_ycomp.append(1)
x_test = np.asarray(low_xcomp + high_xcomp)
y_test = np.asarray(low_ycomp + high_ycomp)
model_list = ['D:/Projects/Github/SyntheticPromoter/HilbertCNN/weights/1mer/vanilla_hilbertcnn/weights-18-0.50.hdf5',
'D:/Projects/Github/SyntheticPromoter/HilbertCNN/weights/1mer/modified_hilbertcnn/weights-16-0.50.hdf5',
'D:/Projects/Github/SyntheticPromoter/HilbertCNN/weights/1mer/modified_close_hilbertcnn/weights-07-0.50.hdf5',
'D:/Projects/Github/SyntheticPromoter/HilbertCNN/weights/1mer/modified_wide_hilbertcnn/weights-04-0.55.hdf5',
'D:/Projects/Github/SyntheticPromoter/HilbertCNN/weights/1mer/highdimdropout_hilbertcnn/weights-09-0.49.hdf5',
'D:/Projects/Github/SyntheticPromoter/HilbertCNN/weights/1mer/lowdimdropout_hilbertcnn/weights-19-0.49.hdf5'
]
label_list = ['Vanilla Hilbert-CNN',
'Batchnorm -BlockRelu +FinalRelu',
'Batchnorm +BlockRelu -FinalRelu',
'Wide Hilbert-CNN',
              'Hilbert-CNN -FinalAvgPool +Dropout',
              'Hilbert-CNN +FinalAvgPool +Dropout']
roc_list = []
for path in model_list:
model = load_model(path)
y_pred = model.predict(x_test)
auc = roc_auc_score(y_test, y_pred)
fpr, tpr, _ = roc_curve(y_test, y_pred)
roc_list.append([fpr, tpr, auc])
palette = sns.color_palette("cubehelix", len(roc_list))
#plot roc curve
for i in range(len(roc_list)):
plt.plot(roc_list[i][0],
roc_list[i][1],
color=palette[i],
label='{0} (AUC = {1:.3f})'.format(label_list[i], roc_list[i][2]))
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic')
plt.legend(loc="lower right")
#plt.savefig('c:/users/wolf/desktop/SynPro/roc.png')
plt.show()
from keras.applications.mobilenet import MobileNet
from keras.applications import mobilenet
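# weights=None: ImageNet weights expect 3-channel inputs, so this 4-channel
# (one-hot DNA image) MobileNet backbone is trained from scratch.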
head_model = MobileNet(include_top=False,
weights=None,
input_shape = (32, 32, 4))
x = head_model.output
x = Flatten()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(inputs=head_model.input, outputs=predictions)
model.compile(optimizer= SGD(lr= 0.01, momentum=0.9),
loss= 'binary_crossentropy',
metrics=[ 'binary_accuracy'])
model.summary()
train_size = 17588
test_size = 1955
learning_rate = 1e-3
learning_decay = 0.94
batch_size = 16
#our callbacks
root_path = 'D:/Projects/Github/SyntheticPromoter/HilbertCNN/weights/1mer/mobile_hilbertcnn'
save_model = ModelCheckpoint(root_path + '/weights-{epoch:02d}-{val_loss:.2f}.hdf5',
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
csv_path = '{}/training_history.csv'.format(root_path)
csv_logger = CSVLogger(csv_path, separator=',', append=False)
def incep_resnet_schedule(epoch):
if epoch % 2 == 0:
return learning_rate*(learning_decay**(epoch))
else:
return learning_rate*(learning_decay**((epoch)-1.0))
lr_scheduler = LearningRateScheduler(incep_resnet_schedule)
#train the model
model.fit_generator(train_generator,
steps_per_epoch= train_size // batch_size,
epochs=30,
validation_data= validation_generator,
validation_steps= test_size // batch_size,
verbose=2,
callbacks = [save_model, csv_logger, lr_scheduler])
```
<a href="https://colab.research.google.com/github/setiawantirta/Corona-CADD/blob/main/Part_2_Exploratory_Data_Analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# **Bioinformatics Project - Computational Drug Discovery [Part 2] Exploratory Data Analysis**
Chanin Nantasenamat
[*'Data Professor' YouTube channel*](http://youtube.com/dataprofessor)
https://youtu.be/jBlTQjcKuaY
In this Jupyter notebook, we will be building a real-life **data science project** that you can include in your **data science portfolio**. Particularly, we will be building a machine learning model using the ChEMBL bioactivity data.
In **Part 2**, we will be performing Descriptor Calculation and Exploratory Data Analysis.
---
## **Install conda and rdkit**
```
! wget https://repo.anaconda.com/miniconda/Miniconda3-py37_4.8.2-Linux-x86_64.sh
! chmod +x Miniconda3-py37_4.8.2-Linux-x86_64.sh
! bash ./Miniconda3-py37_4.8.2-Linux-x86_64.sh -b -f -p /usr/local
! conda install -c rdkit rdkit -y
import sys
sys.path.append('/usr/local/lib/python3.7/site-packages/')
```
## **Load bioactivity data**
```
import pandas as pd
df = pd.read_csv('bioactivity_data_preprocessed.csv')
```
## **Calculate Lipinski descriptors**
Christopher Lipinski, a scientist at Pfizer, came up with a set of rules of thumb for evaluating the **druglikeness** of compounds. Druglikeness here is based on Absorption, Distribution, Metabolism and Excretion (ADME), which is also known as the pharmacokinetic profile. Lipinski analyzed all orally active FDA-approved drugs to formulate what came to be known as the **Rule-of-Five** or **Lipinski's Rule**.
Lipinski's Rule states the following (a quick programmatic check is sketched after the list):
* Molecular weight < 500 Dalton
* Octanol-water partition coefficient (LogP) < 5
* Hydrogen bond donors < 5
* Hydrogen bond acceptors < 10
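As an illustration, the sketch below checks these four criteria for a single molecule with the same RDKit functions that are imported in the next section. The helper name `rule_of_five_pass` and the aspirin example are assumptions made for this illustration, not part of the original workflow:
```
from rdkit import Chem
from rdkit.Chem import Descriptors, Lipinski

def rule_of_five_pass(smiles):
    # Returns True if the molecule satisfies all four criteria listed above
    mol = Chem.MolFromSmiles(smiles)
    return all([
        Descriptors.MolWt(mol) < 500,       # molecular weight
        Descriptors.MolLogP(mol) < 5,       # octanol-water partition coefficient
        Lipinski.NumHDonors(mol) < 5,       # hydrogen bond donors
        Lipinski.NumHAcceptors(mol) < 10,   # hydrogen bond acceptors
    ])

print(rule_of_five_pass('CC(=O)Oc1ccccc1C(=O)O'))  # aspirin -> True
```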
### **Import libraries**
```
import numpy as np
from rdkit import Chem
from rdkit.Chem import Descriptors, Lipinski
```
### **Calculate descriptors**
```
# Inspired by: https://codeocean.com/explore/capsules?query=tag:data-curation
def lipinski(smiles, verbose=False):
moldata= []
for elem in smiles:
mol=Chem.MolFromSmiles(elem)
moldata.append(mol)
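    # placeholder; replaced by the first row of descriptors below (i == 0 branch)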
baseData= np.arange(1,1)
i=0
for mol in moldata:
desc_MolWt = Descriptors.MolWt(mol)
desc_MolLogP = Descriptors.MolLogP(mol)
desc_NumHDonors = Lipinski.NumHDonors(mol)
desc_NumHAcceptors = Lipinski.NumHAcceptors(mol)
row = np.array([desc_MolWt,
desc_MolLogP,
desc_NumHDonors,
desc_NumHAcceptors])
if(i==0):
baseData=row
else:
baseData=np.vstack([baseData, row])
i=i+1
columnNames=["MW","LogP","NumHDonors","NumHAcceptors"]
descriptors = pd.DataFrame(data=baseData,columns=columnNames)
return descriptors
df_lipinski = lipinski(df.canonical_smiles)
```
### **Combine DataFrames**
Let's take a look at the 2 DataFrames that will be combined.
```
df_lipinski
df
```
Now, let's combine the two DataFrames
```
df_combined = pd.concat([df,df_lipinski], axis=1)
df_combined
```
### **Convert IC50 to pIC50**
To allow the **IC50** data to be more uniformly distributed, we will convert **IC50** to the negative logarithmic scale, which is essentially **-log10(IC50)** with the IC50 expressed in molar units.
This custom function pIC50() will accept a DataFrame as input and will:
* Take the IC50 values from the ``standard_value_norm`` column and convert them from nM to M by multiplying each value by 10$^{-9}$
* Take the molar value and apply -log10
* Delete the ``standard_value_norm`` column and create a new ``pIC50`` column
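For example, an IC50 of 1,000 nM corresponds to pIC50 = -log10(1,000 × 10$^{-9}$ M) = 6, and an IC50 of 10,000 nM corresponds to pIC50 = 5.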
```
# https://github.com/chaninlab/estrogen-receptor-alpha-qsar/blob/master/02_ER_alpha_RO5.ipynb
import numpy as np
def pIC50(input):
pIC50 = []
for i in input['standard_value_norm']:
molar = i*(10**-9) # Converts nM to M
pIC50.append(-np.log10(molar))
input['pIC50'] = pIC50
x = input.drop('standard_value_norm', 1)
return x
```
Point to note: values greater than 100,000,000 nM will be capped at 100,000,000, otherwise the negative logarithm would itself become negative; the next two cells illustrate this: -log10(10$^{-9}$ × 100,000,000) = 1, while -log10(10$^{-9}$ × 10,000,000,000) = -1.
```
df_combined.standard_value.describe()
-np.log10( (10**-9)* 100000000 )
-np.log10( (10**-9)* 10000000000 )
def norm_value(input):
norm = []
for i in input['standard_value']:
if i > 100000000:
i = 100000000
norm.append(i)
input['standard_value_norm'] = norm
x = input.drop('standard_value', 1)
return x
```
We will first apply the norm_value() function so that the values in the standard_value column are capped (normalized) as described above.
```
df_norm = norm_value(df_combined)
df_norm
df_norm.standard_value_norm.describe()
df_final = pIC50(df_norm)
df_final
df_final.pIC50.describe()
```
### **Removing the 'intermediate' bioactivity class**
Here, we will be removing the ``intermediate`` class from our data set.
```
df_2class = df_final[df_final.bioactivity_class != 'intermediate']
df_2class
```
---
## **Exploratory Data Analysis (Chemical Space Analysis) via Lipinski descriptors**
### **Import library**
```
import seaborn as sns
sns.set(style='ticks')
import matplotlib.pyplot as plt
```
### **Frequency plot of the 2 bioactivity classes**
```
plt.figure(figsize=(5.5, 5.5))
sns.countplot(x='bioactivity_class', data=df_2class, edgecolor='black')
plt.xlabel('Bioactivity class', fontsize=14, fontweight='bold')
plt.ylabel('Frequency', fontsize=14, fontweight='bold')
plt.savefig('plot_bioactivity_class.pdf')
```
### **Scatter plot of MW versus LogP**
It can be seen that the two bioactivity classes span a similar chemical space, as evidenced by the scatter plot of MW versus LogP.
```
plt.figure(figsize=(5.5, 5.5))
sns.scatterplot(x='MW', y='LogP', data=df_2class, hue='bioactivity_class', size='pIC50', edgecolor='black', alpha=0.7)
plt.xlabel('MW', fontsize=14, fontweight='bold')
plt.ylabel('LogP', fontsize=14, fontweight='bold')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0)
plt.savefig('plot_MW_vs_LogP.pdf')
```
### **Box plots**
#### **pIC50 value**
```
plt.figure(figsize=(5.5, 5.5))
sns.boxplot(x = 'bioactivity_class', y = 'pIC50', data = df_2class)
plt.xlabel('Bioactivity class', fontsize=14, fontweight='bold')
plt.ylabel('pIC50 value', fontsize=14, fontweight='bold')
plt.savefig('plot_ic50.pdf')
```
**Statistical analysis | Mann-Whitney U Test**
```
def mannwhitney(descriptor, verbose=False):
# https://machinelearningmastery.com/nonparametric-statistical-significance-tests-in-python/
from numpy.random import seed
from numpy.random import randn
from scipy.stats import mannwhitneyu
# seed the random number generator
seed(1)
# actives and inactives
selection = [descriptor, 'bioactivity_class']
df = df_2class[selection]
active = df[df.bioactivity_class == 'active']
active = active[descriptor]
selection = [descriptor, 'bioactivity_class']
df = df_2class[selection]
inactive = df[df.bioactivity_class == 'inactive']
inactive = inactive[descriptor]
# compare samples
stat, p = mannwhitneyu(active, inactive)
#print('Statistics=%.3f, p=%.3f' % (stat, p))
# interpret
alpha = 0.05
if p > alpha:
interpretation = 'Same distribution (fail to reject H0)'
else:
interpretation = 'Different distribution (reject H0)'
results = pd.DataFrame({'Descriptor':descriptor,
'Statistics':stat,
'p':p,
'alpha':alpha,
'Interpretation':interpretation}, index=[0])
filename = 'mannwhitneyu_' + descriptor + '.csv'
results.to_csv(filename)
return results
mannwhitney('pIC50')
```
#### **MW**
```
plt.figure(figsize=(5.5, 5.5))
sns.boxplot(x = 'bioactivity_class', y = 'MW', data = df_2class)
plt.xlabel('Bioactivity class', fontsize=14, fontweight='bold')
plt.ylabel('MW', fontsize=14, fontweight='bold')
plt.savefig('plot_MW.pdf')
mannwhitney('MW')
```
#### **LogP**
```
plt.figure(figsize=(5.5, 5.5))
sns.boxplot(x = 'bioactivity_class', y = 'LogP', data = df_2class)
plt.xlabel('Bioactivity class', fontsize=14, fontweight='bold')
plt.ylabel('LogP', fontsize=14, fontweight='bold')
plt.savefig('plot_LogP.pdf')
```
**Statistical analysis | Mann-Whitney U Test**
```
mannwhitney('LogP')
```
#### **NumHDonors**
```
plt.figure(figsize=(5.5, 5.5))
sns.boxplot(x = 'bioactivity_class', y = 'NumHDonors', data = df_2class)
plt.xlabel('Bioactivity class', fontsize=14, fontweight='bold')
plt.ylabel('NumHDonors', fontsize=14, fontweight='bold')
plt.savefig('plot_NumHDonors.pdf')
```
**Statistical analysis | Mann-Whitney U Test**
```
mannwhitney('NumHDonors')
```
#### **NumHAcceptors**
```
plt.figure(figsize=(5.5, 5.5))
sns.boxplot(x = 'bioactivity_class', y = 'NumHAcceptors', data = df_2class)
plt.xlabel('Bioactivity class', fontsize=14, fontweight='bold')
plt.ylabel('NumHAcceptors', fontsize=14, fontweight='bold')
plt.savefig('plot_NumHAcceptors.pdf')
mannwhitney('NumHAcceptors')
```
#### **Interpretation of Statistical Results**
##### **Box Plots**
###### **pIC50 values**
Taking a look at the pIC50 values, the **actives** and **inactives** displayed a ***statistically significant difference***, which is to be expected since threshold values (``IC50 < 1,000 nM = Actives while IC50 > 10,000 nM = Inactives``, corresponding to ``pIC50 > 6 = Actives and pIC50 < 5 = Inactives``) were used to define actives and inactives.
###### **Lipinski's descriptors**
Of the 4 Lipinski descriptors (MW, LogP, NumHDonors and NumHAcceptors), only LogP exhibited ***no significant difference*** between the **actives** and **inactives**, while the other 3 descriptors (MW, NumHDonors and NumHAcceptors) show a ***statistically significant difference*** between **actives** and **inactives**.
## **Zip files**
```
! zip -r results.zip . -i *.csv *.pdf
```
## **Copy File ke Dalam folder Bioinfomatics**
```
from google.colab import drive
drive.mount('/content/gdrive/', force_remount=True)
! cp results.zip "/content/gdrive/My Drive/Colab Notebooks/Bioinformatics"
ls -l "/content/gdrive/My Drive/Colab Notebooks/Bioinformatics" # -l is used to show when this file was added/created
```
# **Importing the Libraries**
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
```
# **Importing the Dataset**
```
df = pd.read_csv('StockData.csv', encoding = "ISO-8859-1")
df.head()
train=df[df['Date']<'20150101']
test=df[df['Date']>'20141231']
train.shape
```
# **Removing Punctuation**
```
data=train.iloc[:,2:27]
data.replace("[^a-zA-Z]", " ",regex=True, inplace=True)
data.columns
for col in data.columns:
data[col]=data[col].str.lower()
data.head(1)
headlines = []
for row in range(0,len(data.index)):
headlines.append(' '.join(str(x) for x in data.iloc[row,0:25]))
headlines[0]
```
# **Implementing Random Forest using Bag Of Words Model**
```
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
## implement BAG OF WORDS
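# ngram_range=(2,2): the vocabulary is built from word bigrams (pairs of consecutive words) only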
countvector=CountVectorizer(ngram_range=(2,2))
traindataset=countvector.fit_transform(headlines)
# implement RandomForest Classifier
randomclassifier=RandomForestClassifier(n_estimators=200,criterion='entropy')
randomclassifier.fit(traindataset,train['Label'])
```
## **Predict for the Test Dataset**
```
test_transform= []
for row in range(0,len(test.index)):
test_transform.append(' '.join(str(x) for x in test.iloc[row,2:27]))
test_dataset = countvector.transform(test_transform)
predictions = randomclassifier.predict(test_dataset)
```
# **Making the confusion matrix**
```
from sklearn import metrics
import itertools
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
matrix=confusion_matrix(test['Label'],predictions)
print(matrix)
score=accuracy_score(test['Label'],predictions)
print(score)
report=classification_report(test['Label'],predictions)
print(report)
plot_confusion_matrix(matrix, classes=['Down', 'Up'])
```
# **Multinomial NB using Bag of Words Model**
```
from sklearn.naive_bayes import MultinomialNB
nb=MultinomialNB()
nb.fit(traindataset,train['Label'])
predictions = nb.predict(test_dataset)
matrix=confusion_matrix(test['Label'],predictions)
print(matrix)
score=accuracy_score(test['Label'],predictions)
print(score)
report=classification_report(test['Label'],predictions)
print(report)
plot_confusion_matrix(matrix, classes=['Down', 'Up'])
```
```
# -*- coding=utf-8 -*-
'''
Created on 2021/3/5
@Haihui Pan
'''
```
# Python Features
Python is a programming language created by the well-known "Uncle Guido", Guido van Rossum, over the Christmas holidays of 1989, as a way to pass an otherwise boring Christmas.
* Purpose of a programming language: programming languages exist to write programs that a computer will execute. The CPU only understands machine instructions, so every programming language must be "translated" into machine instructions; however, different languages need very different amounts of code for the same task. A task that takes 1,000 lines of C might take 100 lines of Java and perhaps only 20 lines of Python, so Python is a rather high-level language; the price of writing less code is slower execution.
Characteristics of other popular programming languages:<br>
* C: procedural, fast, well suited to operating systems and embedded programs<br>
* Java: object-oriented with concise syntax, well suited to web apps <br>
* JavaScript: web-page programming<br>
**Every programming language has its own characteristics, so there is no single best language, only the language best suited to a particular situation.**
Apps and other applications built with Python:<br>
* YouTube - a video social networking site<br>
* Douban - a database website for movies and other cultural products<br>
* Zhihu - a question-and-answer website<br>
* AI programming framework - TensorFlow
Advantages of Python:<br>
1 - Simple syntax and a huge collection of third-party libraries <br>
2 - The de facto "official" language of AI development
Disadvantages of Python:<br>
1 - Slow execution: Python is an interpreted language, so the code is translated line by line into machine code the CPU can understand while it runs, and this translation is very time-consuming; a C program is compiled into executable machine code before it runs, which is why it is fast<br>
2 - The code cannot be encrypted. Publishing a Python program effectively means publishing the source code. This differs from C, where you do not publish the source code, only the compiled machine code (the familiar xxx.exe files on Windows)
# Python Interpreters
Python has several different interpreters:<br>
1 - CPython: after downloading and installing Python 3.x from the official website, you get the official interpreter, CPython. It is written in C, hence the name, and it is by far the most widely used interpreter<br>
2 - IPython: an interactive interpreter built on top of CPython. IPython only enhances the interactive experience; it executes Python code exactly the same way CPython does<br>
3 - Jython: a Python interpreter that runs on the Java platform and compiles Python code directly to Java bytecode for execution
# Running a Python File
Some students ask: can a .py file be run directly, like an .exe file? Not on Windows, but on Mac and Linux it can, by putting a special comment on the first line of the .py file:
`#!/usr/bin/env python3` <br>
`print('hello, world')`<br>
Then give hello.py execute permission from the command line and run it directly:<br>
`$ chmod a+x hello.py`<br>
`$ ./hello.py`
Note: nowadays the usual approach is to install Anaconda to create the Python environment you need, which also makes installing and managing third-party packages very convenient.
# Programming Language Popularity: the TIOBE Index (2020)
```
%%html
<img src='TIOBE排行榜.png' width=800 height=500>
```
# Character Encoding
## ASCII Encoding
Because computers can only process numbers, text must first be converted to numbers before it can be handled. The earliest computers were designed with 8 bits to a byte, so the largest integer a single byte can represent is 255 (11111111 = 255). Representing larger integers requires more bytes, for example 2 bytes (1111111111111111 = 65535).
Since computers were invented in the United States, only 127 characters were encoded at first: upper- and lower-case English letters, digits and a handful of symbols. This table is known as the ASCII encoding.<br>
Drawbacks:<br>
1 - One byte is clearly not enough for Chinese; at least two bytes are needed, and they must not clash with ASCII. There are hundreds of languages in the world, so each country introduced its own character encoding: China defined GB2312, Japan encoded Japanese as Shift_JIS, and so on.<br>
2 - With every country setting its own standard, conflicts were unavoidable. As a result, text that mixes languages easily ends up garbled.<br>
## Unicode & UTF-8
**Unicode**: unify all languages under a single encoding so that garbled text is no longer a problem.
The difference between ASCII and Unicode: ASCII uses 1 byte per character, whereas Unicode usually uses 2 bytes.<br>
For example:<br>
* The letter A is 65 in decimal (01000001 in binary) in ASCII;<br>
* The character '0' is 48 in decimal (00110000 in binary) in ASCII; note that the character '0' is different from the integer 0;<br>
* The Chinese character '中' is outside the ASCII range; its Unicode code point is 20013 in decimal, 01001110 00101101 in binary.<br>
If the ASCII-encoded A is expressed in Unicode, you only need to pad zeros in front, so A in Unicode is 00000000 01000001.
Drawback of Unicode:<br>
* Unicode solves the garbled-text problem, but it needs twice the storage of ASCII, which is wasteful for storage and transmission.
**UTF-8**: in the spirit of economy, UTF-8 appeared as a "variable-length" encoding of Unicode.<br>
* The character A in the different encodings:
| Character | ASCII | Unicode |UTF-8 |
| ---- | -------- |---- |---- |
| A | 01000001 | 00000000 01000001 |01000001 |
Advantage of UTF-8:<br>
* ASCII can effectively be regarded as a subset of UTF-8, so the large amount of legacy software that only supports ASCII keeps working under UTF-8.
Note: computers use Unicode uniformly in memory; when text needs to be saved to disk or transmitted, it is converted to UTF-8.
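A minimal Python sketch of this round trip (the variable names are purely illustrative):
```
# Unicode code points in memory vs. UTF-8 bytes for storage/transmission
s = 'A中'
print(ord('A'), ord('中'))    # 65 20013 -> the Unicode code points
data = s.encode('utf-8')      # encode to UTF-8 bytes before saving or sending
print(data)                   # b'A\xe4\xb8\xad' : 1 byte for 'A', 3 bytes for '中'
print(data.decode('utf-8'))   # decode back to the in-memory string: A中
```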
# Project: No-show Medical Appointments
## Table of Contents
<ul>
<li><a href="#intro">Introduction</a></li>
<li><a href="#wrangling">Data Wrangling</a></li>
<li><a href="#eda">Exploratory Data Analysis</a></li>
<li><a href="#conclusions">Conclusions</a></li>
</ul>
<a id='intro'></a>
## Introduction
This dataset contains 100,000 medical appointment records from Brazil and looks at whether patients show up for their scheduled hospital visits. Each row records several attributes of the patient, including ScheduledDay (the date on which the patient scheduled the visit), Neighbourhood (the location of the hospital), and Scholarship (whether the patient is enrolled in the Brazilian welfare program Bolsa Família).
- *Note the encoding of the last column: "No" means the patient showed up for the appointment, "Yes" means the patient did not show up.*
### Question: Which factors are most useful for predicting whether a patient will show up for their scheduled appointment?
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
```
<a id='wrangling'></a>
## Data Wrangling
### General Properties
```
# Load the data
df = pd.read_csv("noshowappointments-kagglev2-may-2016.csv")
df.head()
df.info()
```
The info() output shows 110,527 records in total, with no missing values.
```
df.describe()
```
The describe() output reveals one anomalous record with Age = -1, which we will clean up shortly.
```
# Check for duplicate rows
sum(df.duplicated())
```
Standardize the column naming
```
# Rename No-show to No_show
df.rename(index=str, columns={'No-show':'No_show'}, inplace=True)
df.columns
```
### Clean anomalous and unneeded data
- Remove records with anomalous ages (Age < 0)
```
df.query("Age < 0")
df = df[df.Age >= 0]
df.shape
df.query("Age < 0")
```
- Drop the unused columns: PatientId, AppointmentID, ScheduledDay, AppointmentDay
```
df.drop(['PatientId','AppointmentID','ScheduledDay','AppointmentDay'], axis=1, inplace=True)
df.head()
# Inspect the neighbourhood values
len(df.Neighbourhood.value_counts())
```
There are a lot of neighbourhoods, so we set this column aside for now
```
df.drop('Neighbourhood', axis=1, inplace=True)
df.head()
```
- Bin the ages at [29, 39, 49] into [youth, elder_youth, middle_aged, elder]
```
df["Age"] = pd.cut(df.Age, [-1,29,39,49,130], labels=['youth','elder_youth','middle_aged','elder'])
df.head(5)
```
- Convert No_show into a 0/1 boolean so that the mean of No_show gives the no-show rate
```
df.No_show = df.No_show == 'Yes'
df.No_show.head()
```
Save the cleaned data
```
df.to_csv('noshowappointment_edited.csv', index=False)
df_clean = pd.read_csv('noshowappointment_edited.csv')
df_clean.head()
```
<a id='eda'></a>
## Exploratory Data Analysis
### Does gender help predict whether a patient shows up?
```
# Load the cleaned data
df_clean = pd.read_csv('noshowappointment_edited.csv')
df_clean.head()
df_clean.Gender.value_counts()
df_gender = df_clean.groupby('Gender')["No_show"].mean()
df_gender
df_gender.plot.bar();
plt.ylabel('rate of no show');
plt.title('rate of no show between gender');
```
> Answer: comparing the two groups, the no-show rate is 20.31% for women and 19.97% for men. The difference is negligible, so gender is essentially of no use for predicting attendance.
### Can age predict attendance? Which age groups have lower no-show rates?
```
df_age = df_clean.groupby('Age')["No_show"].mean()
df_age = df_age.sort_values()
df_age
```
Visualize the result
```
df_age.plot.bar();
plt.ylabel('rate of no show');
plt.title('rate of no show between different ages');
```
> Answer: the chart above shows that the no-show rate falls as age increases, and the elder group has the lowest no-show rate.
<br>Age is therefore a factor that helps predict attendance.
* However, the conclusion above rests only on rate = (number in the group who showed up) / (group size), which I do not think is enough. If an age group contained very few people, then in the extreme case of an elder group with a single patient who showed up, that group's show-up rate would be 100% and the conclusion would carry little weight.
- So how many samples does each age group contain?
```
df_age_count = df_clean.groupby('Age')["No_show"].count()
df_age_count
```
> The sample sizes are all large; even the smallest group, middle_aged, has 14,209 samples, so the earlier conclusion that the no-show rate falls with age is fairly reliable.
Visualize the age distribution
```
df_age_count.plot.pie(figsize=(6,6),autopct='%.2f%%', fontsize='x-large');
plt.ylabel('proportion');
plt.title('age distribution', fontsize='x-large');
```
As the chart above shows, the 'old' and 'young' patients are the most numerous, probably because these two groups fall ill more easily.
### What about the other factors?
```
df_clean.columns
```
The columns not yet compared are ['Scholarship', 'Hipertension', 'Diabetes', 'Alcoholism', 'Handcap', 'SMS_received']
```
# Split the records into the no-show and show groups
df_show = df_clean.query('No_show == False')
df_no_show = df_clean.query('No_show == True')
```
Drop the columns not needed for this question
```
df_show = df_show.drop(['Gender', 'Age','No_show'], axis=1)
df_show.head()
df_no_show = df_no_show.drop(['Gender', 'Age','No_show'], axis=1)
df_no_show.head()
```
Plot the proportions of the different parameters for the no-show and show groups in a single chart
```
labels = df_show.columns
ind = np.arange(len(labels))
width = 0.35
labels
plt.figure(figsize=(10,5))
show_bar = plt.bar(ind, df_show.mean(), width, color='r', alpha=.7)
no_show_bar = plt.bar(ind+width, df_no_show.mean(), width, color='b', alpha=.7)
plt.ylabel('proportion')
plt.xlabel('params')
plt.title('proportion of show and no_show')
locations = ind + width/2
plt.xticks(locations, labels)
plt.legend((show_bar, no_show_bar), ('show', 'no_show'),fontsize='x-large');
```
> Answer: the bar chart above shows that, compared with patients who showed up, the no-show group has a somewhat higher Scholarship (welfare) proportion, lower proportions with hypertension, diabetes and handicaps, roughly the same proportion with alcoholism, and a higher proportion who received an SMS.<br>
Therefore Scholarship, hypertension, diabetes, handicap and SMS_received can all help predict whether a patient will miss the appointment.
<a id='conclusions'></a>
## Conclusions
#### The analysis above suggests that age, Scholarship, hypertension, diabetes, handicap and SMS_received can all be used to predict a patient's no-show rate. Age, hypertension, diabetes and handicap are negatively associated with the no-show rate, while Scholarship and SMS_received are positively associated with it.
# Generative Adversarial Network
In this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!
GANs were [first reported on](https://arxiv.org/abs/1406.2661) in 2014 by Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:
* [Pix2Pix](https://affinelayer.com/pixsrv/)
* [CycleGAN](https://github.com/junyanz/CycleGAN)
* [A whole list](https://github.com/wiseodd/generative-models)
The idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts whether the data it receives is real or fake. The generator is trained to fool the discriminator: it wants to output data that looks _as close as possible_ to real data. The discriminator, in turn, is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistinguishable from real data to the discriminator.

The general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to construct its fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can fool the discriminator.
The output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates a real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.
```
%matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
```
## Model Inputs
First we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input `inputs_real` and the generator input `inputs_z`. We'll assign them the appropriate sizes for each of the networks.
```
def model_inputs(real_dim, z_dim):
inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real')
inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
return inputs_real, inputs_z
```
## Generator network

Here we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.
#### Variable Scope
Here we need to use `tf.variable_scope` for two reasons. Firstly, we're going to make sure all the variable names start with `generator`. Similarly, we'll prepend `discriminator` to the discriminator variables. This will help out later when we're training the separate networks.
We could just use `tf.name_scope` to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also _sample from it_ as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the `reuse` keyword for `tf.variable_scope` to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.
To use `tf.variable_scope`, you use a `with` statement:
```python
with tf.variable_scope('scope_name', reuse=False):
# code here
```
Here's more from [the TensorFlow documentation](https://www.tensorflow.org/programmers_guide/variable_scope#the_problem) to get another look at using `tf.variable_scope`.
#### Leaky ReLU
TensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one. For this, you can take the outputs from a linear fully connected layer and pass them to `tf.maximum`. Typically, a parameter `alpha` sets the magnitude of the output for negative values. So, the output for negative input (`x`) values is `alpha*x`, and the output for positive `x` is `x`:
$$
f(x) = max(\alpha * x, x)
$$
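As a minimal sketch of that formula (newer TensorFlow releases also ship `tf.nn.leaky_relu`, but the explicit `tf.maximum` form below matches what the generator and discriminator cells use):
```
def leaky_relu(x, alpha=0.01):
    # returns x for positive inputs and alpha * x for negative inputs
    return tf.maximum(alpha * x, x)
```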
#### Tanh Output
The generator has been found to perform best with a $tanh$ activation on its output layer. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1.
```
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
with tf.variable_scope('generator', reuse=reuse):
# Hidden layer
h1 = tf.layers.dense(z, n_units, activation=None)
# Leaky ReLU
h1 = tf.maximum(alpha * h1, h1)
# Logits and tanh output
logits = tf.layers.dense(h1, out_dim, activation=None)
out = tf.tanh(logits)
return out
```
## Discriminator
The discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.
```
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
with tf.variable_scope('discriminator', reuse=reuse):
# Hidden layer
h1 = tf.layers.dense(x, n_units, activation=None)
# Leaky ReLU
h1 = tf.maximum(alpha * h1, h1)
logits = tf.layers.dense(h1, 1, activation=None)
out = tf.sigmoid(logits)
return out, logits
```
## Hyperparameters
```
# Size of input image to discriminator
input_size = 784
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Smoothing
smooth = 0.1
```
## Build network
Now we're building the network from the functions defined above.
First is to get our inputs, `input_real, input_z` from `model_inputs` using the sizes of the input and z.
Then, we'll create the generator, `generator(input_z, input_size)`. This builds the generator with the appropriate input and output sizes.
Then the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as `g_model`. So the real data discriminator is `discriminator(input_real)` while the fake discriminator is `discriminator(g_model, reuse=True)`.
```
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Build the model
g_model = generator(input_z, input_size, n_units=g_hidden_size, alpha=alpha)
# g_model is the generator output
d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, n_units=d_hidden_size, alpha=alpha)
```
## Discriminator and Generator Losses
Now we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, `d_loss = d_loss_real + d_loss_fake`. The losses will be sigmoid cross-entropies, which we can get with `tf.nn.sigmoid_cross_entropy_with_logits`. We'll also wrap that in `tf.reduce_mean` to get the mean for all the images in the batch. So the losses will look something like
```python
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
```
For the real image logits, we'll use `d_logits_real` which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter `smooth`. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like `labels = tf.ones_like(tensor) * (1 - smooth)`
The discriminator loss for the fake data is similar. The logits are `d_logits_fake`, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.
Finally, the generator loss uses `d_logits_fake`, the fake image logits, but now the labels are all ones. The generator is trying to fool the discriminator, so it wants the discriminator to output ones for fake images.
```
# Calculate losses
d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
labels=tf.ones_like(d_logits_real) * (1 - smooth)))
d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
                                            labels=tf.zeros_like(d_logits_fake)))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.ones_like(d_logits_fake)))
```
## Optimizers
We want to update the generator and discriminator variables separately, so we need to get the variables for each part and build separate optimizers for the two parts. To get all the trainable variables, we use `tf.trainable_variables()`. This creates a list of all the variables we've defined in our graph.
For the generator optimizer, we only want the generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with `generator`. So, we just need to iterate through the list from `tf.trainable_variables()` and keep the variables whose names start with `generator`. Each variable object has an attribute `name` which holds the name of the variable as a string (`var.name == 'weights_0'` for instance).
We can do something similar with the discriminator. All the variables in the discriminator start with `discriminator`.
Then, in the optimizer we pass the variable lists to `var_list` in the `minimize` method. This tells the optimizer to only update the listed variables. Something like `tf.train.AdamOptimizer().minimize(loss, var_list=var_list)` will only train the variables in `var_list`.
```
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
```
## Training
```
batch_size = 100
epochs = 100
samples = []
losses = []
# Only save generator variables
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images, reshape and rescale to pass to D
batch_images = batch[0].reshape((batch_size, 784))
batch_images = batch_images*2 - 1
# Sample random noise for G
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
# Run optimizers
_ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
# At the end of each epoch, get the losses and print them out
train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print("Epoch {}/{}...".format(e+1, epochs),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
# Save losses to view after training
losses.append((train_loss_d, train_loss_g))
# Sample from generator as we're training for viewing afterwards
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, reuse=True),
feed_dict={input_z: sample_z})
samples.append(gen_samples)
saver.save(sess, './checkpoints/generator.ckpt')
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
pkl.dump(samples, f)
```
## Training loss
Here we'll check out the training losses for the generator and discriminator.
```
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
```
## Generator samples from training
Here we can view samples of images from the generator. First we'll look at images taken while training.
```
def view_samples(epoch, samples):
fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)
for ax, img in zip(axes.flatten(), samples[epoch]):
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')
return fig, axes
# Load samples from generator taken while training
with open('train_samples.pkl', 'rb') as f:
samples = pkl.load(f)
```
These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 1, 7, 3, 2. Since this is just a sample, it isn't representative of the full range of images this generator can make.
```
_ = view_samples(-1, samples)
```
Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
```
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
ax.imshow(img.reshape((28,28)), cmap='Greys_r')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
```
It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number-like structures, such as 1s and 9s, appear out of the noise.
## Sampling from the generator
We can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!
```
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, reuse=True),
feed_dict={input_z: sample_z})
_ = view_samples(0, [gen_samples])
```
# Step 8: Use model to perform inference
Use example data stored on disk to perform inference with your model by sending REST requests to TensorFlow Serving.
```
"""A client for serving the chicago_taxi workshop example locally."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import base64
import json
import os
import subprocess
import tempfile
import requests
import tensorflow as tf
import tfx_utils
from tfx.utils import io_utils
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_transform import coders as tft_coders
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.tf_metadata import schema_utils
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import
from tfx.examples.chicago_taxi.trainer import taxi
_INFERENCE_TIMEOUT_SECONDS = 5.0
_PIPELINE_NAME = 'taxi'
_LABEL_KEY = 'tips'
```
The data that we will use to send requests to our model is stored on disk in [CSV](https://en.wikipedia.org/wiki/Comma-separated_values) format; we will convert these examples to [TensorFlow Example](https://www.tensorflow.org/api_docs/python/tf/train/Example) protos to send to our model being served by TensorFlow Serving.
Construct the following two utility functions:
* `_make_csv_coder`, which creates a coder that decodes a single row from the CSV data file into a tf.transform encoded dict.
* `_make_proto_coder`, which creates a coder that encodes a tf.transform encoded dict into a serialized TF Example.
```
def _get_raw_feature_spec(schema):
"""Return raw feature spec for a given schema."""
return schema_utils.schema_as_feature_spec(schema).feature_spec
def _make_proto_coder(schema):
"""Return a coder for tf.transform to read TF Examples."""
raw_feature_spec = _get_raw_feature_spec(schema)
raw_schema = dataset_schema.from_feature_spec(raw_feature_spec)
return tft_coders.ExampleProtoCoder(raw_schema)
def _make_csv_coder(schema, column_names):
"""Return a coder for tf.transform to read csv files."""
raw_feature_spec = _get_raw_feature_spec(schema)
parsing_schema = dataset_schema.from_feature_spec(raw_feature_spec)
return tft_coders.CsvCoder(column_names, parsing_schema)
```
Implement a routine that reads examples from a CSV file and sends the model an inference request containing a base64 encoding of each serialized TF Example.
```
def do_inference(server_addr, model_name, examples_file, num_examples, schema):
"""Sends requests to the model and prints the results.
Args:
server_addr: network address of model server in "host:port" format
model_name: name of the model as understood by the model server
examples_file: path to csv file containing examples, with the first line
assumed to have the column headers
num_examples: number of requests to send to the server
schema: a Schema describing the input data
Returns:
Response from model server
"""
filtered_features = [
feature for feature in schema.feature if feature.name != _LABEL_KEY
]
del schema.feature[:]
schema.feature.extend(filtered_features)
column_names = io_utils.load_csv_column_names(examples_file)
csv_coder = _make_csv_coder(schema, column_names)
proto_coder = _make_proto_coder(schema)
input_file = open(examples_file, 'r')
input_file.readline() # skip header line
serialized_examples = []
for _ in range(num_examples):
one_line = input_file.readline()
if not one_line:
print('End of example file reached')
break
one_example = csv_coder.decode(one_line)
serialized_example = proto_coder.encode(one_example)
serialized_examples.append(serialized_example)
parsed_server_addr = server_addr.split(':')
host=parsed_server_addr[0]
port=parsed_server_addr[1]
json_examples = []
for serialized_example in serialized_examples:
# The encoding follows the guidelines in:
# https://www.tensorflow.org/tfx/serving/api_rest
example_bytes = base64.b64encode(serialized_example).decode('utf-8')
predict_request = '{ "b64": "%s" }' % example_bytes
json_examples.append(predict_request)
json_request = '{ "instances": [' + ','.join(map(str, json_examples)) + ']}'
server_url = 'http://' + host + ':' + port + '/v1/models/' + model_name + ':predict'
response = requests.post(
server_url, data=json_request, timeout=_INFERENCE_TIMEOUT_SECONDS)
response.raise_for_status()
prediction = response.json()
print(json.dumps(prediction, indent=4))
```
Open the metadata store, obtain the URI for the schema of your model, as inferred by TF DV, fetch the schema file and parse it into a `Schema` object.
```
def _make_schema(pipeline_name):
"""Reads and constructs schema object for provided pipeline.
Args:
pipeline_name: The name of the pipeline for which TFX Metadata Store has Schema.
Returns:
An instance of Schema or raises Exception if more or fewer than one schema
was found for the given pipeline.
"""
db_path = os.path.join(os.environ['HOME'], 'airflow/tfx/metadata/', pipeline_name, 'metadata.db')
store = tfx_utils.TFXReadonlyMetadataStore.from_sqlite_db(db_path)
schemas = store.get_artifacts_of_type_df(tfx_utils.TFXArtifactTypes.SCHEMA)
assert len(schemas.URI) == 1
schema_uri = schemas.URI.iloc[0] + 'schema.pbtxt'
schema = schema_pb2.Schema()
contents = file_io.read_file_to_string(schema_uri)
text_format.Parse(contents, schema)
return schema
```
Use the utilities that we have defined to send a series of inference requests to the model being served by TensorFlow Serving, listening on the host's network interface.
```
do_inference(server_addr='127.0.0.1:8501',
model_name=_PIPELINE_NAME,
examples_file='/root/airflow/data/taxi_data/data.csv',
num_examples=3,
schema=_make_schema(_PIPELINE_NAME))
```
<a href="https://colab.research.google.com/github/SotaYoshida/Lecture_DataScience/blob/2021/notebooks/Python_r08.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
In regression analysis, you sometimes observe that datasets share the same summary statistics, such as the correlation coefficient,
yet look completely different when drawn as scatter plots.
This was introduced by the statistician Frank Anscombe and is well known as an example showing that an analysis relying only on basic summary statistics, without visualizing the data, is not sufficient.
Below, although the datasets differ slightly from Anscombe's original example,
let us write a program that generates four different scatter plots.
When executed, it produces:
(top left) data from a straight line with added noise
(top right) data from a quadratic with added noise
(bottom left) data containing outliers
(bottom right) data consisting of two groups
In every case,
the correlation coefficient between x and y is equal to 0.8 to within 0.01.
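The helper functions `m_sigma` and `m_corr` defined in the next cell compute the centered cross product and the sample Pearson correlation coefficient,
$$
r_{xy} = \frac{\sum_i (x_i - \bar{x})(y_i - \bar{y})}{\sqrt{\sum_i (x_i - \bar{x})^2}\,\sqrt{\sum_i (y_i - \bar{y})^2}},
$$
which is the statistic held fixed at approximately 0.8 across all four datasets.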
```
import numpy as np
from matplotlib import pyplot as plt
!pip install japanize_matplotlib
import japanize_matplotlib
def m_sigma(X,Y):
Xb=np.mean(X);Yb=np.mean(Y)
return np.dot(X-Xb,Y-Yb)
def m_corr(X,Y):
return m_sigma(X,Y)/np.sqrt(m_sigma(X,X)*m_sigma(Y,Y))
def mainplot(XYs):
fig = plt.figure(figsize=(8,9))
axs = [fig.add_subplot(2,2,1),fig.add_subplot(2,2,2),
fig.add_subplot(2,2,3),fig.add_subplot(2,2,4)]
cols = ["red", "blue", "green", "orange"]
for i, XY in enumerate(XYs):
axs[i].set_facecolor("#D3DEF1")
axs[i].set_xlabel("x"); axs[i].set_ylabel("y")
axs[i].set_title(XY[3])
axs[i].scatter(XY[0],XY[1],s=5,color=cols[i])
plt.show()
plt.close()
if __name__ == "__main__":
target_r = 0.80
itnum = 10000
XYs=[]
np.random.seed(10)
X= np.arange(0,10,0.1)
hit=0; hit1 = 0; hit2=0
for tmp in range(itnum):
Y= 3.0+0.5*X + np.random.normal(0,1.0,len(X))
corr=m_corr(X,Y)
if abs(corr -target_r) < 1.e-4:
XYs += [ [X,Y,corr,"linear"] ]
break
for tmp in range(itnum):
Y= -6+2.78*X -0.207*X*X + np.random.normal(0,0.3,len(X))
corr=m_corr(X,Y)
if abs(corr -target_r) < 1.e-4:
XYs += [ [X,Y,corr,"polynomial"] ]
break
for tmp in range(itnum):
Y = 0.3 * X
for j in range(len(Y)):
if j % 20 == 0:
Y[j] += np.random.normal(0,3.0)
corr=m_corr(X,Y)
if abs(corr -target_r) < 1.e-4:
XYs += [ [X,Y,corr,"outlier"] ]
break
X1=[]; X2=[]
for i,tmp in enumerate(X):
if i < len(X)//2 :
X1 += [tmp]
else:
X2 += [tmp]
X = np.hstack([X1,X2])
for i in range(itnum):
Y1 = 2.026 -0.2*np.array(X1) #+ np.random.normal(0,1.0,len(X1))
Y2 = 5.904- 0.1*np.array(X2) #+ np.random.normal(0,1.0,len(X2))
Y = np.hstack([Y1,Y2])
corr=m_corr(X,Y)
if abs(corr -target_r) < 1.e-4:
XYs += [ [X,Y,corr,"two-modes"] ]
break
mainplot(XYs)
```
```
import pandas as pd
train = pd.read_csv('train.csv')
stores = pd.read_csv('store.csv')
df = train.merge(stores, on='Store', how='left')
df['Month'] = pd.to_datetime(df['Date']).dt.month
df['Year'] = pd.to_datetime(df['Date']).dt.year
df['Day'] = pd.to_datetime(df['Date']).dt.day
df['is_Prom_Jan']=(df['PromoInterval'].str.contains("Jan").fillna(0)).astype(int)
df['is_Prom_Feb']=(df['PromoInterval'].str.contains("Feb").fillna(0)).astype(int)
df['is_Prom_Mar']=(df['PromoInterval'].str.contains("Mar").fillna(0)).astype(int)
df['hasCompetition']= df['CompetitionDistance'].notnull().astype(int)
df['CompetitionOpenSinceMonth']=df['CompetitionOpenSinceMonth'].fillna(1)
df['CompetitionOpenSinceYear']=df['CompetitionOpenSinceYear'].fillna(2100)
df['DayC']=1
#df['DateCompetition']=pd.to_datetime(df.CompetitionOpenSinceYear*10000
# +df.CompetitionOpenSinceMonth*100+df.DayC,format='%Y%m%d')
#df['hasCompetiton'] = (df['DateCompetition'] < pd.to_datetime(df['Date'])).astype(int)
df
df.columns
colToKeep = ['Store', 'DayOfWeek', 'Open', 'Promo', 'StateHoliday', 'SchoolHoliday', 'StoreType',
'Assortment', 'hasCompetition', 'Month', 'Year', 'Day',
'is_Prom_Jan', 'is_Prom_Feb', 'is_Prom_Mar']
colToDelete = ['CompetitionOpenSinceMonth', 'CompetitionDistance',
'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek',
'Promo2SinceYear', 'PromoInterval', 'Customers']
df['Assortment']=df['Assortment'].replace({'a':1,'b':2,'c':3})
df['StoreType'] = df['StoreType'].replace({'a':1, 'b':2, 'c':3, 'd':4})
df['StateHoliday']=df['StateHoliday'].replace({'0':0, 'a':1, 'b':2, 'c': 3 })
import numpy as np
df['CompetitionDistance']=df['CompetitionDistance'].fillna(20000)
df['CompetitionDistance']=df['CompetitionDistance'].replace({0:1})
df['CompetitionDistance']=np.log(df['CompetitionDistance'])
df[colToKeep[4]].value_counts()
pd.unique(df['StateHoliday'])
from sklearn.model_selection import train_test_split
df
train = df[pd.to_datetime(df['Date'])<pd.to_datetime('2015-06-15')]
test = df[pd.to_datetime(df['Date'])>=pd.to_datetime('2015-06-15')]
test
y=df['Sales']
x=df[colToKeep]
x_train = train[colToKeep]
y_train = train['Sales']
x_test = test[colToKeep]
y_test = test['Sales']
from sklearn.neighbors import KNeighborsRegressor
#10
neigh = KNeighborsRegressor(n_neighbors=15)
neigh.fit(x_train, y_train)
y_pred = neigh.predict(x_test)
from sklearn.metrics import mean_squared_error
from math import sqrt
sqrt(mean_squared_error(y_test, y_pred))
from sklearn.ensemble import RandomForestRegressor
regrS = RandomForestRegressor(n_estimators = 30, max_features = 7, max_depth = 3)
#model joblib.dump(classifier, 'classifier.joblib') from sklearn.externals import joblib
regrS.fit(x_train, y_train)
# Extract single tree
estimator = regrS.estimators_[5]
from sklearn.tree import export_graphviz
# Export as dot file
export_graphviz(estimator, out_file='tree.dot',
feature_names = colToKeep,
class_names = ['Sales'],
rounded = True, proportion = False,
precision = 2, filled = True)
from sklearn.ensemble import RandomForestRegressor
regr = RandomForestRegressor(n_estimators = 100)
#model joblib.dump(classifier, 'classifier.joblib') from sklearn.externals import joblib
regr.fit(x_train, y_train)
import joblib
joblib.dump(regr, 'classifier.joblib.pkl.z')
from sklearn.metrics import mean_squared_error
from math import sqrt
y_predRF = regr.predict(x_test)
sqrt(mean_squared_error(y_test, y_predRF))
colToKeep
import datetime  # needed below for datetime.timedelta when stepping the date forward
import matplotlib.pyplot as plt
x_test.iloc[0]
a=x_test.iloc[46800]
a['Month']=8
a['Day'] = 1
a['DayOfWeek']=6
Day=[]
Y=[]
old = a
for i in range(0,15):
new = old
new['Promo']=1
date = pd.to_datetime(old.Year*10000+old.Month*100+old.Day,format='%Y%m%d') + datetime.timedelta(days=1)
if date.dayofweek == 0:
new['DayOfWeek'] = 7
new['Open']= 0
else:
new['DayOfWeek'] = date.dayofweek
if date.dayofweek == 1 :
new['Open']=1
new['Month']=date.month
new['Day']=date.day
new['DayOfWeek'] = date.dayofweek
y_p = regr.predict([new])
Day.append(date)
Y.append(y_p[0])
old = new
Y
plt.plot(Day, Y)
plt.xticks(rotation=90)
plt.show()
YnoPromo = [4592.78,
0.0,
5094.21,
4941.9,
4756.31,
4830.36,
4848.63,
3832.17,
0.0,
4711.58,
4405.02,
4818.01,
5033.9,
5028.34,
3715.99]
YPromo = [8161.08,
0.0,
10664.13,
8121.32,
7606.89,
7539.1,
6954.02,
6958.99,
0.0,
9904.81,
7909.98,
7177.56,
7096.43,
6901.86,
6925.86]
Ydiff = []
for i in range(0, len(YPromo)):
Ydiff.append(YPromo[i] - YnoPromo[i])
plt.plot(Day,Ydiff)
plt.xticks(rotation=90)
import numpy as np
YGain = []
moyenne = sum(Ydiff)/12
for i in range(0, len(Ydiff)):
YGain.append(Ydiff[i] - moyenne)
YGain
sum(YPromo)/12
plt.plot(Day,YPromo)
plt.xticks(rotation=90)
x_test.iloc[46800]
feat_importances = pd.DataFrame(regr.feature_importances_, index=x_test.columns, columns=["Importance"])
feat_importances.sort_values(by='Importance', ascending=False, inplace=True)
feat_importances.plot(kind='bar', figsize=(8,6))
difference = pd.DataFrame(abs(y_test-y_predRF))
print((difference['Sales']<500).astype(int).sum())
print((difference['Sales']<900).astype(int).sum()/52405)
from matplotlib import pyplot as plt
pred_df=pd.DataFrame({'Predictions':y_predRF,'Actual':y_test})
plt.figure(figsize=(10,10))
pred_df["Actual"][-50:,].plot.line()
pred_df["Predictions"][-50:,].plot.line()
plt.legend()
plt.show()
from matplotlib import pyplot as plt
pred_df=pd.DataFrame({'Predictions':y_predRF,'Actual':y_test})
plt.figure(figsize=(10,10))
pred_df["Actual"][25560:25600,].plot.line()
pred_df["Predictions"][25560:25600,].plot.line()
plt.legend()
plt.show()
from matplotlib import pyplot as plt
pred_df=pd.DataFrame({'Predictions':y_predRF,'Actual':y_test})
plt.figure(figsize=(10,10))
pred_df["Actual"][:50,].plot.line()
pred_df["Predictions"][:50,].plot.line()
plt.legend()
plt.show()
import sklearn
print('The scikit-learn version is {}.'.format(sklearn.__version__))
from sklearn.metrics import mean_absolute_error
```

# NoSQL databases
### Redis practice. Publish/subscribe services
#### Part 2. Subscription
First of all, your name:
Name
Now we connect to Redis. It is important that you replace these connection details with those of your own account, so as not to saturate the server. They must also be the same ones used in the publisher, otherwise it will not work.
```
# Note: replace these values with your own Redis Labs connection details
redisconexion = "redis-13665.c55.eu-central-1-1.ec2.cloud.redislabs.com"
redispuerto = 13665
passwd = "csVe77ZtQL7sKQocZZHUlnjmSf0WpGxE"
import redis
r = redis.Redis(host=redisconexion, password=passwd, port=redispuerto, charset="utf-8", decode_responses=True, db=0)
r.ping() # should return True
```
#### Question 2: 1 point
Declare 3 subscribers, s1, s2 and s3:
* s1 will be subscribed only to 'news-money'
* s2 will be subscribed only to 'news-selected'
* s3 will be subscribed only to 'news-selected' and 'news-other'
```
# solution
```
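One possible sketch of a solution, using redis-py `pubsub()` objects and the channel names listed above:
```
# possible solution sketch: one pubsub object per subscriber
s1 = r.pubsub()
s1.subscribe('news-money')
s2 = r.pubsub()
s2.subscribe('news-selected')
s3 = r.pubsub()
s3.subscribe('news-selected', 'news-other')
```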
#### Question 3: 2 points
After running the code above, run the publisher, which will take a few seconds (maybe a minute). We now have both subscribers (s1, s2, s3) and published messages. To test it, write a function `muestra` that prints the 'data' and 'channel' keys of the first n messages of type `message` or `pmessage` from a subscriber.
Note: assume there are n messages; there is no need to check.
```
# solution
# s is a subscriber, n the number of messages to display
def muestra(s,n):
```
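One possible sketch of `muestra`, assuming `get_message()` returns dicts with 'type', 'channel' and 'data' keys (already decoded to strings because the connection uses `decode_responses=True`):
```
# possible solution sketch
def muestra(s, n):
    shown = 0
    while shown < n:
        msg = s.get_message()
        if msg is None:
            break
        if msg['type'] in ('message', 'pmessage'):
            shown += 1
            print('Mensaje', shown, 'Canal', msg['channel'])
            print('Titular:', msg['data'])
```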
To test the code above
```
print("s1")
muestra(s1,3)
print("s2")
muestra(s2,3)
print("s3")
muestra(s3,3)
```
The output will depend on the news items, but it should follow this format:
s1
Mensaje 1 Canal news-money
Titular: Maharashtra govt to distribute ₹2,900 cr for drought relief
Mensaje 2 Canal news-money
Titular: Fresh milk startup Country Delight raises $7-10 mn: Reports
Mensaje 3 Canal news-money
Titular: Deals worth ₹3 lakh cr signed at Global Investors Meet: TN
s2
Mensaje 1 Canal news-selected
Titular: Came back from India trip, amazed at the changes: Mark Mobius
....
#### Question 4: 1 point
* Create a new subscriber s4
* Use pattern subscription to subscribe it to all channels whose names start with `news-`
```
```
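A possible sketch, using redis-py pattern subscription:
```
# possible solution sketch: pattern subscription to every channel starting with news-
s4 = r.pubsub()
s4.psubscribe('news-*')
```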
To test it:
```
# before running this code, run the publisher again (its last code cell)
muestra(s4,4)
```
#### Question 5: 2 points
Normally we do not just want to print the information on screen; we want to store what is published in a database. In our case:
* Create a Mongo server (or connect to Atlas, as you prefer)
* Write a function graba(s,db) that stores each message in a collection with the same name as the channel the message was published on. Each document will only have a `titular` key (besides the _id, which we let Mongo generate automatically)
graba(s,db) will keep writing headlines to Mongo until the `get_message` function returns None
**Warning**: the following code deletes the news database; make sure it contains nothing important
# iniciamos mongo
from pymongo import MongoClient
client = MongoClient('mongodb://127.0.0.1:27017/')
## OJO: borramos la base de datos news
client.drop_database('news')
db = client.news
# stores every message from subscriber s in database db, using the message's
# 'channel' value as the collection name
def graba(s,db):
```
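One possible sketch of `graba`, reading messages until `get_message` returns None and inserting one document per headline into a collection named after the channel (pymongo's `insert_one` is assumed):
```
# possible solution sketch
def graba(s, db):
    while True:
        msg = s.get_message()
        if msg is None:
            break
        if msg['type'] in ('message', 'pmessage'):
            db[msg['channel']].insert_one({'titular': msg['data']})
```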
To test it, we declare a subscriber
```
s5 = r.pubsub()
s5.subscribe("news-selected")
s5.subscribe("news-money")
```
Now we run the publisher, and then call our function, which will write to two collections in Mongo: news-selected and news-money
```
graba(s5,db)
```
Let's check that the documents were inserted correctly by looking at the first documents of each collection
```
def muestra_primeros(db,n):
for c in db.list_collection_names():
print("Colección ",c)
for doc in db[c].find({},{"_id":0}).limit(n):
print(doc)
muestra_primeros(db,10)
```
The output should look like:
Colección news-selected
{'titular': 'US govt agency condemns Pak as food aid being denied to Hindus amid coronavirus'}
{'titular': 'Coronaviruses found in two bat species in India: ICMR'}
....
Copyright (c) 2020 Ryan Cohn and Elizabeth Holm. All rights reserved. <br />
Licensed under the MIT License (see LICENSE for details) <br />
Written by Ryan Cohn
# Instance segmentation performance evaluation and sample characterization
In this example we will do the following:
* Evaluate how well the predicted masks agree with the hand-drawn annotations
* Perform basic sample measurements (ie particle size)
* Match satellites to particles to measure the satellite content of samples
## Note:
We lump the predictions on training images with the validation images. This is because our available data so far is very limited, so we just
want to show the process for analyzing the results. The process is exactly the same for analyzing larger quantities of data, so after generating predictions
you can replace the filepath with more validation or even unlabeled images to get a better representation of the performance of the model.
```
import json
import matplotlib.pyplot as plt
import numpy as np
import os
from pathlib import Path
import pandas as pd
import pickle
import pycocotools.mask as RLE
import seaborn as sns
import skimage
import skimage.io
root = str(Path('..'))
import sys
if root not in sys.path:
sys.path.append(root)
from sat_helpers import analyze, data_utils
from sat_helpers.applications import powder
from sat_helpers.structures import InstanceSet
from sat_helpers.visualize import display_iset
```
# Loading Data
To save time, we can skip recomputing the performance metrics by loading the previously saved values.
```
with open('../data/segmentation_data.json', 'r') as fp:
data = json.load(fp)
average_p = data['P']
average_r = data['R']
```
Alternatively, you can recompute the metrics from your own predictions generated earlier by replacing the paths below; as an example, I am including mine from the fully trained model.
```
average_p = []
average_r = []
NUM_STAGES = 7
for i in range(NUM_STAGES):
#Loading Ground Truth Labels
satellites_gt_path = Path('..', 'data', 'RESULTS', 'satellite_auto_validation_v1.2.json')
for path in [satellites_gt_path]:
assert path.is_file(), f'File not found : {path}'
satellites_gt_dd = data_utils.get_ddicts('via2', satellites_gt_path, dataset_class='train')
#Loading Prediction Labels
satellites_path = Path('..', 'data', 'RESULTS', f'satellite-stage-{i}.pickle')
assert satellites_path.is_file()
with open(satellites_path, 'rb') as f:
satellites_pred = pickle.load(f)
iset_satellites_gt = [InstanceSet().read_from_ddict(x, inplace=False) for x in satellites_gt_dd]
iset_satellites_pred = [InstanceSet().read_from_model_out(x, inplace=False) for x in satellites_pred]
#Creating Instance Set Objects
iset_satellites_gt, iset_satellites_pred = analyze.align_instance_sets(iset_satellites_gt, iset_satellites_pred)
#Re-ordering instance sets to be concurrent
for gt, pred in zip(iset_satellites_gt, iset_satellites_pred):
pred.HFW = gt.HFW
pred.HFW_units = gt.HFW_units
print(f'gt filename: {Path(gt.filepath).name}\t pred filename: {Path(pred.filepath).name}')
#Creating Detection Scores
dss_satellites = [analyze.det_seg_scores(gt, pred, size=gt.instances.image_size)
for gt, pred in zip(iset_satellites_gt, iset_satellites_pred)]
labels = []
counts = {'train': 0, 'validation': 0}
for iset in iset_satellites_gt:
counts[iset.dataset_class] += 1
labels.append(iset.filepath.name)
x=[*([1] * len(labels)), *([2] * len(labels))]
# y values are the bar heights
scores = [*[x['det_precision'] for x in dss_satellites],
*[x['det_recall'] for x in dss_satellites]]
labels = labels * 2
print('x: ', x)
print('y: ', [np.round(x, decimals=2) for x in scores])
#print('labels: ', labels)
fig, ax = plt.subplots(figsize=(6,3), dpi=150)
sns.barplot(x=x, y=scores, hue=labels, ax=ax)
ax.legend(bbox_to_anchor=(1,1))
ax.set_ylabel('detection score')
ax.set_xticklabels(['precision','recall'])
print("Average Precision Score: ", str(sum([*[x['det_precision'] for x in dss_satellites]])/len([*[x['det_precision'] for x in dss_satellites]])))
print("Average Precision Score: ", str(sum([*[x['det_recall'] for x in dss_satellites]])/len([*[x['det_recall'] for x in dss_satellites]])))
#Analyzing Prediction Scores on a pixel level
temp_p = []
temp_r = []
total_area = 1024*768
for instance in range(len(iset_satellites_pred)):
fp_area = 0
fn_area = 0
tp_area = 0
iset_satellites_pred[instance].compute_rprops(keys=['area'])
for i in dss_satellites[instance]['det_fp']:
try:
fp_area += int(iset_satellites_pred[instance].rprops['area'][i])
except:
pass
for i in dss_satellites[instance]['det_fn']:
try:
fn_area += int(iset_satellites_pred[instance].rprops['area'][i])
except:
pass
#print(dss_satellites[0]['seg_tp'])
for i in dss_satellites[instance]['det_tp']:
try:
tp_area += int(iset_satellites_pred[instance].rprops['area'][i[1]])
except:
pass
print("Precision:", str(tp_area/(tp_area+fp_area)))
print('Recall:', str(tp_area/(tp_area+fn_area)))
temp_p.append(tp_area/(tp_area+fp_area))
temp_r.append(tp_area/(tp_area+fn_area))
print('---')
average_p.append(temp_p)
average_r.append(temp_r)
counter = 0
for iset in iset_satellites_gt:
gt = iset_satellites_gt[counter]
pred = iset_satellites_pred[counter]
iset_det, colormap = analyze.det_perf_iset(gt, pred)
img = skimage.color.gray2rgb(skimage.io.imread(iset.filepath))
#display_iset(img, iset=iset_det)
counter += 1
```
# Saving Data
To save time later, the calculated performance scores are saved so they do not have to be recomputed every time the notebook is opened.
This block only needs to be run when new segmentation results are added; otherwise it can be skipped.
```
data = {'P': average_p, 'R': average_r}
with open('../data/segmentation_data.json', 'w') as fp:
json.dump(data, fp)
```
# Separating Segmentation Results Per Powder
Each model's performance is separated per powder so that individual performance trends can be identified
```
S08_1000_p = []
S06_500_p = []
S04_1000_p = []
S03_1250_p = []
S02_300_p = []
HP_250_p = []
S08_1000_r = []
S06_500_r = []
S04_1000_r = []
S03_1250_r = []
S02_300_r = []
HP_250_r = []
for i in range(len(average_p)):
S08_1000_p.append([i, average_p[i][0]])
S06_500_p.append([i, average_p[i][1]])
S04_1000_p.append([i, average_p[i][2]])
S03_1250_p.append([i, average_p[i][3]])
S02_300_p.append([i, average_p[i][4]])
HP_250_p.append([i, average_p[i][5]])
for i in range(len(average_r)):
S08_1000_r.append([i, average_r[i][0]])
S06_500_r.append([i, average_r[i][1]])
S04_1000_r.append([i, average_r[i][2]])
S03_1250_r.append([i, average_r[i][3]])
S02_300_r.append([i, average_r[i][4]])
HP_250_r.append([i, average_r[i][5]])
```
# Visualizing Individual Performance Scores Over Time
Each graph represents performance on a specific powder or magnification. The stage at which a powder is included in training is highlighted with vertical red lines
### Inconel [0-63] at 1250x
```
plt.title("Precision And Recall Score for Inconel [0-63] \nat 1250x After Each Stage of Training")
xyz = np.array(S03_1250_p)
XYZ = np.array(S03_1250_r)
plt.ylabel("Detection Scores")
plt.xlabel("Stage of Model")
plt.scatter(xyz[:,0], xyz[:,1])
plt.plot(xyz[:,0], xyz[:,1], label = 'Precision')
plt.scatter(XYZ[:,0], XYZ[:,1])
plt.plot(XYZ[:,0], XYZ[:,1], label = 'Recall')
plt.axvline(0, c='r')
plt.axvline(1, c='r')
plt.legend(loc="lower right")
plt.axis([-0.25, 6.25, 0.0, 1.0])
print('')
```
### Ti-6Al-4V [0-63] at 500x
```
plt.title("Precision And Recall Score for Ti-6Al-4V [0-63] \nat 500x After Each Stage of Training")
xyz = np.array(S06_500_p)
XYZ = np.array(S06_500_r)
plt.ylabel("Detection Scores")
plt.xlabel("Stage of Model")
plt.scatter(xyz[:,0], xyz[:,1])
plt.plot(xyz[:,0], xyz[:,1], label = 'Precision')
plt.scatter(XYZ[:,0], XYZ[:,1])
plt.plot(XYZ[:,0], XYZ[:,1], label = 'Recall')
plt.axvline(1, c='r')
plt.axvline(2, c='r')
plt.legend(loc="lower right")
plt.axis([-0.25, 6.25, 0.0, 1.0])
print('')
```
### Inconel 718 [64-150] at 300x
```
plt.title("Precision And Recall Score for Inconel 718 [64-150] \nat 300x After Each Stage of Training")
xyz = np.array(S02_300_p)
XYZ = np.array(S02_300_r)
plt.ylabel("Detection Scores")
plt.xlabel("Stage of Model")
plt.scatter(xyz[:,0], xyz[:,1])
plt.plot(xyz[:,0], xyz[:,1], label = 'Precision')
plt.scatter(XYZ[:,0], XYZ[:,1])
plt.plot(XYZ[:,0], XYZ[:,1], label = 'Recall')
plt.axvline(2, c='r')
plt.axvline(3, c='r')
plt.legend(loc="lower right")
plt.axis([-0.25, 6.25, 0.0, 1.0])
print('')
```
### Al 5056 at 250x
```
plt.title("Precision And Recall Score for Al 5056 \nat 250x After Each Stage of Training")
xyz = np.array(HP_250_p)
XYZ = np.array(HP_250_r)
plt.ylabel("Detection Scores")
plt.xlabel("Stage of Model")
plt.scatter(xyz[:,0], xyz[:,1])
plt.plot(xyz[:,0], xyz[:,1], label = 'Precision')
plt.scatter(XYZ[:,0], XYZ[:,1])
plt.plot(XYZ[:,0], XYZ[:,1], label = 'Recall')
plt.axvline(3, c='r')
plt.axvline(4, c='r')
plt.legend(loc="lower right")
plt.axis([-0.25, 6.25, 0.0, 1.0])
print('')
```
### Ti-Nb-Zr [0-63] at 1000x
```
plt.title("Precision And Recall Score for Ti-Nb-Zr [0-63] \nat 1000x After Each Stage of Training")
xyz = np.array(S08_1000_p)
XYZ = np.array(S08_1000_r)
plt.ylabel("Detection Scores")
plt.xlabel("Stage of Model")
plt.scatter(xyz[:,0], xyz[:,1])
plt.plot(xyz[:,0], xyz[:,1], label = 'Precision')
plt.scatter(XYZ[:,0], XYZ[:,1])
plt.plot(XYZ[:,0], XYZ[:,1], label = 'Recall')
plt.axvline(4, c='r')
plt.axvline(5, c='r')
plt.legend(loc="lower right")
plt.axis([-0.25, 6.25, 0.0, 1.0])
print('')
```
### SS-17-4 [10-45] at 1000x
```
plt.title("Precision And Recall Score for SS-17-4 [10-45] \nat 1000x After Each Stage of Training")
xyz = np.array(S04_1000_p)
XYZ = np.array(S04_1000_r)
plt.ylabel("Detection Scores")
plt.xlabel("Stage of Model")
plt.scatter(xyz[:,0], xyz[:,1])
plt.plot(xyz[:,0], xyz[:,1], label = 'Precision')
plt.scatter(XYZ[:,0], XYZ[:,1])
plt.plot(XYZ[:,0], XYZ[:,1], label = 'Recall')
plt.axvline(5, c='r')
plt.axvline(6, c='r')
plt.legend(loc="lower right")
plt.axis([-0.25, 6.25, 0.0, 1.0])
print('')
```
# Calculating Average Performance Scores and Adjusted Average Performance Scores
The "adjusted" averages below drop the last per-powder entry from each stage before averaging.
```
print(average_p[0])
print(average_p[-1])
print(average_r[0])
print(average_r[-1])
print("Initial Model Precision:", str(sum(average_p[0])/len(average_p[0])))
print("Final Model Precision:", str(sum(average_p[-1])/len(average_p[-1])))
print("Initial Model Recall:", str(sum(average_r[0])/len(average_r[0])))
print("Final Model Recall:", str(sum(average_r[-1])/len(average_r[-1])))
altered_average_p = []
altered_average_r = []
for i in average_p:
temp = []
for j in i:
temp.append(j)
altered_average_p.append(temp)
for i in average_r:
temp = []
for j in i:
temp.append(j)
altered_average_r.append(temp)
for i in range(len(altered_average_p)):
print(altered_average_p[i].pop(-1))
for i in range(len(altered_average_r)):
altered_average_r[i].pop(-1)
#print(altered_average_r)
print("Initial Model Precision:", str(sum(altered_average_p[0])/len(altered_average_p[0])))
print("Final Model Precision:", str(sum(altered_average_p[-1])/len(altered_average_p[-1])))
print("Initial Model Recall:", str(sum(altered_average_r[0])/len(altered_average_r[0])))
print("Final Model Recall:", str(sum(altered_average_r[-1])/len(altered_average_r[-1])))
ave_p = []
ave_r = []
for i in range(len(altered_average_p)):
ave_p.append([i, sum(altered_average_p[i])/len(altered_average_p[i])])
for i in range(len(altered_average_r)):
ave_r.append([i, sum(altered_average_r[i])/len(altered_average_r[i])])
print(ave_p)
plt.title("Average Precision and Recall Score of Model Over Time")
xyz = np.array(ave_p)
XYZ = np.array(ave_r)
plt.ylabel("Detection Scores")
plt.xlabel("Stage of Model")
plt.scatter(xyz[:,0], xyz[:,1])
plt.plot(xyz[:,0], xyz[:,1], label = 'Precision')
plt.scatter(XYZ[:,0], XYZ[:,1])
plt.plot(XYZ[:,0], XYZ[:,1], label = 'Recall')
plt.legend(loc="lower right")
plt.axis([-0.25, 6.25, 0.0, 1.0])
print('')
```
<a href="https://colab.research.google.com/github/joaovictor-loureiro/data-science/blob/master/Manipulacao_de_strings.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## String Manipulation with Python
In this notebook I will present some operations and concepts available in the Python programming language for manipulating strings.
<center>
<a href="https://www.flaticon.com" target="_blank" alt="Ícones feitos por Freepik" name="oo"><img src='https://raw.githubusercontent.com/joaovictor-loureiro/data-science/master/data-science/arquivos/imagens/python.png' alt="Ícones feitos por Freepik from www.flaticon.com" height='200'></a>
</center>
### Printing a String
To print a string, the `print()` function is used. It can be called by passing a string directly as an argument or by passing a variable, as shown below:
```
# Printing strings
print("Olá mundo!")
frase = "Olá mundo!"
print(frase)
```
### Concatenating Strings
Strings can also be concatenated, that is, different strings can be combined to print the desired information. To do this, simply use the addition operator. Here is an example:
```
# Defining the variables
cidade = "Cornélio Procópio"
estado = "Paraná"
pais = "Brasil"
# Printing the data and concatenating the strings
print("ENDEREÇO: " + cidade + ", " + estado + " - " + pais)
```
### Indices
A string can be broken down into indices: the first letter of the word is index 0, the second letter is index 1, the third index 2, and so on. This makes it possible to access a single letter of the word without worrying about the others. See below:
```
# Defining a variable
string = "Manipulação"
# First letter of the word
print(string[0])
# Second letter of the word
print(string[1])
# Last letter of the word
print(string[-1])
# Second-to-last letter of the word
print(string[-2])
```
### Length of a String
You can find the length of a string using the `len()` function. See below:
```
# Defining a variable
string = "Paralelepípedo"
# Printing the length of the variable using the len() function
print(len(string))
```
In other words, the word *Paralelepípedo* has 14 characters.
### The `split()` function
It splits a string based on a given argument, returning a list of the separated items. Example:
```
# Defining the variable
string = "Laranja - Maçã - Banana - Uva - Morango"
# Suppose you want to separate each fruit into a different string;
# since they share a common 'delimiter' (in this case, the hyphen '-'),
# you can pass it as the argument to the split() function
string.split(" - ")
```
As you can see, the result is a list with the 5 items separated.
### The `replace()` function
It replaces elements within a string. Observe:
```
# Defining the variable
string = "esta-mensagem-não-foi-criptografada"
# Suppose you want to replace the hyphens with spaces;
# just specify what should be replaced and what should be inserted in its place.
string.replace('-', ' ')
```
In this case, the words that were previously separated by hyphens are now separated by spaces.
### The `strip()` function
It removes whitespace or unwanted characters from a string. See the example:
```
# Defining the variable
string = " Olá Mundo! "
# The strip() function can be used to remove the whitespace
# at the beginning and end of the sentence.
string.strip()
# Unwanted characters can also be removed, for example the word Olá:
string.strip('Olá ')
```
### Slicing
This is a technique that lets you "cut" a string, using its own indices as a reference. Observe:
```
# Defining the variable
string = "Manipulação"
# Slicing the string to show only the first 3 letters
print(string[:3])
# Slicing the string to show everything after the first 7 letters
print(string[7:])
# Slicing the string to show the middle part of the word
print(string[2:8])
```
As shown, this technique enables many different ways of manipulating a string, letting you cut it however you see fit.
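As an extra illustration (not part of the original examples), a slice can also take a step value, which among other things makes it easy to reverse a string:
```
# Defining the variable
string = "Manipulação"

# Every second character of the word
print(string[::2])

# The word reversed
print(string[::-1])
```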
### The `capitalize()` function
Turns the first character of the string into **uppercase**.
```
# Defining the variable
string = "manipulação"
# Using the capitalize() function
string.capitalize()
```
### The `lower()` function
Turns all characters of the string into **lowercase**.
```
# Defining the variable
string = "MANIPULAÇÃO"
# Using the lower() function
string.lower()
```
### The `upper()` function
Turns all characters of the string into **uppercase**.
```
# Defining the variable
string = "manipulação"
# Using the upper() function
string.upper()
```
### The `title()` function
Turns the first letter of each word into uppercase.
```
# Defining the variable
string = "maria clara almeida"
# Using the title() function
string.title()
```
### The `swapcase()` function
Turns uppercase letters into lowercase and vice versa in a string.
```
# Defining the variable
string = "MANIPULAÇÃO De strings"
# Using the swapcase() function
string.swapcase()
```
```
import numpy as np
import pandas as pd
import os
import glob
from analysis_helper_exp3 import *
from IPython.display import clear_output
%load_ext autoreload
%autoreload 2
iter_max=50
task_col=None
cluster_col='BT_0.4 ID'
run_threshold=0
hs_params = 3
hs_job_count = hs_params*107*1
root_dir = '../../../aldd_results/aldd_exp_3_uncert_bonus/params_results\\'
hs_dir = glob.glob(root_dir+'sampled_hyparams/*/*/*/*/')
df_from_file = True
if not df_from_file:
all_96_hs, all_384_hs, all_1536_hs, all_df_hs, successful_jobs, failed_jobs = get_results(hs_dir, iter_max, task_col, cluster_col, run_threshold, True)
print('----------------------------------------------------------------------------')
print('HS Jobs:')
print('Total jobs: {}'.format(hs_job_count))
print('Failed jobs: {}'.format(len(failed_jobs)))
print('Successful jobs: {}'.format(len(successful_jobs)))
hs_unique = np.unique(["_".join(x.split('_')[0:2]) for x in successful_jobs])
print('Total HS: {}'.format(hs_params))
print('Successful HS: {}'.format(len(hs_unique)))
else:
all_96_hs = pd.read_csv('./exp3/exp3_bonus.csv.gz')
recompute_task_info=False
if recompute_task_info:
task_names = [r.split('\\')[-2][:-6] for r in glob.glob('../datasets/pcba/*_cv_96/')]
task_hit_dict = {}
for task_col in task_names:
task_df = pd.concat([pd.read_csv(x) for x in glob.glob('../datasets/pcba/{}_cv_96/unlabeled_*.csv'.format(task_col))])
cpd_count = task_df.shape[0]
hit_limit = task_df[task_col].sum()
unique_hit_limit = task_df[task_df[task_col] == 1][cluster_col].unique().shape[0]
task_hit_dict[task_col] = (hit_limit, unique_hit_limit, cpd_count)
else:
import pickle
with open('task_info_dict.pickle', 'rb') as handle:
task_hit_dict = pickle.load(handle)
des_cols = ['hs_id', 'rf_id', 'max_iter', 'exploitation_hits', 'exploration_hits', 'total_hits',
'total_unique_hits', 'total_batch_size', 'hs_group', 'task_col']
cdf = all_96_hs[all_96_hs['rf_id'] == '0']
hit_limit_list = []
uhit_limit_list = []
cpd_count_list = []
for tcol in cdf['task_col'].tolist():
a, b, c = task_hit_dict[tcol]
hit_limit_list.append(a)
uhit_limit_list.append(b)
cpd_count_list.append(c)
cdf['hit_limit'] = hit_limit_list
cdf['unique_hit_limit'] = uhit_limit_list
cdf['cpd_count'] = cpd_count_list
task_info = cdf[['task_col', 'hit_limit', 'unique_hit_limit', 'cpd_count']].drop_duplicates()
task_info['active_ratio'] = np.around(100.0 * task_info['hit_limit'] / task_info['cpd_count'], decimals=2)
task_info['hit_limit'] = task_info['hit_limit'].astype(int)
full_task_info = task_info.copy()
excluded_tasks = ['pcba-aid588342','pcba-aid1030', 'pcba-aid504332',
'pcba-aid686979', 'pcba-aid686978']
full_cdf = cdf.copy()
cdf = full_cdf[~full_cdf['task_col'].isin(excluded_tasks)]
task_info = full_task_info[~full_task_info['task_col'].isin(excluded_tasks)]
sorted_task_info = task_info.sort_values('active_ratio')
```
---
# Summary per 10, 20, 30, 40, 50 iterations
```
def helper_agg(col):
if col.name in ['rf_id', 'task_col']:
return '-'
elif col.name in ['hs_id', 'hs_group']:
return col.unique()[0]
else:
if '_std' in col.name:
return col.std()
else:
return col.mean()
def get_last_iter_summary(results_df, iter_max, group_cols = ['hs_id', 'rf_id']):
sdf1 = results_df[results_df['iter_num']==iter_max][des_cols]
sdf1 = sdf1.groupby(group_cols).agg(helper_agg).sort_values('total_hits', ascending=False)
sorted_hid_list = sdf1.index.tolist()
sdf2 = results_df[results_df['iter_num']==iter_max][des_cols]
sdf2 = sdf2[[c for c in sdf2.columns if ('_hits' in c or 'hs_id' in c or 'rf_id' in c)]]
sdf2.columns = [c.replace('hits', 'std') for c in sdf2.columns]
sdf2 = sdf2.groupby(group_cols).agg(helper_agg).loc[sorted_hid_list]
sdf = pd.concat([sdf1, sdf2], axis=1)
return sdf
cdf_without_inactives = cdf[cdf['rf_id'] != 'allinactive0']
max_iter_list = [9010, 9020, 9030, 9040, 9050]
for max_iter in max_iter_list:
miter_summary = get_last_iter_summary(cdf_without_inactives, max_iter, ['hs_id'])
miter_summary = miter_summary.drop(['rf_id', 'max_iter', 'total_batch_size', 'task_col', 'hs_group', 'exploitation_std', 'exploration_std'], axis=1)
miter_summary.index.name = 'max_iter: {}'.format(max_iter)
display(miter_summary)
```
---
# Per task active ratio
```
cdf_without_inactives = cdf[cdf['rf_id'] != 'allinactive0']
n_bins = 8
tasks_per_bin = int(np.ceil(107/n_bins))
binned_tasks = []
iter_max = 9050
for i in range(n_bins):
temp_df = sorted_task_info.iloc[tasks_per_bin*i:tasks_per_bin*(i+1),:]
qualifying_tasks, bin_min, bin_max = temp_df['task_col'].tolist(), temp_df['active_ratio'].min(), temp_df['active_ratio'].max()
ldf = cdf_without_inactives[cdf_without_inactives['task_col'].isin(qualifying_tasks)]
bin_df = get_last_iter_summary(ldf, iter_max, ['hs_id'])
bin_df.index.name = '{}%-to-{}% total: {} tasks'.format(bin_min, bin_max, len(qualifying_tasks))
bin_df = bin_df.drop(['rf_id', 'max_iter', 'total_batch_size', 'task_col', 'hs_group', 'exploitation_std', 'exploration_std'], axis=1)
binned_tasks.append(bin_df)
display(binned_tasks[i])
```
---
# Compound comparison
```
import numpy as np
import pandas as pd
import os
import glob
from analysis_helper_exp3 import *
from IPython.display import clear_output
%load_ext autoreload
%autoreload 2
iter_max=50
task_col=None
cluster_col='BT_0.4 ID'
run_threshold=0
hs_params, benchmark_params, custom_params = 3, 4, 1
hs_job_count = hs_params*107*11
benchmark_job_count = benchmark_params*107*11
custom_job_count = custom_params*107*11
hs_ids = ['ClusterBasedWCSelector_609', 'MABSelector_exploitive']
root_dir = '../../../aldd_results/aldd_exp_3_uncert_bonus//params_results\\'
a_dir = glob.glob(root_dir+'sampled_hyparams/ClusterBasedWCSelector_609/*/*/*/')
b_dir = glob.glob(root_dir+'benchmarks/ClusterBasedWCSelector_609_uncert_normalized/*/*/*/')
c_dir = glob.glob(root_dir+'benchmarks/ClusterBasedWCSelector_609_qbc/*/*/*/')
recompute_task_info=False
if recompute_task_info:
task_names = [r.split('\\')[-2][:-6] for r in glob.glob('../datasets/pcba/*_cv_96/')]
task_hit_dict = {}
for task_col in task_names:
task_df = pd.concat([pd.read_csv(x) for x in glob.glob('../datasets/pcba/{}_cv_96/unlabeled_*.csv'.format(task_col))])
cpd_count = task_df.shape[0]
hit_limit = task_df[task_col].sum()
unique_hit_limit = task_df[task_df[task_col] == 1][cluster_col].unique().shape[0]
task_hit_dict[task_col] = (hit_limit, unique_hit_limit, cpd_count)
else:
import pickle
with open('task_info_dict.pickle', 'rb') as handle:
task_hit_dict = pickle.load(handle)
task_list = np.unique([af.split('\\')[-4] for af in a_dir])
task_info_list = []
for tcol in task_list:
a, b, c = task_hit_dict[tcol]
task_info_list.append([tcol, a, b, c])
task_info = pd.DataFrame(data=task_info_list, columns=['task_col', 'hit_limit', 'unique_hit_limit', 'cpd_count'])
task_info['active_ratio'] = np.around(100.0 * task_info['hit_limit'] / task_info['cpd_count'], decimals=2)
task_info['hit_limit'] = task_info['hit_limit'].astype(int)
cluster_col = 'BT_0.4 ID'
rf_ids = ['{}'.format(i) for i in range(1)]
data = []
for task_col in task_list:
for rf_id in rf_ids:
task_data = task_info[task_info['task_col'] == task_col].iloc[0].tolist()[1:]
af = root_dir+'sampled_hyparams/ClusterBasedWCSelector_609/{}/{}/batch_size_96/'.format(task_col, rf_id)
bf = root_dir+'sampled_hyparams/ClusterBasedWCSelector_609_uncert_normalized/{}/{}/batch_size_96/'.format(task_col, rf_id)
cf = root_dir+'sampled_hyparams/ClusterBasedWCSelector_609_qbc/{}/{}/batch_size_96/'.format(task_col, rf_id)
adf = pd.concat([pd.read_csv(af+'/training_data/iter_{}.csv'.format(i)) for i in range(1, iter_max+1)])
bdf = pd.concat([pd.read_csv(bf+'/training_data/iter_{}.csv'.format(i)) for i in range(1, iter_max+1)])
cdf = pd.concat([pd.read_csv(cf+'/training_data/iter_{}.csv'.format(i)) for i in range(1, iter_max+1)])
a_actives = adf[adf[task_col] == 1]
b_actives = bdf[bdf[task_col] == 1]
c_actives = cdf[cdf[task_col] == 1]
a_actives_idx, b_actives_idx, c_actives_idx = a_actives['Index ID'].values, b_actives['Index ID'].values, c_actives['Index ID'].values
a_uactives, b_uactives, c_uactives = a_actives[cluster_col].unique(), b_actives[cluster_col].unique(), c_actives[cluster_col].unique()
a_hits, b_hits, c_hits = a_actives.shape[0], b_actives.shape[0], c_actives.shape[0]
a_uhits, b_uhits, c_uhits = a_uactives.shape[0], b_uactives.shape[0], c_uactives.shape[0]
xy_data = []
intersect_actives = np.intersect1d(a_actives_idx, b_actives_idx)
union_actives = np.union1d(a_actives_idx, b_actives_idx)
symmetric_diff_actives = np.setdiff1d(union_actives, intersect_actives)
intersect_uactives = np.intersect1d(a_uactives, b_uactives)
union_uactives = np.union1d(a_uactives, b_uactives)
symmetric_diff_uactives = np.setdiff1d(union_uactives, intersect_uactives)
xy_data.extend([intersect_actives.shape[0], union_actives.shape[0], symmetric_diff_actives.shape[0],
intersect_uactives.shape[0], union_uactives.shape[0], symmetric_diff_uactives.shape[0]])
intersect_actives = np.intersect1d(a_actives_idx, c_actives_idx)
union_actives = np.union1d(a_actives_idx, c_actives_idx)
symmetric_diff_actives = np.setdiff1d(union_actives, intersect_actives)
intersect_uactives = np.intersect1d(a_uactives, c_uactives)
union_uactives = np.union1d(a_uactives, c_uactives)
symmetric_diff_uactives = np.setdiff1d(union_uactives, intersect_uactives)
xy_data.extend([intersect_actives.shape[0], union_actives.shape[0], symmetric_diff_actives.shape[0],
intersect_uactives.shape[0], union_uactives.shape[0], symmetric_diff_uactives.shape[0]])
data.append([task_col, rf_id, a_hits, b_hits, c_hits, a_uhits, b_uhits, c_uhits] + xy_data + task_data)
data_df = pd.DataFrame(data=data,
columns=['task_col', 'rf_id',
'609_hits', '609_UC_hits', '609_qbc_hits',
'609_uhits', '609_UC_uhits', '609_qbc_uhits',
'intersect_1', 'union_1', 'sym_diff_1', 'intersect_u_1', 'union_u_1', 'sym_diff_u_1',
'intersect_2', 'union_2', 'sym_diff_2', 'intersect_u_2', 'union_u_2', 'sym_diff_u_2',
'hit_limit', 'unique_hit_limit', 'cpd_count', 'active_ratio'])
sorted_tasks = task_info.sort_values('active_ratio')['task_col'].tolist()
task_means = data_df.groupby('task_col').mean().loc[sorted_tasks]
task_max = data_df.groupby('task_col').max().loc[sorted_tasks]
task_min = data_df.groupby('task_col').min().loc[sorted_tasks]
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_context("paper")
sns.set(font_scale=1.5)
figsize=(32, 8)
plt.figure(figsize=figsize)
sns.lineplot(x=task_means.index, y=task_means['sym_diff_1'].values, sort=False)
sns.lineplot(x=task_means.index, y=task_means['sym_diff_2'].values, sort=False)
plt.xticks(rotation=90);
plt.legend(['sym_diff_1', 'sym_diff_2'])
task_means[['609_hits', '609_UC_hits', '609_qbc_hits',
'union_1', 'sym_diff_1',
'union_2', 'sym_diff_2', 'active_ratio', 'hit_limit']]
import glob
a = '../../../informer_set_results/cycle_training_data_shape_pkis_6_task_*.txt'
for x in glob.glob(a):
with open(x, 'r') as f:
content = f.readlines()
n = int(content[-1].split(', ')[-1][:-1])
if n != 16:
print(x)
x = task_means[['609_hits', '609_UC_hits', '609_qbc_hits',
'union_1', 'sym_diff_1',
'union_2', 'sym_diff_2', 'active_ratio', 'hit_limit']]
x[x.index.isin(['pcba-aid588456', 'pcba-aid602310', 'pcba-aid1458'])]
```
# Ionosphere Dataset - Lipschitz Continuity - LIME - SHAP
```
print("Bismillahir Rahmanir Rahim")
```
## Imports and Paths
```
from IPython.display import display, HTML
from lime.lime_tabular import LimeTabularExplainer
from pprint import pprint
from scipy.spatial.distance import pdist, squareform
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, confusion_matrix
from sklearn.utils.multiclass import unique_labels
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.metrics.pairwise import cosine_similarity
from scipy import spatial
%matplotlib inline
import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pathlib
import sklearn
import seaborn as sns
import statsmodels
import eli5
import lime
import shap
shap.initjs()
```
## Load and preprocess data
Train/test split = 0.80/0.20
```
# Set the seed experimentations and interpretations.
np.random.seed(111)
project_path = pathlib.Path.cwd().parent.parent
import pathlib
dataset_path = str(project_path) + '/datasets/ionosphere/ionosphere.data'
# print(dataset_path)
fp = open(dataset_path, "r")
rows = []
for line in fp:
rows.append(line)
rows_sep = [sub.split(",") for sub in rows]
iono = pd.DataFrame(np.array(rows_sep))
iono_col_names = np.array(iono.columns.tolist()).astype(str)
iono_col_names = np.where(iono_col_names=='34', 'label', iono_col_names)
iono.columns = iono_col_names
iono['label'] = iono['label'].apply(lambda label: label.split('\n')[0])
labels_iono = iono['label']
labels_iono_list = labels_iono.values.tolist()
# labels_iono_codes = labels_train_iono.astype("category").cat.codes
features_iono = iono.iloc[:,:-1]
display(iono.head())
train_iono, test_iono, labels_train_iono, labels_test_iono = train_test_split(
features_iono, labels_iono, train_size=0.80)
labels_train_iono_codes = labels_train_iono.astype("category").cat.codes
labels_test_iono_codes = labels_test_iono.astype("category").cat.codes
""" This form is only compatiable with rest of the notebook code.
"""
train = train_iono.to_numpy().astype(float)
labels_train = labels_train_iono.to_numpy()
test = test_iono.to_numpy().astype(float)
labels_test = labels_test_iono.to_numpy()
x_testset = test
feature_names = features_iono.columns.values
target_names = np.unique(labels_test)
# here 0 = 'b' & 1 = 'g'
unique_targets = np.unique([0, 1]) # LIME only takes integer labels
print("Feature names", feature_names)
print("Target names", target_names)
print("Number of uniques label or target names", unique_targets)
print("Training record", train[0:1])
print("Label for training record", labels_train[0:1])
```
## Train and evaluate models.
Train Logistic Regression and Random Forest models so these can be used as black-box models when evaluating explanation methods.
### Fit Logistic Regression and Random Forest
```
lr = sklearn.linear_model.LogisticRegression(class_weight='balanced')
lr.fit(train, labels_train)
rf = RandomForestClassifier(n_estimators=500, class_weight='balanced_subsample')
rf.fit(train, labels_train)
```
### Predict using logistic regression and random forest models
```
labels_pred_lr = lr.predict(test)
labels_pred_rf = rf.predict(test)
score_lr = metrics.accuracy_score(labels_test, labels_pred_lr)
score_rf = metrics.accuracy_score(labels_test, labels_pred_rf)
print("Logitic Regression accuracy score.", score_lr)
predict_proba_lr = lr.predict_proba(test[:5])
print("\nLogistic Regression predict probabilities\n\n", predict_proba_lr)
predict_lr = lr.predict(test[:5])
print("\nLogistic Regression predictions", predict_lr)
print("\n\n\nRandom Forest accuracy score.", score_rf)
predict_proba_rf = rf.predict_proba(test[:5])
print("\nRandom Forest predict probabilities\n\n", predict_proba_rf)
predict_rf = rf.predict(test[:5])
print("\nRandom Forest predictions", predict_rf)
```
### Classification reports of logistic regression and random forest
```
report_lr = classification_report(labels_test, labels_pred_lr, target_names=target_names)
print("Logistic Regression classification report.")
print(report_lr)
report_rf = classification_report(labels_test, labels_pred_rf, target_names=target_names)
print("Random Forestclassification report.")
print(report_rf)
```
### Display classification reports as dataframes
```
total_targets = len(target_names)
report_lr = classification_report(labels_test, labels_pred_lr, target_names=target_names, output_dict=True)
report_lr = pd.DataFrame(report_lr).transpose().round(2)
report_lr = report_lr.iloc[:total_targets,:-1]
display(report_lr)
report_rf = classification_report(labels_test, labels_pred_rf, target_names=target_names, output_dict=True)
report_rf = pd.DataFrame(report_rf).transpose().round(2)
report_rf = report_rf.iloc[:total_targets,:-1]
display(report_rf)
avg_f1_lr = report_lr['f1-score'].mean()
print("Logistic Regression average f1-score", avg_f1_lr)
avg_f1_rf = report_rf['f1-score'].mean()
print("Random Forest average f1-score", avg_f1_rf)
```
### Confusion matrix of logistic regression and random forest
```
matrix_lr = confusion_matrix(labels_test, labels_pred_lr)
matrix_lr = pd.DataFrame(matrix_lr, columns=target_names).transpose()
matrix_lr.columns = target_names
display(matrix_lr)
matrix_rf = confusion_matrix(labels_test, labels_pred_rf)
matrix_rf = pd.DataFrame(matrix_rf, columns=target_names).transpose()
matrix_rf.columns = target_names
display(matrix_rf)
```
### Combine confusion matrix and classification report of logistic regression and random forest
```
matrix_report_lr = pd.concat([matrix_lr, report_lr], axis=1)
display(matrix_report_lr)
matrix_report_rf = pd.concat([matrix_rf, report_rf], axis=1)
display(matrix_report_rf)
```
### Saving matrices and reports into csv
These CSVs can easily be used to build tables in LaTeX.
```
file_path = str(project_path) + '/datasets/modelling-results/'
filename = 'iono_matrix_report_lr.csv'
matrix_report_lr.to_csv(file_path + filename, index=True)
filename = 'iono_matrix_report_rf.csv'
matrix_report_rf.to_csv(file_path + filename, index=True)
```
### Extract predicted target names for logistic regression and random forest
```
target_names = target_names
targets = unique_targets
targets_labels = dict(zip(targets, target_names))
print(targets_labels)
```
### Ionosphere dataset-specific changes to extract codes
Extracting integer codes such as [0, 1] for the ['b', 'g'] label values
```
dummies = pd.get_dummies(labels_pred_lr)
labels_pred_codes_lr = dummies.values.argmax(1)
dummies = pd.get_dummies(labels_pred_rf)
labels_pred_codes_rf = dummies.values.argmax(1)
labels_names_pred_lr = []
for label in labels_pred_codes_lr:
labels_names_pred_lr.append(targets_labels[label])
labels_names_pred_rf = []
for label in labels_pred_codes_rf:
labels_names_pred_rf.append(targets_labels[label])
print("Logistic Regression predicted targets and their names.\n")
print(labels_pred_codes_lr)
print(labels_names_pred_lr)
print("\n\nRandom Forest predicted targets and their names.")
print(labels_pred_codes_rf)
print(labels_names_pred_rf)
```
## Interpret Black Box Models
## 1. Interpret Logistic Regression and Random Forest using LIME
### LIME explanations util functions
```
def lime_explanations(index, x_testset, explainer, model, unique_targets, class_predictions):
instance = x_testset[index]
exp = explainer.explain_instance(instance,
model.predict_proba,
labels=unique_targets,
top_labels=None,
num_features=len(x_testset[index]),
num_samples=6000)
# Array class_predictions contains predicted class labels
exp_vector_predicted_class = exp.as_map()[class_predictions[index]]
return (exp_vector_predicted_class, exp.score), exp
def explanation_to_dataframe(index, x_testset, explainer, model, unique_targets, class_predictions, dataframe):
feature_imp_tuple, exp = lime_explanations(index,
x_testset,
explainer,
model,
unique_targets,
class_predictions)
exp_val = tuple(sorted(feature_imp_tuple[0]))
data = dict((x, y) for x, y in exp_val)
list_val = list(data.values())
list_val.append(feature_imp_tuple[1])
dataframe.loc[index] = list_val
return dataframe, exp
""" Define LIME Explainer
"""
explainer_lime = LimeTabularExplainer(train,
mode = 'classification',
training_labels = labels_train,
feature_names=feature_names,
verbose=False,
class_names=target_names,
feature_selection='auto',
discretize_continuous=True)
from tqdm import tqdm
col_names = list(feature_names)
col_names.append('lime_score')
```
### Interpret logistic regression on testset using LIME
```
explanations_lime_lr = pd.DataFrame(columns=col_names)
for index in tqdm(range(0,len(test))):
    explanations_lime_lr, exp = explanation_to_dataframe(index,
                                                         test,
                                                         explainer_lime,
                                                         lr, # logistic regression model
                                                         unique_targets,
                                                         labels_pred_codes_lr, # logistic regression predictions
                                                         explanations_lime_lr)
print("LIME explanations on logistic regression.")
display(explanations_lime_lr.head())
display(explanations_lime_lr.iloc[:,:-1].head(1))
```
### Interpret random forest on testset using LIME
```
explanations_lime_rf = pd.DataFrame(columns=col_names)
for index in tqdm(range(0,len(test))):
explanations_lime_rf, exp = explanation_to_dataframe(index,
test,
explainer_lime,
rf, # random forest model
unique_targets,
labels_pred_codes_rf, # random forest predictions
explanations_lime_rf)
print("LIME explanations on random forest.")
display(explanations_lime_rf.head())
display(explanations_lime_rf.iloc[:,:-1].head(1))
```
## 2. Interpret Logistic Regression and Random Forest using SHAP
```
def shapvalue_to_dataframe(test, labels_pred, shap_values, feature_names):
exp_shap_array = []
for test_index in range(0, len(test)):
label_pred = labels_pred[test_index]
exp_shap_array.append(shap_values[label_pred][test_index])
df_exp_shap = pd.DataFrame(exp_shap_array)
df_exp_shap.columns = feature_names
return df_exp_shap
```
### Interpret logistic regression using SHAP
```
shap_train_summary = shap.kmeans(train, 50)
explainer_shap_lr = shap.KernelExplainer(lr.predict_proba, shap_train_summary)
# print("Shap Train Sample Summary", shap_train_summary)
shap_values_lr = explainer_shap_lr.shap_values(test, nsamples='auto')
shap_expected_values_lr = explainer_shap_lr.expected_value
print("Shapley Expected Values", shap_expected_values_lr)
shap.summary_plot(shap_values_lr, test, feature_names=feature_names)
```
### Interpret random forest using SHAP
```
shap_values_rf = shap.TreeExplainer(rf).shap_values(test)
shap.summary_plot(shap_values_rf, test, feature_names=feature_names)
```
### Extract explanations from SHAP values computed on the logistic regression and random forest models.
#### Preprocessing SHAP values
**_shap_values_** returns one array per class, effectively a 3D structure of shape (num_classes, num_test_instances, num_features); for this ionosphere split that is (2, number of test instances, 34).
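As a quick sanity check (a minimal sketch, assuming `shap_values_lr`, `test` and `feature_names` exist as computed above; the random forest values can be inspected the same way):
```
# Sketch: inspect the (num_classes, num_test_instances, num_features) layout.
print("number of classes:", len(shap_values_lr))
print("per-class SHAP matrix shape:", np.array(shap_values_lr[0]).shape)
print("expected:", (len(test), len(feature_names)))
```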
```
explanations_shap_lr = shapvalue_to_dataframe(test,
labels_pred_codes_lr,
shap_values_lr,
feature_names)
display(explanations_shap_lr.head())
display(explanations_shap_lr.iloc[:,:].head(1))
explanations_shap_rf = shapvalue_to_dataframe(test,
labels_pred_codes_rf,
shap_values_rf,
feature_names)
display(explanations_shap_rf.head())
display(explanations_shap_rf.iloc[:,:].head(1))
```
# Local Lipschitz Estimates as a stability measure for LIME & SHAP
## Find Local Lipschitz of points L(x)
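As a reference for the code below, the local Lipschitz estimate used here can be written as follows (our reading of the implementation, with $f(\cdot)$ denoting the explanation vector produced by LIME or SHAP):

$$\hat{L}(x_i) = \max_{x_j \in B_\epsilon(x_i)} \frac{\lVert f(x_i) - f(x_j) \rVert_2}{\lVert x_i - x_j \rVert_2}, \qquad B_\epsilon(x_i) = \{\, x_j : \lVert x_i - x_j \rVert_2 < \epsilon \sqrt{d} \,\}$$

where $d$ is the number of features; the $\sqrt{d}$ scaling matches the radius scaling in the neighborhood functions defined below.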
### Define neighborhood around anchor point x0
```
def norm(Xs, x0, norm=2):
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html
norm = np.linalg.norm(x0 - Xs, norm) # /np.linalg.norm(b[0] - b, 2)
return norm
def neighborhood_with_euclidean(x_points, anchor_index, radius):
x_i = x_points[anchor_index]
radius = radius * np.sqrt(len(x_points[anchor_index]))
x_js = x_points.tolist()
del x_js[anchor_index]
dist = (x_i - x_js)**2
dist = np.sum(dist, axis=1)
dist = np.sqrt(dist)
neighborhood_indices = []
for index in range(0, len(dist)):
if dist[index] < radius:
neighborhood_indices.append(index)
return neighborhood_indices
def neighborhood_with_KDTree(x_points, anchor_index, radius):
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query_ball_point.html
tree = spatial.KDTree(x_points)
neighborhood_indices = tree.query_ball_point(x_points[anchor_index],
radius * np.sqrt(len(x_points[anchor_index])))
return neighborhood_indices
```
### Local Lipschitz of explanation methods (LIME, SHAP)
```
def lipschitz_formula(nearby_points, nearby_points_exp, anchorX, anchorX_exp):
anchorX_norm2 = np.apply_along_axis(norm, 1, nearby_points, anchorX)
anchorX_exp_norm2 = np.apply_along_axis(norm, 1, nearby_points_exp, anchorX_exp)
anchorX_avg_norm2 = anchorX_exp_norm2/anchorX_norm2
anchorX_LC_argmax = np.argmax(anchorX_avg_norm2)
return anchorX_avg_norm2, anchorX_LC_argmax
def lipschitz_estimate(anchorX, x_points, explanations_x_points, anchor_index, neighborhood_indices):
# extract anchor point explanations
anchorX_exp = explanations_x_points[anchor_index]
# extract anchor point neighborhood's explanations
nearby_points = x_points[neighborhood_indices]
nearby_points_exp = explanations_x_points[neighborhood_indices]
# find local lipschitz estimate (lc)
anchorX_avg_norm2, anchorX_LC_argmax = lipschitz_formula(nearby_points,
nearby_points_exp,
anchorX,
anchorX_exp)
return anchorX_avg_norm2, anchorX_LC_argmax
def find_lipschitz_estimates(x_points, x_points_lime_exp, x_points_shap_exp, radii):
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.apply_along_axis.html
# https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/numpy.argmax.html
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query_ball_point.html
instances = []
anchor_x_index = []
lc_coefficient_lime = []
x_deviation_index_lime = []
x_deviation_index_shap = []
lc_coefficient_shap = []
radiuses = []
neighborhood_size = []
for radius in radii:
for anchor_index in range(0, len(x_points)):
            # define neighborhood around the anchor point using the radius
# neighborhood_indices = neighborhood_with_KDTree(x_points, anchor_index, radius)
# neighborhood_indices.remove(anchor_index) # remove anchor index to remove anchor point
neighborhood_indices = neighborhood_with_euclidean(x_points, anchor_index, radius)
print(neighborhood_indices)
radiuses.append(radius)
if len(neighborhood_indices) == 0:
continue
neighborhood_size.append(len(neighborhood_indices))
# extract anchor point and its original index
anchorX = x_points[anchor_index]
instances.append(anchorX)
anchor_x_index.append(anchor_index)
# find local lipschitz estimate (lc) LIME
anchorX_avg_norm2, anchorX_LC_argmax = lipschitz_estimate(anchorX,
x_points,
x_points_lime_exp,
anchor_index,
neighborhood_indices)
lc_coefficient_lime.append(anchorX_avg_norm2[anchorX_LC_argmax])
# find deviation point from anchor point LIME explanations
deviation_point_index = neighborhood_indices[anchorX_LC_argmax]
x_deviation_index_lime.append(deviation_point_index)
# find local lipschitz estimate (lc) SHAP
anchorX_avg_norm2, anchorX_LC_argmax = lipschitz_estimate(anchorX,
x_points,
x_points_shap_exp,
anchor_index,
neighborhood_indices)
lc_coefficient_shap.append(anchorX_avg_norm2[anchorX_LC_argmax])
            # find deviation point from anchor point SHAP explanations
deviation_point_index = neighborhood_indices[anchorX_LC_argmax]
x_deviation_index_shap.append(deviation_point_index)
# columns_lipschitz will be reused so to avoid confusion naming convention should remain similar
columns_lipschitz = ['instance', 'anchor_x_index', 'lc_coefficient_lime', 'x_deviation_index_lime',
'lc_coefficient_shap', 'x_deviation_index_shap', 'radiuses', 'neighborhood_size']
zippedList = list(zip(instances, anchor_x_index, lc_coefficient_lime, x_deviation_index_lime,
lc_coefficient_shap, x_deviation_index_shap, radiuses, neighborhood_size))
return zippedList, columns_lipschitz
```
## Prepare points from testset
```
X = pd.DataFrame(test)
x_points = X.copy().values
print("Testset")
# display(X.head())
# radii = [1.00, 1.25]
radii = [0.75]
```
## 1. Lipschitz est. using explanations generated on logistic regression model
```
print("LIME generated explanations")
X_lime_exp = explanations_lime_lr.iloc[:,:-1].copy()
# display(X_lime_exp.head())
print("SHAP generated explanations")
X_shap_exp = explanations_shap_lr.iloc[:,:].copy()
# display(X_shap_exp.head())
x_points_lime_exp = X_lime_exp.copy().values
x_points_shap_exp = X_shap_exp.copy().values
zippedList, columns_lipschitz = find_lipschitz_estimates(x_points,
x_points_lime_exp,
x_points_shap_exp,
radii)
lr_lipschitz = pd.DataFrame(zippedList, columns=columns_lipschitz)
```
## 2. Lipschitz est. using explanations generated on random forest model
```
print("LIME generated explanations")
X_lime_exp = explanations_lime_rf.iloc[:,:-1].copy()
# display(X_lime_exp.head())
print("SHAP generated explanations")
X_shap_exp = explanations_shap_rf.iloc[:,:].copy()
# display(X_shap_exp.head())
x_points_lime_exp = X_lime_exp.copy().values
x_points_shap_exp = X_shap_exp.copy().values
zippedList, columns_lipschitz = find_lipschitz_estimates(x_points,
x_points_lime_exp,
x_points_shap_exp,
radii)
rf_lipschitz = pd.DataFrame(zippedList, columns=columns_lipschitz)
```
## 1. Lipschitz est. visualizations computed on logistic regression model
```
# NOTE: these filters assume the commented-out radii [1.00, 1.25] were used above;
# with the current radii = [0.75] these subsets will be empty and the means below will be NaN.
epsilon1 = lr_lipschitz[lr_lipschitz['radiuses'] == 1.00]
epsilon125 = lr_lipschitz[lr_lipschitz['radiuses'] == 1.25]
# display(epsilon1.head())
# display(epsilon125.head())
print("Lipschitz estimates on logistic regression model.")
epsilon1_lc_lime_aggre = np.mean(epsilon1['lc_coefficient_lime'])
epsilon1_lc_shap_aggre = np.mean(epsilon1['lc_coefficient_shap'])
print("\nLIME, epsilon 1.00, Aggregated L(x) = ", epsilon1_lc_lime_aggre)
print("SHAP, epsilon 1.00, Aggregated L(x) = ", epsilon1_lc_shap_aggre)
epsilon125_lc_lime_aggre = np.mean(epsilon125['lc_coefficient_lime'])
epsilon125_lc_shap_aggre = np.mean(epsilon125['lc_coefficient_shap'])
print("\nLIME, epsilon 1.25, Aggregated L(x) = ", epsilon125_lc_lime_aggre)
print("SHAP, epsilon 1.25, Aggregated L(x) = ", epsilon125_lc_shap_aggre)
```
## 2. Lipschitz est. visualizations computed on random forest model
```
epsilon1 = rf_lipschitz[rf_lipschitz['radiuses'] == 1.00]
epsilon125 = rf_lipschitz[rf_lipschitz['radiuses'] == 1.25]
# display(epsilon075.head())
# display(epsilon1.head())
# display(epsilon125.head())
print("Lipschitz estimates on random forest model.")
epsilon1_lc_lime_aggre = np.mean(epsilon1['lc_coefficient_lime'])
epsilon1_lc_shap_aggre = np.mean(epsilon1['lc_coefficient_shap'])
print("\nLIME, epsilon 1.00, Aggregated L(x) = ", epsilon1_lc_lime_aggre)
print("SHAP, epsilon 1.00, Aggregated L(x) = ", epsilon1_lc_shap_aggre)
epsilon125_lc_lime_aggre = np.mean(epsilon125['lc_coefficient_lime'])
epsilon125_lc_shap_aggre = np.mean(epsilon125['lc_coefficient_shap'])
print("\nLIME, epsilon 1.25, Aggregated L(x) = ", epsilon125_lc_lime_aggre)
print("SHAP, epsilon 1.25, Aggregated L(x) = ", epsilon125_lc_shap_aggre)
```
# Visualizations
```
# Assumption: the boxplots compare LIME and SHAP at epsilon = 0.75 (the radius actually used above)
# on the logistic regression model; swap in rf_lipschitz for the random forest results.
epsilon075 = lr_lipschitz[lr_lipschitz['radiuses'] == 0.75]
df1 = epsilon075.loc[:, ['lc_coefficient_lime']]
df1.rename(columns={'lc_coefficient_lime': 'Lipschitz Estimates'}, inplace=True)
df1['method'] = 'LIME'
df1['Dataset'] = 'Ionosphere'
df2 = epsilon075.loc[:, ['lc_coefficient_shap']]
df2.rename(columns={'lc_coefficient_shap': 'Lipschitz Estimates'}, inplace=True)
df2['method'] = 'SHAP'
df2['Dataset'] = 'Ionosphere'
df = df1.append(df2)
ax = sns.boxplot(x='method', y="Lipschitz Estimates", data=df)
ax = sns.boxplot(x="Dataset", y="Lipschitz Estimates",
hue="method",
data=df)
sns.despine(offset=10, trim=True)
```
### LIME visualizations by single points
```
explainer_lime = LimeTabularExplainer(train,
mode = 'classification',
training_labels = labels_train,
feature_names=feature_names,
verbose=False,
class_names=target_names,
feature_selection='auto',
discretize_continuous=True)
# Assumption: visualize the anchor point with the largest LIME Lipschitz estimate and the
# neighbouring point it deviates most from (both indices are stored in lr_lipschitz above).
worst_case = lr_lipschitz.loc[lr_lipschitz['lc_coefficient_lime'].idxmax()]
anchor_index = int(worst_case['anchor_x_index'])
similar_point_index = int(worst_case['x_deviation_index_lime'])
x_instance = test[anchor_index]
LR_exp_lime = explainer_lime.explain_instance(x_instance,
                                              lr.predict_proba,  # logistic regression model trained above
                                              labels=unique_targets,
                                              top_labels=None,
                                              num_features=len(x_instance),
                                              num_samples=6000)
LR_exp_lime.show_in_notebook()
x_instance = test[similar_point_index]
LR_exp_lime = explainer_lime.explain_instance(x_instance,
                                              lr.predict_proba,
                                              labels=unique_targets,
                                              top_labels=None,
                                              num_features=len(x_instance),
                                              num_samples=6000)
LR_exp_lime.show_in_notebook()
i = np.random.randint(0, test.shape[0])
i = 0
LR_exp_lime_map = LR_exp_lime.as_map()
# pprint(LR_exp_lime_map)
print('Predicted class for i:', labels_pred_codes_lr[i])
LR_exp_lime_list = LR_exp_lime.as_list(label=labels_pred_codes_lr[i])
# pprint(LR_exp_lime_list)
```
## Conclusions
```
lr_lime_iris = [2.657, 3.393, 1.495]
rf_lime_iris = [3.010, 3.783, 1.767]
lr_shap_iris = [2.716, 3.512, 1.463]
rf_shap_iris = [1.969, 3.546, 2.136]
find_min_vector = np.array([lr_lime_iris, rf_lime_iris, lr_shap_iris, rf_shap_iris])
np.amin(find_min_vector, axis=0)
from sklearn.linear_model import Ridge
import numpy as np
n_samples, n_features = 10, 5
rng = np.random.RandomState(0)
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
clf = Ridge(alpha=1.0)
clf.fit(X, y)
```
|
github_jupyter
|
print("Bismillahir Rahmanir Rahim")
from IPython.display import display, HTML
from lime.lime_tabular import LimeTabularExplainer
from pprint import pprint
from scipy.spatial.distance import pdist, squareform
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, confusion_matrix
from sklearn.utils.multiclass import unique_labels
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.metrics.pairwise import cosine_similarity
from scipy import spatial
%matplotlib inline
import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pathlib
import sklearn
import seaborn as sns
import statsmodels
import eli5
import lime
import shap
shap.initjs()
# Set the seed experimentations and interpretations.
np.random.seed(111)
project_path = pathlib.Path.cwd().parent.parent
import pathlib
dataset_path = str(project_path) + '/datasets/ionosphere/ionosphere.data'
# print(dataset_path)
fp = open(dataset_path, "r")
rows = []
for line in fp:
rows.append(line)
rows_sep = [sub.split(",") for sub in rows]
iono = pd.DataFrame(np.array(rows_sep))
iono_col_names = np.array(iono.columns.tolist()).astype(str)
iono_col_names = np.where(iono_col_names=='34', 'label', iono_col_names)
iono.columns = iono_col_names
iono['label'] = iono['label'].apply(lambda label: label.split('\n')[0])
labels_iono = iono['label']
labels_iono_list = labels_iono.values.tolist()
# labels_iono_codes = labels_train_iono.astype("category").cat.codes
features_iono = iono.iloc[:,:-1]
display(iono.head())
train_iono, test_iono, labels_train_iono, labels_test_iono = train_test_split(
features_iono, labels_iono, train_size=0.80)
labels_train_iono_codes = labels_train_iono.astype("category").cat.codes
labels_test_iono_codes = labels_test_iono.astype("category").cat.codes
""" This form is only compatiable with rest of the notebook code.
"""
train = train_iono.to_numpy().astype(float)
labels_train = labels_train_iono.to_numpy()
test = test_iono.to_numpy().astype(float)
labels_test = labels_test_iono.to_numpy()
x_testset = test
feature_names = features_iono.columns.values
target_names = np.unique(labels_test)
# here 0 = 'b' & 1 = 'g'
unique_targets = np.unique([0, 1]) # LIME only takes integer,
print("Feature names", feature_names)
print("Target names", target_names)
print("Number of uniques label or target names", unique_targets)
print("Training record", train[0:1])
print("Label for training record", labels_train[0:1])
lr = sklearn.linear_model.LogisticRegression(class_weight='balanced')
lr.fit(train, labels_train)
rf = RandomForestClassifier(n_estimators=500, class_weight='balanced_subsample')
rf.fit(train, labels_train)
labels_pred_lr = lr.predict(test)
labels_pred_rf = rf.predict(test)
score_lr = metrics.accuracy_score(labels_test, labels_pred_lr)
score_rf = metrics.accuracy_score(labels_test, labels_pred_rf)
print("Logitic Regression accuracy score.", score_lr)
predict_proba_lr = lr.predict_proba(test[:5])
print("\nLogistic Regression predict probabilities\n\n", predict_proba_lr)
predict_lr = lr.predict(test[:5])
print("\nLogistic Regression predictions", predict_lr)
print("\n\n\nRandom Forest accuracy score.", score_rf)
predict_proba_rf = rf.predict_proba(test[:5])
print("\nRandom Forest predict probabilities\n\n", predict_proba_rf)
predict_rf = rf.predict(test[:5])
print("\nRandom Forest predictions", predict_rf)
report_lr = classification_report(labels_test, labels_pred_lr, target_names=target_names)
print("Logistic Regression classification report.")
print(report_lr)
report_rf = classification_report(labels_test, labels_pred_rf, target_names=target_names)
print("Random Forestclassification report.")
print(report_rf)
total_targets = len(target_names)
report_lr = classification_report(labels_test, labels_pred_lr, target_names=target_names, output_dict=True)
report_lr = pd.DataFrame(report_lr).transpose().round(2)
report_lr = report_lr.iloc[:total_targets,:-1]
display(report_lr)
report_rf = classification_report(labels_test, labels_pred_rf, target_names=target_names, output_dict=True)
report_rf = pd.DataFrame(report_rf).transpose().round(2)
report_rf = report_rf.iloc[:total_targets,:-1]
display(report_rf)
avg_f1_lr = report_lr['f1-score'].mean()
print("Logistic Regression average f1-score", avg_f1_lr)
avg_f1_rf = report_rf['f1-score'].mean()
print("Random Forest average f1-score", avg_f1_rf)
matrix_lr = confusion_matrix(labels_test, labels_pred_lr)
matrix_lr = pd.DataFrame(matrix_lr, columns=target_names).transpose()
matrix_lr.columns = target_names
display(matrix_lr)
matrix_rf = confusion_matrix(labels_test, labels_pred_rf)
matrix_rf = pd.DataFrame(matrix_rf, columns=target_names).transpose()
matrix_rf.columns = target_names
display(matrix_rf)
matrix_report_lr = pd.concat([matrix_lr, report_lr], axis=1)
display(matrix_report_lr)
matrix_report_rf = pd.concat([matrix_rf, report_rf], axis=1)
display(matrix_report_rf)
file_path = str(project_path) + '/datasets/modelling-results/'
filename = 'iono_matrix_report_lr.csv'
matrix_report_lr.to_csv(file_path + filename, index=True)
filename = 'iono_matrix_report_rf.csv'
matrix_report_rf.to_csv(file_path + filename, index=True)
target_names = target_names
targets = unique_targets
targets_labels = dict(zip(targets, target_names))
print(targets_labels)
dummies = pd.get_dummies(labels_pred_lr)
labels_pred_codes_lr = dummies.values.argmax(1)
dummies = pd.get_dummies(labels_pred_rf)
labels_pred_codes_rf = dummies.values.argmax(1)
labels_names_pred_lr = []
for label in labels_pred_codes_lr:
labels_names_pred_lr.append(targets_labels[label])
labels_names_pred_rf = []
for label in labels_pred_codes_rf:
labels_names_pred_rf.append(targets_labels[label])
print("Logistic Regression predicted targets and their names.\n")
print(labels_pred_codes_lr)
print(labels_names_pred_lr)
print("\n\nRandom Forest predicted targets and their names.")
print(labels_pred_codes_rf)
print(labels_names_pred_rf)
def lime_explanations(index, x_testset, explainer, model, unique_targets, class_predictions):
instance = x_testset[index]
exp = explainer.explain_instance(instance,
model.predict_proba,
labels=unique_targets,
top_labels=None,
num_features=len(x_testset[index]),
num_samples=6000)
# Array class_predictions contains predicted class labels
exp_vector_predicted_class = exp.as_map()[class_predictions[index]]
return (exp_vector_predicted_class, exp.score), exp
def explanation_to_dataframe(index, x_testset, explainer, model, unique_targets, class_predictions, dataframe):
feature_imp_tuple, exp = lime_explanations(index,
x_testset,
explainer,
model,
unique_targets,
class_predictions)
exp_val = tuple(sorted(feature_imp_tuple[0]))
data = dict((x, y) for x, y in exp_val)
list_val = list(data.values())
list_val.append(feature_imp_tuple[1])
dataframe.loc[index] = list_val
return dataframe, exp
""" Define LIME Explainer
"""
explainer_lime = LimeTabularExplainer(train,
mode = 'classification',
training_labels = labels_train,
feature_names=feature_names,
verbose=False,
class_names=target_names,
feature_selection='auto',
discretize_continuous=True)
from tqdm import tqdm
col_names = list(feature_names)
col_names.append('lime_score')
explanations_lime_lr = pd.DataFrame(columns=col_names)
for index in tqdm(range(0,len(test))):
explanations_lime_lr, exp = explanation_to_dataframe(index,
test,
explainer_lime,
rf, # random forest model
unique_targets,
labels_pred_codes_lr, # random forest predictions
explanations_lime_lr)
print("LIME explanations on logistic regression.")
display(explanations_lime_lr.head())
display(explanations_lime_lr.iloc[:,:-1].head(1))
explanations_lime_rf = pd.DataFrame(columns=col_names)
for index in tqdm(range(0,len(test))):
explanations_lime_rf, exp = explanation_to_dataframe(index,
test,
explainer_lime,
rf, # random forest model
unique_targets,
labels_pred_codes_rf, # random forest predictions
explanations_lime_rf)
print("LIME explanations on random forest.")
display(explanations_lime_rf.head())
display(explanations_lime_rf.iloc[:,:-1].head(1))
def shapvalue_to_dataframe(test, labels_pred, shap_values, feature_names):
exp_shap_array = []
for test_index in range(0, len(test)):
label_pred = labels_pred[test_index]
exp_shap_array.append(shap_values[label_pred][test_index])
df_exp_shap = pd.DataFrame(exp_shap_array)
df_exp_shap.columns = feature_names
return df_exp_shap
shap_train_summary = shap.kmeans(train, 50)
explainer_shap_lr = shap.KernelExplainer(lr.predict_proba, shap_train_summary)
# print("Shap Train Sample Summary", shap_train_summary)
shap_values_lr = explainer_shap_lr.shap_values(test, nsamples='auto')
shap_expected_values_lr = explainer_shap_lr.expected_value
print("Shapley Expected Values", shap_expected_values_lr)
shap.summary_plot(shap_values_lr, test, feature_names=feature_names)
shap_values_rf = shap.TreeExplainer(rf).shap_values(test)
shap.summary_plot(shap_values_rf, test, feature_names=feature_names)
explanations_shap_lr = shapvalue_to_dataframe(test,
labels_pred_codes_lr,
shap_values_lr,
feature_names)
display(explanations_shap_lr.head())
display(explanations_shap_lr.iloc[:,:].head(1))
explanations_shap_rf = shapvalue_to_dataframe(test,
labels_pred_codes_rf,
shap_values_rf,
feature_names)
display(explanations_shap_rf.head())
display(explanations_shap_rf.iloc[:,:].head(1))
def norm(Xs, x0, norm=2):
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html
norm = np.linalg.norm(x0 - Xs, norm) # /np.linalg.norm(b[0] - b, 2)
return norm
def neighborhood_with_euclidean(x_points, anchor_index, radius):
x_i = x_points[anchor_index]
radius = radius * np.sqrt(len(x_points[anchor_index]))
x_js = x_points.tolist()
del x_js[anchor_index]
dist = (x_i - x_js)**2
dist = np.sum(dist, axis=1)
dist = np.sqrt(dist)
neighborhood_indices = []
for index in range(0, len(dist)):
if dist[index] < radius:
neighborhood_indices.append(index)
return neighborhood_indices
def neighborhood_with_KDTree(x_points, anchor_index, radius):
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query_ball_point.html
tree = spatial.KDTree(x_points)
neighborhood_indices = tree.query_ball_point(x_points[anchor_index],
radius * np.sqrt(len(x_points[anchor_index])))
return neighborhood_indices
def lipschitz_formula(nearby_points, nearby_points_exp, anchorX, anchorX_exp):
anchorX_norm2 = np.apply_along_axis(norm, 1, nearby_points, anchorX)
anchorX_exp_norm2 = np.apply_along_axis(norm, 1, nearby_points_exp, anchorX_exp)
anchorX_avg_norm2 = anchorX_exp_norm2/anchorX_norm2
anchorX_LC_argmax = np.argmax(anchorX_avg_norm2)
return anchorX_avg_norm2, anchorX_LC_argmax
def lipschitz_estimate(anchorX, x_points, explanations_x_points, anchor_index, neighborhood_indices):
# extract anchor point explanations
anchorX_exp = explanations_x_points[anchor_index]
# extract anchor point neighborhood's explanations
nearby_points = x_points[neighborhood_indices]
nearby_points_exp = explanations_x_points[neighborhood_indices]
# find local lipschitz estimate (lc)
anchorX_avg_norm2, anchorX_LC_argmax = lipschitz_formula(nearby_points,
nearby_points_exp,
anchorX,
anchorX_exp)
return anchorX_avg_norm2, anchorX_LC_argmax
def find_lipschitz_estimates(x_points, x_points_lime_exp, x_points_shap_exp, radii):
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.apply_along_axis.html
# https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/numpy.argmax.html
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query_ball_point.html
instances = []
anchor_x_index = []
lc_coefficient_lime = []
x_deviation_index_lime = []
x_deviation_index_shap = []
lc_coefficient_shap = []
radiuses = []
neighborhood_size = []
for radius in radii:
for anchor_index in range(0, len(x_points)):
# define neighorbood of around anchor point using radius
# neighborhood_indices = neighborhood_with_KDTree(x_points, anchor_index, radius)
# neighborhood_indices.remove(anchor_index) # remove anchor index to remove anchor point
neighborhood_indices = neighborhood_with_euclidean(x_points, anchor_index, radius)
print(neighborhood_indices)
radiuses.append(radius)
if len(neighborhood_indices) == 0:
continue
neighborhood_size.append(len(neighborhood_indices))
# extract anchor point and its original index
anchorX = x_points[anchor_index]
instances.append(anchorX)
anchor_x_index.append(anchor_index)
# find local lipschitz estimate (lc) LIME
anchorX_avg_norm2, anchorX_LC_argmax = lipschitz_estimate(anchorX,
x_points,
x_points_lime_exp,
anchor_index,
neighborhood_indices)
lc_coefficient_lime.append(anchorX_avg_norm2[anchorX_LC_argmax])
# find deviation point from anchor point LIME explanations
deviation_point_index = neighborhood_indices[anchorX_LC_argmax]
x_deviation_index_lime.append(deviation_point_index)
# find local lipschitz estimate (lc) SHAP
anchorX_avg_norm2, anchorX_LC_argmax = lipschitz_estimate(anchorX,
x_points,
x_points_shap_exp,
anchor_index,
neighborhood_indices)
lc_coefficient_shap.append(anchorX_avg_norm2[anchorX_LC_argmax])
# find deviation point from anchor point LIME explanations
deviation_point_index = neighborhood_indices[anchorX_LC_argmax]
x_deviation_index_shap.append(deviation_point_index)
# columns_lipschitz will be reused so to avoid confusion naming convention should remain similar
columns_lipschitz = ['instance', 'anchor_x_index', 'lc_coefficient_lime', 'x_deviation_index_lime',
'lc_coefficient_shap', 'x_deviation_index_shap', 'radiuses', 'neighborhood_size']
zippedList = list(zip(instances, anchor_x_index, lc_coefficient_lime, x_deviation_index_lime,
lc_coefficient_shap, x_deviation_index_shap, radiuses, neighborhood_size))
return zippedList, columns_lipschitz
X = pd.DataFrame(test)
x_points = X.copy().values
print("Testset")
# display(X.head())
# radii = [1.00, 1.25]
radii = [0.75]
print("LIME generated explanations")
X_lime_exp = explanations_lime_lr.iloc[:,:-1].copy()
# display(X_lime_exp.head())
print("SHAP generated explanations")
X_shap_exp = explanations_shap_lr.iloc[:,:].copy()
# display(X_shap_exp.head())
x_points_lime_exp = X_lime_exp.copy().values
x_points_shap_exp = X_shap_exp.copy().values
zippedList, columns_lipschitz = find_lipschitz_estimates(x_points,
x_points_lime_exp,
x_points_shap_exp,
radii)
lr_lipschitz = pd.DataFrame(zippedList, columns=columns_lipschitz)
print("LIME generated explanations")
X_lime_exp = explanations_lime_rf.iloc[:,:-1].copy()
# display(X_lime_exp.head())
print("SHAP generated explanations")
X_shap_exp = explanations_shap_rf.iloc[:,:].copy()
# display(X_shap_exp.head())
x_points_lime_exp = X_lime_exp.copy().values
x_points_shap_exp = X_shap_exp.copy().values
zippedList, columns_lipschitz = find_lipschitz_estimates(x_points,
x_points_lime_exp,
x_points_shap_exp,
radii)
rf_lipschitz = pd.DataFrame(zippedList, columns=columns_lipschitz)
epsilon1 = lr_lipschitz[lr_lipschitz['radiuses'] == 1.00]
epsilon125 = lr_lipschitz[lr_lipschitz['radiuses'] == 1.25]
# display(epsilon1.head())
# display(epsilon125.head())
print("Lipschitz estimates on logistic regression model.")
epsilon1_lc_lime_aggre = np.mean(epsilon1['lc_coefficient_lime'])
epsilon1_lc_shap_aggre = np.mean(epsilon1['lc_coefficient_shap'])
print("\nLIME, epsilon 1.00, Aggregated L(x) = ", epsilon1_lc_lime_aggre)
print("SHAP, epsilon 1.00, Aggregated L(x) = ", epsilon1_lc_shap_aggre)
epsilon125_lc_lime_aggre = np.mean(epsilon125['lc_coefficient_lime'])
epsilon125_lc_shap_aggre = np.mean(epsilon125['lc_coefficient_shap'])
print("\nLIME, epsilon 1.25, Aggregated L(x) = ", epsilon125_lc_lime_aggre)
print("SHAP, epsilon 1.25, Aggregated L(x) = ", epsilon125_lc_shap_aggre)
epsilon1 = rf_lipschitz[rf_lipschitz['radiuses'] == 1.00]
epsilon125 = rf_lipschitz[rf_lipschitz['radiuses'] == 1.25]
# display(epsilon075.head())
# display(epsilon1.head())
# display(epsilon125.head())
print("Lipschitz estimates on random forest model.")
epsilon1_lc_lime_aggre = np.mean(epsilon1['lc_coefficient_lime'])
epsilon1_lc_shap_aggre = np.mean(epsilon1['lc_coefficient_shap'])
print("\nLIME, epsilon 1.00, Aggregated L(x) = ", epsilon1_lc_lime_aggre)
print("SHAP, epsilon 1.00, Aggregated L(x) = ", epsilon1_lc_shap_aggre)
epsilon125_lc_lime_aggre = np.mean(epsilon125['lc_coefficient_lime'])
epsilon125_lc_shap_aggre = np.mean(epsilon125['lc_coefficient_shap'])
print("\nLIME, epsilon 1.25, Aggregated L(x) = ", epsilon125_lc_lime_aggre)
print("SHAP, epsilon 1.25, Aggregated L(x) = ", epsilon125_lc_shap_aggre)
df1 = epsilon075.loc[:, ['lc_coefficient_lime']]
df1.rename(columns={'lc_coefficient_lime': 'Lipschitz Estimates'}, inplace=True)
df1['method'] = 'LIME'
df1['Dataset'] = 'Ionoshpere'
df2 = epsilon075.loc[:, ['lc_coefficient_shap']]
df2.rename(columns={'lc_coefficient_shap': 'Lipschitz Estimates'}, inplace=True)
df2['method'] = 'SHAP'
df2['Dataset'] = 'Ionoshpere'
df = df1.append(df2)
ax = sns.boxplot(x='method', y="Lipschitz Estimates", data=df)
ax = sns.boxplot(x="Dataset", y="Lipschitz Estimates",
hue="method",
data=df)
sns.despine(offset=10, trim=True)
explainer_lime = LimeTabularExplainer(train,
mode = 'classification',
training_labels = labels_train,
feature_names=feature_names,
verbose=False,
class_names=target_names,
feature_selection='auto',
discretize_continuous=True)
x_instance = test[anchor_index]
LR_exp_lime = explainer_lime.explain_instance(x_instance,
LR_iris.predict_proba,
labels=np.unique(iris.target),
top_labels=None,
num_features=len(x_instance),
num_samples=6000)
LR_exp_lime.show_in_notebook()
x_instance = test[similar_point_index]
LR_exp_lime = explainer_lime.explain_instance(x_instance,
LR_iris.predict_proba,
labels=np.unique(iris.target),
top_labels=None,
num_features=len(x_instance),
num_samples=6000)
LR_exp_lime.show_in_notebook()
i = np.random.randint(0, test.shape[0])
i = 0
LR_exp_lime_map = LR_exp_lime.as_map()
# pprint(LR_exp_lime_map)
print('Predicted class for i:', labels_pred_lr[i])
LR_exp_lime_list = LR_exp_lime.as_list(label=labels_pred_lr[i])
# pprint(LR_exp_lime_list)
lr_lime_iris = [2.657, 3.393, 1.495]
rf_lime_iris = [3.010, 3.783, 1.767]
lr_shap_iris = [2.716, 3.512, 1.463]
rf_shap_iris = [1.969, 3.546, 2.136]
find_min_vector = np.array([lr_lime_iris, rf_lime_iris, lr_shap_iris, rf_shap_iris])
np.amin(find_min_vector, axis=0)
from sklearn.linear_model import Ridge
import numpy as np
n_samples, n_features = 10, 5
rng = np.random.RandomState(0)
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
clf = Ridge(alpha=1.0)
clf.fit(X, y)
| 0.411466 | 0.840521 |
# Pandas-Log Usage Walkthrough
## Why pandas-log?
Pandas-log is a Python implementation of the R package tidylog, and provides feedback about basic pandas operations.
Pandas has been invaluable for the data science ecosystem, where a typical workflow consists of a series of steps that transform raw data into an understandable/usable format. These steps need to run in a certain sequence, and if the result is unexpected it is hard to understand what happened. Pandas-log logs metadata on each operation, which makes it possible to pinpoint the issue.
## Pandas-log Demo
#### First we need to load some libraries including pandas-log
```
import pandas as pd
import numpy as np
import pandas_log
```
#### Let's take a look at our dataset:
```
df = pd.read_csv("pokemon.csv")
df.head(10)
```
#### Let's say we want to find out:
## Who is the weakest non-legendary fire pokemon?
<img src="fire_pokemons.jpg" width="540" height="340" align="left"/>
#### The strategy will probably be something like:
1. Filter out legendary pokemons using `.query()` .
1. Keep only fire pokemons using `.query()` .
1. Drop Legendary column using `.drop()` .
1. Keep the weakest pokemon among them using `.nsmallest()` .
1. Reset index using `.reset_index()` .
```
res = (df.copy()
.query("legendary==0")
.query("type_1=='fire' or type_2=='fire'")
.drop("legendary", axis=1)
.nsmallest(1,"total")
.reset_index(drop=True)
)
res
```
### OH NOO!!! Our code does not work !! We got no records
<img src="shocked.gif" width="490" height="340" align="left"/>
### If only there were a way to track down these issues
Fortunately, that's what **pandas-log** is for! It can be used either as a global function or as a context manager.
Here is the example with pandas_log's `enable()` context manager.
```
with pandas_log.enable():
res = (df.query("legendary==0")
.query("type_1=='fire' or type_2=='fire'")
.drop("legendary", axis=1)
.nsmallest(1,"total")
)
res
```
#### We can see clearly that in the second step (`.query()`) we filter out all the rows!! Indeed, we should have written Fire as opposed to fire
```
res = (df.copy()
.query("type_1=='Fire' or type_2=='Fire'")
.query("legendary==0")
.drop("legendary", axis=1)
.nsmallest(1,"total")
.reset_index(drop=True)
)
res
```
### Voilà, we got Slugma !!!!!!!!
<img src="slugma.jpg" width="250" height="340" align="left"/>
## Some more advance usage
#### One can use the verbose variable, which enables lower-level logging such as whether the dataframe was copied as part of the pipeline.
This can explain comparison issues.
```
with pandas_log.enable(verbose=True):
res = (df.query("legendary==0")
.query("type_1=='Fire' or type_2=='Fire'")
.drop("legendary", axis=1)
.nsmallest(1,"total")
.reset_index(drop=True)
)
res
```
As we can see, after both the drop and nsmallest functions the dataframe was copied.
#### One can use the silent variable, which suppresses printing to stdout
```
with pandas_log.enable(silent=True):
res = (df.copy()
.query("legendary==0")
.query("type_1=='Fire' or type_2=='Fire'")
.drop("legendary", axis=1)
.nsmallest(1,"total")
.reset_index(drop=True)
)
res
```
#### One can use the full_signature variable, which suppresses the full function signature in the log output
```
with pandas_log.enable(full_signature=False):
res = (df.query("legendary==0")
.query("type_1=='Fire' or type_2=='Fire'")
.drop("legendary", axis=1)
.nsmallest(1,"total")
.reset_index(drop=True)
)
res
```
|
github_jupyter
|
import pandas as pd
import numpy as np
import pandas_log
df = pd.read_csv("pokemon.csv")
df.head(10)
res = (df.copy()
.query("legendary==0")
.query("type_1=='fire' or type_2=='fire'")
.drop("legendary", axis=1)
.nsmallest(1,"total")
.reset_index(drop=True)
)
res
with pandas_log.enable():
res = (df.query("legendary==0")
.query("type_1=='fire' or type_2=='fire'")
.drop("legendary", axis=1)
.nsmallest(1,"total")
)
res
res = (df.copy()
.query("type_1=='Fire' or type_2=='Fire'")
.query("legendary==0")
.drop("legendary", axis=1)
.nsmallest(1,"total")
.reset_index(drop=True)
)
res
with pandas_log.enable(verbose=True):
res = (df.query("legendary==0")
.query("type_1=='Fire' or type_2=='Fire'")
.drop("legendary", axis=1)
.nsmallest(1,"total")
.reset_index(drop=True)
)
res
with pandas_log.enable(silent=True):
res = (df.copy()
.query("legendary==0")
.query("type_1=='Fire' or type_2=='Fire'")
.drop("legendary", axis=1)
.nsmallest(1,"total")
.reset_index(drop=True)
)
res
with pandas_log.enable(full_signature=False):
res = (df.query("legendary==0")
.query("type_1=='Fire' or type_2=='Fire'")
.drop("legendary", axis=1)
.nsmallest(1,"total")
.reset_index(drop=True)
)
res
| 0.117547 | 0.978753 |
# Introduction to TensorFlow and Keras
## Objectives
The goal of this notebook is to teach some basics of the TensorFlow framework and the Keras API.
### What is TensorFlow?
[TensorFlow](https://www.tensorflow.org/guide) is an open-source framework developed by Google for building various machine learning and deep learning models. Although originally released in late 2015, the first stable version arrived in 2017. TensorFlow is free and open-source, thanks to the Apache 2.0 open-source license.
The main objective of using TensorFlow is to reduce the complexity of implementing computations on large numerical data sets. In practice, these large computations can manifest as training and inference with machine learning or deep learning models.
TensorFlow was designed to operate with multiple CPUs or GPUs, as well as a growing number of mobile operating systems. The framework includes wrappers in Python, C++, and Java.
#### How does it work?
TensorFlow accepts inputs as a multi-dimensional array called a Tensor, which allows the programmer to create dataflow graphs and structures specifying how data travels through. The framework is designed to support creation of a flowchart of operations to be applied to input Tensors, which travel in one direction and out the other.
#### TensorFlow’s structure
There are three main components to TensorFlow's structure.
1. preprocessing the data
2. building the model
3. training and estimating the model
The name TensorFlow derives from the way in which the framework receives input in the form of a multi-dimensional array, i.e. the tensors. These tensors travel sequentially through the specified flowchart of operations, entering at one end and culminating as output at the other end.
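As a minimal sketch of those three steps (illustrative only; the toy data, layer sizes and training settings here are placeholder assumptions, and TensorFlow 2.x with the bundled Keras is assumed):
```
import numpy as np
import tensorflow as tf

# 1. preprocessing the data: a toy binary-classification set scaled to [0, 1]
x = np.random.rand(256, 4).astype("float32")
y = (x.sum(axis=1) > 2.0).astype("float32")

# 2. building the model
model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation="relu", input_shape=(4,)),
    tf.keras.layers.Dense(1, activation="sigmoid"),
])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

# 3. training and estimating the model
model.fit(x, y, epochs=5, batch_size=32, verbose=0)
print(model.evaluate(x, y, verbose=0))  # [loss, accuracy]
```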
#### What are TensorFlow components?
**Tensor**
Tensors, the basic unit of data in this framework, are involved in every computation of TensorFlow. A tensor is an n-dimensional vector or matrix. In theory, a tensor may represent any form of data. The values belonging to a tensor all share the same data type and often the same shape / dimensionality. A tensor can describe the input data and the output of a calculation.
In TensorFlow, all operations are carried out within a graph, which in effect is a series of computations that happen in order. Each individual operation is referred to as an op node.
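For instance (a small sketch, assuming TensorFlow 2.x, where graphs are built by tracing a Python function with `tf.function`), each operation on tensors becomes a node in the traced graph:
```
import tensorflow as tf

@tf.function  # traces the Python function into a TensorFlow graph
def affine(x, w, b):
    return tf.matmul(x, w) + b  # two op nodes: a matrix multiply and an add

x = tf.constant([[1.0, 2.0]])
w = tf.constant([[3.0], [4.0]])
b = tf.constant([[0.5]])
print(affine(x, w, b))  # tf.Tensor([[11.5]], shape=(1, 1), dtype=float32)
```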
**Graphs**
TensorFlow uses a graph framework. Graphs collect and summarize all of the calculations and offer several benefits:
1. They are designed to work on CPUs or GPUs, as well as on mobile devices.
2. Graphs are portable, which enables the computations to be saved for immediate or later usage. Otherwise stated, the graph can be frozen and run at a later time.
3. Graph calculations are executed by linking tensors together.
4. For each tensor, there is a node and an edge. The node carries out the mathematical process and produces endpoint outputs. The input/output connections are represented by the edges.
5. All nodes are linked together, so the graph itself is a depiction of the operations and relationships that exist between the nodes.
:::{figure-md} TFgraph-fig
<img src="https://miro.medium.com/max/1838/1*aOYUa3hHKi9HlYnnFAipUQ.gif" width="650px">
TensorFlow graph example (from [https://medium.com/the-artificial-impostor/notes-understanding-tensorflow-part-1-5f0ebb253ad4](https://medium.com/the-artificial-impostor/notes-understanding-tensorflow-part-1-5f0ebb253ad4)).
:::
#### Why do so many people like TensorFlow?
TensorFlow is intentionally user-friendly, with helpful plugins to visualize model training and a useful software debugging tool. As well, TensorFlow is highly scalable, with easy deployment on both CPUs and GPUs.
### What is Keras?
[Keras](https://keras.io/about/) is an API built on Python, with human readability at the forefront of its design. With simple and consistent structures and methods extensible across many machine learning and deep learning applications, Keras reduces the cognitive load associated with programming models. Furthermore, the API seeks to minimize the need for programmer interaction by abstracting many complexities into easily callable functions. Lastly, Keras features clear & actionable error messaging, complemented by comprehensive and digestible documentation and developer guides.
Keras is what some might call a wrapper for TensorFlow. By that, one means to say that Keras simplifies a programmer's interaction with TensorFlow through refinement of key methods and constructs.
Importantly, Keras is intended for rapid experimentation.
The main components of Keras include:
1. A models API, which enables one to construct a model with varying levels of complexity depending on the use case. We will use the [Functional API](https://keras.io/guides/functional_api/).
2. A layers API, which allows one to define the tensor-in/tensor-out computation functions.
3. A callback API, which enables one to program specific actions to occur during training, such as logging training metrics, visualizing interim/internal states and statistics of the model during training, and performing early stopping when the model converges.
4. A data preprocessing API, which offers support for prepping raw data from disk into model-ready Tensor format.
5. An optimizer API where all of the state-of-the-art optimizers can be plugged in. Learning rate decay / scheduling can also be implemented as part of this API.
6. A metrics API, which is used for assessing the performance of the model during training. A metric is monitored (rather than directly optimized) during training, with specific metrics chosen for specific modeling objectives.
7. A loss API that informs the model quantitatively how much error it should try to minimize during training. Similar to metrics, specific loss functions are selected for specific modeling objectives.
With the Functional API, our main workflow will follow the diagram below.
:::{figure-md} Keras-fig
<img src="images/Keras_functional_API.jpg" width="650px">
Keras Functional API diagram (from [https://miro.com/app/board/o9J_lhnKhVE=/](https://miro.com/app/board/o9J_lhnKhVE=/)).
:::
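A minimal sketch of that workflow with the Functional API might look like the following (the layer sizes, loss and dataset name here are placeholder assumptions, not a prescribed architecture):
```
import tensorflow as tf

# define the inputs, the layer graph, and the model
inputs = tf.keras.Input(shape=(32,))
hidden = tf.keras.layers.Dense(16, activation="relu")(inputs)
outputs = tf.keras.layers.Dense(1, activation="sigmoid")(hidden)
model = tf.keras.Model(inputs=inputs, outputs=outputs)

# compile with an optimizer, a loss and a metric, then train with a callback
model.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
              loss="binary_crossentropy",
              metrics=["accuracy"])
early_stop = tf.keras.callbacks.EarlyStopping(monitor="loss", patience=3)
# model.fit(train_ds, epochs=20, callbacks=[early_stop])  # train_ds: a prepared tf.data.Dataset
```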
|
github_jupyter
|
| 0.932783 | 0.989399 |
| [**Overview**](./00_overview.ipynb) | **Examples:** | [Selecting and Indexing Geochem Data](01_indexes_selectors.ipynb) | [Data Munging](02_munging.ipynb) | [Visualisation](03_visualisation.ipynb) |[lambdas](04_lambdas.ipynb) |
|:-----|:-----|:-----|:-----|:-----|:-----|
## Visualisation
`pyrolite` contains an array of visualisation methods, a few of which we'll quickly run through here. For more, check out the [examples gallery](https://pyrolite.readthedocs.io/en/develop/examples/index.html#plotting-examples)!
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from util import fetch_GEOROC_csv # this is a function we put together in the previous notebook, which arranges a CSV from GEOROC
df = fetch_GEOROC_csv('http://georoc.mpch-mainz.gwdg.de/georoc/Csv_Downloads/Continental_Flood_Basalts_comp/CENTRAL_ATLANTIC_MAGMATIC_PROVINCE_-_CAMP.csv')
df.pyrochem.compositional = df.pyrochem.compositional.replace(0, np.nan)# get rid of some zeroes
```
### Simple Bivariate Plotting
While there are many ways to get to simple bivariate plots, `pyrolite` provides a few options which can provide a simpler interface and easier access to simple styling configuration.
```
df[['MgO', 'SiO2']].pyroplot.scatter(color='k', marker='o', alpha=0.5)
```
When we get to larger datasets, overplotting becomes an issue, and we may want to consider methods for visualising the distribution of data as a whole rather than individual points. `pyrolite` has a few options for this, including 'density' plots and 'heatscatter' plots (based on kernel density estimates).
```
df[['MgO', 'SiO2']].pyroplot.density(bins=100)
df[['MgO', 'SiO2']].pyroplot.heatscatter(alpha=0.5)
```
### Ternary Plots
Ternary plots are common in geochemistry, mineralogy and petrology but don't necessarily pop up elsewhere. `pyrolite` provides an interface to create ternary plots wherever you pass three columns, making it as simple as creating our bivariate plots above!
```
df[['CaO', 'MgO', 'FeO']].pyroplot.scatter(color='k', marker='o', alpha=0.5)
```
In contrast to most ternary plots, however, we can also create data density visualisations (based on distributions in logratio space):
```
df[['CaO', 'MgO', 'FeO']].pyroplot.heatscatter(alpha=0.5, cmap='cividis')
```
### Spider Plots
Visualisation of multivariate patterns in geochemical data can be a challenge, but one tool well adapted to this is the 'spiderplot'. In most cases, you'll want to visualise normalised data (e.g. to Chondrite or Primitive Mantle) such that the effects of nucleosynthesis and planetary formation are removed and you can instead dig deeper into processes which have happened since. The `pyrolite.pyrochem` API can be chained together with the `pyrolite.pyroplot` API to do this in one line - here we'll pull up some REE data (note that some of it looks like it could be pre-normalised!).
```
df.pyrochem.REE.pyrochem.normalize_to('PM_PON').pyroplot.spider(unity_line=True, color='0.5', alpha=0.4)
```
For the REE data specifically, there's also a method which will scale axes to ionic radii:
```
df.pyrochem.REE.pyrochem.normalize_to('PM_PON').pyroplot.REE(unity_line=True, color='0.5', alpha=0.4)
```
We can also style this as above, including by colormapping a particular variable:
```
df.pyrochem.REE.pyrochem.normalize_to('PM_PON').pyroplot.REE(unity_line=True, c=df['MgO'], cmap='cividis', alpha=0.5)
```
We can also make conditional density spider plots - here it highlights the few samples with REE data errors!
```
df.pyrochem.REE.pyrochem.normalize_to('PM_PON').pyroplot.REE(unity_line=True, mode='binkde', bins=100, yextent=(0.5, 500), vmin=0.03, cmap='cividis')
```
|
github_jupyter
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from util import fetch_GEOROC_csv # this is a funciton we put together in the previous notebook, which arranges a CSV from GEOROC
df = fetch_GEOROC_csv('http://georoc.mpch-mainz.gwdg.de/georoc/Csv_Downloads/Continental_Flood_Basalts_comp/CENTRAL_ATLANTIC_MAGMATIC_PROVINCE_-_CAMP.csv')
df.pyrochem.compositional = df.pyrochem.compositional.replace(0, np.nan)# get rid of some zeroes
df[['MgO', 'SiO2']].pyroplot.scatter(color='k', marker='o', alpha=0.5)
df[['MgO', 'SiO2']].pyroplot.density(bins=100)
df[['MgO', 'SiO2']].pyroplot.heatscatter(alpha=0.5)
df[['CaO', 'MgO', 'FeO']].pyroplot.scatter(color='k', marker='o', alpha=0.5)
df[['CaO', 'MgO', 'FeO']].pyroplot.heatscatter(alpha=0.5, cmap='cividis')
df.pyrochem.REE.pyrochem.normalize_to('PM_PON').pyroplot.spider(unity_line=True, color='0.5', alpha=0.4)
df.pyrochem.REE.pyrochem.normalize_to('PM_PON').pyroplot.REE(unity_line=True, color='0.5', alpha=0.4)
df.pyrochem.REE.pyrochem.normalize_to('PM_PON').pyroplot.REE(unity_line=True, c=df['MgO'], cmap='cividis', alpha=0.5)
df.pyrochem.REE.pyrochem.normalize_to('PM_PON').pyroplot.REE(unity_line=True, mode='binkde', bins=100, yextent=(0.5, 500), vmin=0.03, cmap='cividis')
| 0.508788 | 0.984575 |
# Solving CartPole-v0 with the policy gradient algorithm REINFORCE
The following is an implementation of the [REINFORCE]() algorithm described in ([Policy Gradient Methods for
Reinforcement Learning with Function Approximation](https://papers.nips.cc/paper/1713-policy-gradient-methods-for-reinforcement-learning-with-function-approximation.pdf)).
The CartPole-v0 environment used to test the algorithm was created by OpenAI and is described in more detail in its documentation.
The environment is considered solved when the agent keeps the pole balanced for an average of more than 195 steps over 100 consecutive episodes.
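As a reminder of what the implementation below optimises, REINFORCE performs gradient ascent on the expected return using the policy-gradient estimator (standard form; the notation here follows the usual convention rather than the paper's exact symbols):

$$\nabla_\theta J(\theta) = \mathbb{E}_{\pi_\theta}\!\left[\sum_{t=0}^{T} \nabla_\theta \log \pi_\theta(a_t \mid s_t)\, G_t\right], \qquad G_t = \sum_{k=t}^{T} \gamma^{\,k-t}\, r_{k+1}$$

so the loss minimised in code is the negative of $\log \pi_\theta(a_t \mid s_t)$ weighted by the discounted return.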
```
import numpy as np
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import deque
from matplotlib import pyplot as plt
from tqdm.notebook import trange, tqdm
def print_step(state, reward, done, info):
print(f"state: {state},\nreward: {reward},\ndone: {done},\ninfo: {info}")
```
A quick look at the environment:
```
env = gym.make('CartPole-v0')
initial_state = env.reset()
action = env.action_space.sample()
state, reward, done, info = env.step(action)
print_step(state, reward, done, info)
def running_mean(x, N=50):
kernel = np.ones(N)
conv_len = x.shape[0]-N
y = np.zeros(conv_len)
for i in range(conv_len):
y[i] = kernel @ x[i:i+N]
y[i] /= N
return y
```
Create the policy network
```
class PolicyNetwork(nn.Module):
def __init__(self,
input_dim=4,
hidden_dim=[32, 32],
output_dim=2):
super(PolicyNetwork, self).__init__()
self.output_dim = output_dim
self.input_layer = nn.Linear(input_dim, hidden_dim[0])
self.hidden_layers = nn.ModuleList()
for i in range(len(hidden_dim) - 1):
hidden_layer = nn.Linear(hidden_dim[i], hidden_dim[i+1])
self.hidden_layers.append(hidden_layer)
self.output_layer = nn.Linear(hidden_dim[-1], output_dim)
def forward(self, state):
x = F.relu(self.input_layer(state))
for hidden in self.hidden_layers:
x = F.leaky_relu(hidden(x))
x = F.softmax(self.output_layer(x), dim=-1)
return x
model = PolicyNetwork()
learning_rate = 0.009
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
initial_state = env.reset()
pred = model(torch.from_numpy(initial_state).float())
action = np.random.choice(np.array([0,1]), p=pred.data.numpy())
state, reward, done, info = env.step(action)
print("pred: ",pred)
print_step(state, reward, done, info)
def discount_rewards(rewards, gamma=.99):
lenr = len(rewards)
discounted_return = torch.pow(gamma, torch.arange(lenr).float()) * rewards
discounted_return /= discounted_return.max()
return discounted_return.flip(dims=(0,))
def loss_fn(preds, rewards):
#self.loss = -tf.reduce_mean(tf.log(selected_action_prob) * self.rewards)
return -1 * torch.mean(rewards * torch.log(preds))
```
Training the REINFORCE ALGORITHM
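To make the reward-to-go computation in the `train_model` function below concrete, here is a toy example with made-up rewards (the rewards and gamma value are illustrative only):
```
# Toy example: rewards collected over 4 steps, gamma = 0.99
rewards = [1, 1, 1, 0]           # last reward is zeroed because the pole fell
gamma = 0.99
sum_reward, discnt_rewards = 0, []
for r in reversed(rewards):      # accumulate from the end of the episode
    sum_reward = r + gamma * sum_reward
    discnt_rewards.append(sum_reward)
discnt_rewards.reverse()
print(discnt_rewards)            # [2.9701, 1.99, 1.0, 0] -> earlier steps get larger returns
```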
```
MAX_DUR = 200
MAX_EPISODES = 5000
gamma = 0.99
action_space = np.array([0,1])
score = []
actions_taken = {}
scores_window = deque(maxlen=100)
def interact_with_environment():
#start the environment.
state = env.reset()
done = False
experiences = []
episode_reward = 0
#interacting with the environment, continue untill the pole falls (done == True) or max 200 steps.
for t in range(MAX_DUR):
action_prob = model(torch.from_numpy(state).float()).detach()
action = np.random.choice(action_space, p=action_prob.data.numpy())
actions_taken[action] = 1 + actions_taken.get(action, 0)
next_state, reward, done, info = env.step(action)
experiences.append((state, action, reward * (1-done)))
episode_reward += 1
state = next_state
# the pole fell.
if done:
break
return experiences, episode_reward, done
def train_model(experiences):
# train the model
sum_reward = 0
discnt_rewards = []
rewards = [r for (s,a,r) in experiences]
rewards.reverse()
for r in rewards:
sum_reward = r + gamma * sum_reward
discnt_rewards.append(sum_reward)
discnt_rewards.reverse()
reward_batch = torch.Tensor(discnt_rewards)
#reward_batch = torch.Tensor([r for (s,a,r) in experiences]).flip(dims=(0,))
discounted_reward = torch.Tensor(reward_batch)
state_batch = torch.Tensor([s for (s,a,r) in experiences])
action_batch = torch.Tensor([a for (s,a,r) in experiences])
pred_batch = model(state_batch)
prob_batch = pred_batch.gather(dim=1, index=action_batch.long().view(-1,1)).squeeze()
loss = loss_fn(prob_batch, discounted_reward)
optimizer.zero_grad()
loss.backward()
optimizer.step()
for episode in tqdm(range(MAX_EPISODES)):
experiences, rewards, done = interact_with_environment()
scores_window.append(rewards)
ep_len = len(experiences)
score.append(ep_len)
if done:
train_model(experiences)
if episode % 100 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}'.format(episode, np.mean(scores_window)))
print(actions_taken)
if np.mean(scores_window) >= 195:
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(episode - 100,
np.mean(scores_window)))
torch.save(model.state_dict(), 'checkpoint.pth')
break
score = np.array(score)
avg_score = running_mean(score, 50)
plt.figure(figsize=(10,7))
plt.ylabel("Episode Duration",fontsize=22)
plt.xlabel("Training Epochs",fontsize=22)
plt.plot(avg_score, color='green')
```
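If the environment was solved and `checkpoint.pth` was saved, a minimal sketch to sanity-check the learned policy is to reload the weights and roll out one greedy episode (the greedy action choice here is only for inspection, not part of the training procedure):
```
# reload the trained policy and run a single greedy episode
eval_model = PolicyNetwork()
eval_model.load_state_dict(torch.load('checkpoint.pth'))
eval_model.eval()

state = env.reset()
total_reward = 0
for t in range(MAX_DUR):
    with torch.no_grad():
        probs = eval_model(torch.from_numpy(state).float())
    action = int(torch.argmax(probs).item())  # pick the most probable action instead of sampling
    state, reward, done, info = env.step(action)
    total_reward += reward
    if done:
        break
print(f"greedy episode reward: {total_reward}")
```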
```
# Synthetic data for the SDGym 'insurance' dataset:
# fit a Bayesian network, train an XGBoost classifier to separate real data from BN samples,
# calibrate its probabilities with isotonic regression, reweight the BN samples, and score with sdgym.
import sdgym
from sdgym import load_dataset
from sdgym import benchmark
from timeit import default_timer as timer
from functools import partial
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import networkx as nx
from synthsonic.models.kde_utils import kde_smooth_peaks_1dim, kde_smooth_peaks
from sklearn.model_selection import train_test_split
import pgmpy
from pgmpy.models import BayesianModel
from pgmpy.estimators import TreeSearch
from pgmpy.estimators import HillClimbSearch, BicScore, ExhaustiveSearch
from pgmpy.estimators import BayesianEstimator
from pgmpy.sampling import BayesianModelSampling
import xgboost as xgb
from random import choices
from xgboost import XGBClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.isotonic import IsotonicRegression
from scipy import interpolate
%matplotlib inline
data, categorical_columns, ordinal_columns = load_dataset('insurance')
data.shape
data
df = pd.DataFrame(data)
df.columns = [str(i) for i in df.columns]
# learn graph structure (preferred - fast)
est = TreeSearch(df, root_node=df.columns[0])
dag = est.estimate(estimator_type="tan", class_node='1')
# alternative graph structure
if False:
est2 = TreeSearch(df, root_node=df.columns[0])
dag2 = est2.estimate(estimator_type="chow-liu")
best_model = dag  # default: the DAG learned by TreeSearch above
# alternative graph structure (slow)
if False:
    est = HillClimbSearch(df)
    best_model = est.estimate()  # start_dag=dag)
nx.draw(best_model, with_labels=True, arrowsize=30, node_size=800, alpha=0.3, font_weight='bold')
plt.show()
edges = best_model.edges()
edges
# there are many choices of parametrization, here is one example
model = BayesianModel(best_model.edges())
model.fit(df, estimator=BayesianEstimator, prior_type='dirichlet', pseudo_counts=0.1)
print(model.get_cpds('2'))
# set up train-test sample.
# the test sample is used to calibrate the output of the classifier
random_state = 0
X1_train, X1_test, y1_train, y1_test = train_test_split(data, np.ones(data.shape[0]), test_size=0.35,
random_state=random_state)
X1_train.shape
if False:
clf = MLPClassifier(random_state=0, max_iter=1000, early_stopping=True)
if True:
clf = xgb.XGBClassifier(
n_estimators=250,
reg_lambda=1,
gamma=0,
max_depth=9
)
n_one = len(X1_train)
n_zero = n_one
np.random.seed(seed = 0)
# sample data from BN
inference = BayesianModelSampling(model)
df_data = inference.forward_sample(size=n_zero, return_type='dataframe')
df_data.columns = [int(c) for c in df_data.columns]
X0_train = df_data[sorted(df_data.columns)].values
zeros = np.zeros(n_zero)
ones = np.ones(n_one)
yy = np.concatenate([zeros, ones], axis = 0)
XX = np.concatenate([X0_train, X1_train], axis = 0)
clf = clf.fit(XX, yy)
# calibrate the probabilities, using the test sample and a new null sample
np.random.seed(10)
df_data = inference.forward_sample(size=250000, return_type='dataframe')
df_data.columns = [int(c) for c in df_data.columns]
X0_test = df_data[sorted(df_data.columns)].values
p0 = clf.predict_proba(X0_test)[:, 1]
p1 = clf.predict_proba(X1_test)[:, 1]
nbins = 100
plt.figure(figsize=(12,7))
plt.hist(p0, bins=100, range=(0,1), alpha=0.5, log=True, density=True);
plt.hist(p1, bins=100, range=(0,1), alpha=0.5, log=True, density=True);
nbins = 100
binning = np.linspace(0, 1, nbins+1)
hist_p0, bin_edges = np.histogram(p0, binning)
hist_p1, bin_edges = np.histogram(p1, binning)
def poisson_uncertainty(n):
sigman = np.sqrt(n)
# correct poisson counts of zero.
sigman[sigman == 0] = 1.
return sigman
def fraction_and_uncertainty(a, b, sigma_a, sigma_b):
absum = a+b
#frac_a = np.divide(a, absum, out=np.zeros_like(a), where=(absum) != 0)
#frac_b = np.divide(b, absum, out=np.zeros_like(b), where=(absum) != 0)
frac_a = a / (a + b)
frac_b = b / (a + b)
sigma_fa2 = np.power(frac_b * sigma_a, 2) / np.power(a + b, 2) + np.power(frac_a * sigma_b, 2) / np.power(a + b, 2)
return frac_a, np.sqrt(sigma_fa2)
rest_p0 = np.sum(hist_p0) - hist_p0
rest_p1 = np.sum(hist_p1) - hist_p1
sigma_bin0 = poisson_uncertainty(hist_p0)
sigma_rest0 = poisson_uncertainty(rest_p0)
sigma_bin1 = poisson_uncertainty(hist_p1)
sigma_rest1 = poisson_uncertainty(rest_p1)
frac0, sigma_frac0 = fraction_and_uncertainty(hist_p0, rest_p0, sigma_bin0, sigma_rest0)
frac1, sigma_frac1 = fraction_and_uncertainty(hist_p1, rest_p1, sigma_bin1, sigma_rest1)
p1calib, sigma_p1calib = fraction_and_uncertainty(frac1, frac0, sigma_frac1, sigma_frac0)
sample_weight = 1 / (sigma_p1calib * sigma_p1calib)
sample_weight /= min(sample_weight)
#sample_weight
if True:
# we recalibrate per probability bin. NO interpolation (not valid in highest bin)
#hist_p0, bin_edges = np.histogram(p0, bins=nbins, range=(0, 1))
#hist_p1, bin_edges = np.histogram(p2, bins=nbins, range=(0, 1)) #### !!!! p2
bin_centers = bin_edges[:-1] + 0.5/nbins
hnorm_p0 = hist_p0 / sum(hist_p0)
hnorm_p1 = hist_p1 / sum(hist_p1)
hnorm_sum = hnorm_p0 + hnorm_p1
p1cb = np.divide(hnorm_p1, hnorm_sum, out=np.zeros_like(hnorm_p1), where=hnorm_sum != 0)
# self.p1cb = p1cb, bin_centers
# use isotonic regression to smooth out potential fluctuations in the p1 values
# isotonic regression assumes that p1 can only be a rising function.
# I’m assuming that if a classifier predicts a higher probability, the calibrated probability
# will also be higher. This may not always be right, but I think generally it is a safe one.
iso_reg = IsotonicRegression(y_min=0, y_max=1).fit(bin_centers, p1calib, sample_weight)
p1pred = iso_reg.predict(bin_centers)
# calibrated probabilities
p1f_ = interpolate.interp1d(bin_edges[:-1], p1pred, kind='previous', bounds_error=False, fill_value="extrapolate")
p1pred = p1f_(bin_centers)
p1lin = p1f_(bin_centers)
plt.figure(figsize=(12,7))
#plt.plot(bin_centers, p1cb)
plt.plot(bin_centers, p1pred)
plt.plot(bin_centers, bin_centers)
plt.plot(bin_centers, p1lin)
maxp1 = p1f_(0.995)
max_weight = maxp1 / (1. - maxp1)
max_weight
# validation - part 1: check if reweighting works okay
from pgmpy.sampling import BayesianModelSampling
np.random.seed(1)
# sample data from BN
inference = BayesianModelSampling(model)
df_data = inference.forward_sample(size=250000, return_type='dataframe')
df_data.columns = [int(c) for c in df_data.columns]
X_test = df_data[sorted(df_data.columns)].values
p0 = clf.predict_proba(X_test)[:, 1]
nominator = p1f_(p0)
denominator = 1 - nominator
weight = np.divide(nominator, denominator, out=np.ones_like(nominator), where=denominator != 0)
len(X_test), sum(weight)
if False:
keep = weight == max_weight
same = weight != max_weight
ratio = (250000 - np.sum(weight[same])) / np.sum(weight[keep])
np.sum(weight[same]), np.sum(weight[keep])
plt.hist(weight, bins=100, log=True);
#data, sample_weights = self._sample_no_transform(n_samples, random_state)
pop = np.asarray(range(X_test.shape[0]))
probs = weight/np.sum(weight)
sample = choices(pop, probs, k=X_test.shape[0])
Xtrans = X_test[sample]
p0 = clf.predict_proba(Xtrans)[:, 1]
p1 = clf.predict_proba(X1_test)[:, 1]
plt.figure(figsize=(12,7))
plt.hist(p0, bins=100, range=(0,1), alpha=0.5, density=True); #, weights=weight)#, log=True)
plt.hist(p1, bins=100, range=(0,1), alpha=0.5, density=True);
# validation - part 2: plot distributions
i = 1
plt.figure(figsize=(12,7))
plt.hist(X_test[:, i], bins=100, range=(0,1), alpha=0.5, density=True);#, log=True)
plt.hist(X1_test[:, i], bins=100, range=(0,1), alpha=0.5, density=True);
# validation part 3: check number of duplicates
np.random.seed(2)
df_data = inference.forward_sample(size=500000, return_type='dataframe')
df_data.columns = [int(c) for c in df_data.columns]
X10k = df_data[sorted(df_data.columns)].values
p0 = clf.predict_proba(X10k)[:, 1]
nominator = p1f_(p0)
denominator = 1 - nominator
weight = np.divide(nominator, denominator, out=np.ones_like(nominator), where=denominator != 0)
sum(weight)
pop = np.asarray(range(X10k.shape[0]))
probs = weight/np.sum(weight)
sample = choices(pop, probs, k=X10k.shape[0])
Xtrans = X10k[sample]
u, c = np.unique(Xtrans, axis=0, return_counts=True)
counts = np.sort(c)[::-1] / 50
counts
u, c = np.unique(data, axis=0, return_counts=True)
c2 = np.sort(c)[::-1]
plt.figure(figsize=(12,7))
plt.bar(list(range(40)), c2[:40], alpha=0.5)
plt.bar(list(range(40)), counts[:40], alpha=0.5)
# run sdgym
df = pd.DataFrame(Xtrans)
df.to_csv('test.csv', index=False)
def KDECopulaNNPdf_RoundCategorical(real_data, categorical_columns, ordinal_columns, times=None):
df = pd.read_csv('test.csv')
data = df.values[:50000]
return data
import sdgym
scores = sdgym.run(synthesizers=KDECopulaNNPdf_RoundCategorical, datasets=['insurance'])
scores
```
# Estimating a mixed Gaussian distribution
```
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from dpmmlearn import DPMM
from dpmmlearn.probability import GaussianMeanKnownVariance, NormInvChi2, NormInvGamma
```
## Prepare data
```
def gaussian_sample(mu_list, var_list, size, portion, random_seed=0):
np.random.seed(random_seed)
nums = np.random.multinomial(size, portion)
xs = []
for mu, var, num in zip(mu_list, var_list, nums):
x = np.random.normal(loc=mu, scale=np.sqrt(var), size=num)
xs.append(x)
xs = np.concatenate(xs)
return xs
def sample_pdf(x, mu_list, var_list, portion):
y = np.zeros_like(x)
for mu, var, num in zip(mu_list, var_list, portion):
y_ = np.exp(-0.5*(x-mu)**2/var)/np.sqrt(2*np.pi*var)
y += num/sum(portion) * y_
return y
def result_pdf(x, thetas, n_labels, sigsqr):
y = np.zeros_like(x)
for theta, n_label in zip(thetas, n_labels):
mu = theta
y_ = np.exp(-0.5*(x-mu)**2/sigsqr)/np.sqrt(2*np.pi*sigsqr)
y += n_label/sum(n_labels) * y_
return y
size = 400
mu_list = [-0.5, 0.0, 0.7] # means
var_list = [0.02, 0.03, 0.1] # variances
portion = [0.25, 0.4, 0.35] # proportions
X = gaussian_sample(mu_list, var_list, size, portion)
sns.histplot(X, bins=20,kde=True)
plt.show()
```
## Known variance model
We'll infer a Gaussian mixture model where all components have a known, shared variance but unknown means.
```
mu_0 = 0.0
sigsqr_0 = 1.0
sigsqr = 0.05
prob = GaussianMeanKnownVariance(mu_0, sigsqr_0, sigsqr)
alpha = 0.1
model = DPMM(prob, alpha, max_iter=100)
model.fit(X)
print(mu_list, portion)
print(model.thetas_, [k/sum(model.n_labels_) for k in model.n_labels_])
x = np.arange(-2, 2, 0.01)
y_true = sample_pdf(x, mu_list, var_list, portion)
y_pred = result_pdf(x, model.thetas_, model.n_labels_, sigsqr)
plt.plot(x, y_true, label='true')
plt.plot(x, y_pred, label='pred')
plt.legend()
plt.show()
```
## Modeling mean and variance
In addition to letting the component means float, we now let the component variances float as well.
```
mu_0 = 0.3
kappa_0 = 0.1
sigsqr_0 = 0.1
nu_0 = 1.0
prob = NormInvChi2(mu_0, kappa_0, sigsqr_0, nu_0)
alpha = 1.0
model2 = DPMM(prob, alpha, max_iter=500)
model2.fit(X)
print(list(zip(mu_list, var_list)), portion)
print(model2.thetas_, [k/sum(model2.n_labels_) for k in model2.n_labels_])
mu_ = [mu for mu, var in model2.thetas_]
var_ = [var for mu, var in model2.thetas_]
portion_ = [k/sum(model2.n_labels_) for k in model2.n_labels_]
y_pred = sample_pdf(x, mu_, var_, portion_)
plt.plot(x, y_true, label='true')
plt.plot(x, y_pred, label='pred')
plt.legend()
plt.show()
```
## Normal-inverse-gamma prior
We should obtain statistically identical results from a normal-inverse-gamma (NIG) prior with the corresponding hyperparameters.
```
m_0 = mu_0
V_0 = 1. / kappa_0
a_0 = nu_0 / 2.0
b_0 = nu_0 * sigsqr_0 / 2.0
prob = NormInvGamma(m_0, V_0, a_0, b_0)
alpha = 1.0
model3 = DPMM(prob, alpha, max_iter=100)
model3.fit(X)
print(list(zip(mu_list, var_list)), portion)
print(model3.thetas_, [k/sum(model3.n_labels_) for k in model3.n_labels_])
mu_ = [mu for mu, var in model3.thetas_]
var_ = [var for mu, var in model3.thetas_]
portion_ = [k/sum(model3.n_labels_) for k in model3.n_labels_]
y_pred = sample_pdf(x, mu_, var_, portion_)
plt.plot(x, y_true, label='true')
plt.plot(x, y_pred, label='pred')
plt.legend()
plt.show()
```
<a href="https://colab.research.google.com/github/juunnn/DataScienceTutorial/blob/master/Pengenalan_Data_Science.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Introduction to the Data Science Process
## Main Process
The data science workflow is usually divided into five stages:
1. Business understanding
2. Data preprocessing
3. Modeling
4. Evaluation
5. Deployment
Business understanding usually cannot be done by the data scientist (us) alone; it has to involve people who understand the business. Business here does not only mean making money; organizations that do not generate revenue still have organizational goals. At this stage we should be accompanied by a business person, often called a *domain expert*. For example, if we want to do sentiment analysis with text mining about Gojek or Grab, we can collect tweets related to them, but we cannot decide the sentiment of those tweets from personal opinion alone; a language expert, or someone whose job is to label such data, has to decide whether a tweet carries positive or negative sentiment.
Data preprocessing is the more complex part: we can describe the techniques, but we cannot prescribe in advance which technique should be used. No dataset can be treated exactly like another; each must be handled according to its condition and the business goal. We will discuss this in a dedicated segment later; for now we focus on the main process.
The main process in data science, machine learning, or data mining is roughly the same. The point is that we want to build a model to perform a _task_ (classification, clustering, or regression). This process is usually done iteratively, over and over; it is very rare that we build a model, train it once, and are done. If that does happen, we should be suspicious of the data we have.
In this tutorial we will start with a simple, ordinary modeling task, using a dataset that already exists. Okay, let's prepare the ingredients first!
# Preparing the ingredients
Before starting, make sure Python is installed, and that **pip** or **pip3** is available.
Once that is done, these are the packages that must be present before we can start:
1. pandas
2. numpy
3. sklearn
4. matplotlib
5. seaborn
## Pandas
Pandas is used to work with the data we have. Say we have local data in csv, xls, or h5 format: with pandas we can load it easily, with no extra tools. Pandas is also commonly used to manipulate the attributes we have, including feature engineering; we will cover that in the preprocessing segment. For now we assume the data to be used has already gone through the preprocessing phase and is ready to use. In practice our raw data usually has not, so we cannot just feed it in as-is.
## Numpy
In Python there is no built-in array type, only lists, and lists cannot really be treated as arrays because they behave differently. That is why we usually use numpy for array operations. You know what an array is, right? In mathematics it is called a vector. And not only vectors: numpy can also represent matrices and tensors. Technically, numpy is used for linear algebra, but don't worry, we will not go too deep into linear algebra here, only the parts commonly used in data science.
__Fun Fact__
A vector is a single row of numbers, i.e. a matrix of size 1xn;
a matrix is a collection of numbers arranged in a rectangle, so its size is nxm;
a tensor is a generalization of a matrix: an array with more than two dimensions (a small numpy sketch follows below).
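A quick illustration of these three objects in numpy; the numbers and shapes are arbitrary:
```
import numpy as np

vector = np.array([1, 2, 3])                 # shape (3,)     -> a 1-D array (vector)
matrix = np.array([[1, 2, 3], [4, 5, 6]])    # shape (2, 3)   -> a 2-D array (matrix)
tensor = np.zeros((2, 3, 4))                 # shape (2, 3, 4) -> a 3-D array (tensor)

print(vector.shape, matrix.shape, tensor.shape)
```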
## SKlearn
This is the core of our data science toolbox. SKLearn is a module, or package, that contains ready-to-use data science algorithms. If we want to use SVM, linear regression, or something similar, we need it. It is not only the algorithms, though: sklearn also ships helper tools such as evaluation, data splitting, scaling, and so on (what are those? we will discuss them later).
## Matplotlib & Seaborn
Keep in mind that data science cannot stand on its own and cannot be done only by IT people; it also involves business people and executives. For people like us who are used to staring at a screen for hours, reading many lines of code and long matrices may be fine, but business people are not like that. We do not want all our hard work to end in sorrow because the business side does not understand what we are talking about. So we have to present our results in a more universal way, and one way to do that is with charts or visuals. Matplotlib and Seaborn are commonly used for this, so that what we build can also be understood by people whose background is not IT.
Okay, that is enough introduction; now let's get into the process.
# The Modeling Process
## 1. Import all the ingredients
The ingredients we have must be loaded into the environment we are using. It is easy: just import them and we are done.
```
import pandas as pd
import numpy as np
import sklearn as skl
import matplotlib.pyplot as plt
import seaborn as sns
```
## 2. Build the dataset
The data that has gone through the preprocessing step is then loaded into the environment. This is easy: we use pandas.
```
df = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv",sep=";")
df.head() # view the first 5 rows
```
**df** is the variable where all our data is stored. From this step on, the term "data" becomes "dataset" (a collection of data), because this term will be used more often than the word data. We split this dataset into two parts: the training set and the testing set.
The training set is used when we train the model with the algorithm we chose. The training set is usually larger than the testing set.
The testing set is used when we measure the model we built. The measurement techniques are discussed below.
We usually split the training and testing data 70:30 or 80:20; for now let's try 75:25. We do this with the help of sklearn.
```
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(df.drop(columns='quality'), df.quality, train_size=0.75)  # keep the target out of the feature set
print("training set size: ", len(x_train))
print("testing set size: ", len(x_test))
```
x_train and y_train are what we use to train the model.
## 3. Train the model
The model training itself is done automatically by sklearn, so the process is not visible. The point is that we can train a model with different algorithms without having to understand what runs behind the scenes. First we define the model, and then from that model we call `fit` on the training data:
```
from sklearn.svm import SVR
model = SVR(gamma="scale")  # define the model
model.fit(x_train, y_train)
```
## 4. Testing model (Scoring)
Testing, or scoring, is chosen according to the goal and the algorithm used. Some common measures: accuracy, the confusion matrix, precision, and recall are typically used for classification; Mean Squared Error (MSE) and Mean Error (ME) for regression; for clustering we usually use distances. Since the model we defined here is SVR, a regressor, `model.score` returns the R² score on the test data. Calling it is easy:
```
model.score(x_test,y_test)
```
That is really all there is to the main (modeling) process in data science. It is easy to apply, but somewhat harder when we have to build a model with high accuracy. The process is usually repeated while changing a few parameters: the ratio between training and testing data, the parameters used when defining the model, or even the dataset itself (doing the preprocessing again). This process is called _fine tuning_; a small sketch is shown below.
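As a minimal sketch of fine tuning, one option is to let `GridSearchCV` try a few SVR hyperparameters; the grid below is only an example, not a recommendation:
```
from sklearn.model_selection import GridSearchCV

# example grid: a few values of C and epsilon for SVR
param_grid = {'C': [0.1, 1, 10], 'epsilon': [0.05, 0.1, 0.2]}
search = GridSearchCV(SVR(gamma="scale"), param_grid, cv=3)
search.fit(x_train, y_train)

print("best parameters:", search.best_params_)
print("test score (R^2):", search.best_estimator_.score(x_test, y_test))
```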
# A challenge for you
Try building a model with a different algorithm and a different dataset.
# Random Forest model train
```
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import roc_auc_score, roc_curve
import logging
import mlflow
from urllib.parse import urlparse
from sklearn.ensemble import RandomForestClassifier
from matplotlib import pyplot
from sklearn.model_selection import GridSearchCV
```
## Splitting dataset into train and test
```
def get_split_train_data():
"""Return a tuple containing split train data into X_train X_test, y_train and y_test."""
df = pd.read_csv('../../data/processed/processed_application_train.csv')
train, test = train_test_split(df)
X_train = train.drop(['TARGET'], axis=1)
X_test = test.drop(['TARGET'], axis=1)
y_train = train[['TARGET']]
y_test = test[['TARGET']]
return X_train, X_test, y_train, y_test
```
## Adding the MLflow workflow
### Configuring logs
```
def get_configured_logger():
"""Return a logger for console outputs configured to print warnings."""
logging.basicConfig(level=logging.WARN)
return logging.getLogger(__name__)
```
### Training model on split data
```
def train_random_forest_classifier(X_train, y_train):
"""Return RandomForestClassifier fit on input ndarrays X_train and y_train.
Keyword arguments:
X_train -- ndarray containing all train columns except target column
y_train -- ndarray target column values to train the model
"""
clf = RandomForestClassifier(class_weight='balanced', n_estimators=100)
grid_search = GridSearchCV(clf, {'max_depth': [10, 15], 'min_samples_split': [5, 10]}, n_jobs=-1, cv=5, scoring='accuracy')
grid_search.fit(X_train.values, y_train.values)
clf.set_params(**grid_search.best_params_)
clf = clf.fit(X_train, y_train)
return clf
```
### Getting model evaluation metrics
```
def eval_metrics(actual, pred):
"""Return a tuple containing model classification accuracy, confusion matrix, f1_score and precision score.
Keyword arguments:
actual -- ndarray y_test containing true target values
pred -- ndarray of the predicted target values by the model
"""
accuracy = accuracy_score(actual, pred)
conf_matrix = confusion_matrix(actual, pred)
f_score = f1_score(actual, pred)
precision = precision_score(actual, pred)
return accuracy, conf_matrix, f_score, precision
def get_model_evaluation_metrics(clf, X_test, y_test):
"""Return a tuple containing model classification accuracy, confusion matrix, f1_score, precision score and
ROC area under the curve score.
Keyword arguments:
clf -- classifier model
X_test -- ndarray containing all test columns except target column
y_test -- ndarray target column values to test the model
"""
predicted_repayments = clf.predict(X_test)
(accuracy, conf_matrix, f_score, precision) = eval_metrics(y_test, predicted_repayments)
rf_probs = clf.predict_proba(X_test)
    rf_probs = rf_probs[:, 1]  # probability of the positive class, as expected by roc_auc_score and roc_curve
rf_roc_auc_score = roc_auc_score(y_test, rf_probs)
random_probs = [0 for _ in range(len(y_test))]
random_roc_auc_score = roc_auc_score(y_test, random_probs)
return accuracy, conf_matrix, f_score, precision, rf_roc_auc_score, random_roc_auc_score, rf_probs, random_probs
```
### Tracking the model with MLflow
```
def track_model_params(clf):
"""Log model params on MLFlow UI.
Keyword arguments:
clf -- classifier model
"""
clf_params = clf.get_params()
for param in clf_params:
param_value = clf_params[param]
mlflow.log_param(param, param_value)
```
```
## Visualizing ROC AUC scores for the Random Forest and a random model
```
def vizualize_roc_curves(rf_roc_auc_score, random_roc_auc_score, y_test, rf_probs, random_probs):
"""Vizualize ROC curves for both fit model and random model.
Keyword arguments:
rf_roc_auc_score -- fit model ROC AUC score
random_roc_auc_score -- random model ROC AUC score
y_test -- ndarray of target values
rf_probs -- fit model predicted probabilities
random_probs -- random model predicted probabilities
"""
# summarize scores
print('Random model: ROC AUC=%.3f' % random_roc_auc_score)
print('Random Forest: ROC AUC=%.3f' % rf_roc_auc_score)
# calculate roc curves
random_fpr, random_tpr, _ = roc_curve(y_test, random_probs)
rf_fpr, rf_tpr, _ = roc_curve(y_test, rf_probs)
# plot the roc curve for the model
pyplot.plot(random_fpr, random_tpr, linestyle='--', label='Random model')
pyplot.plot(rf_fpr, rf_tpr, marker='.', label='Random Forest')
# axis labels
pyplot.xlabel('False Positive Rate')
pyplot.ylabel('True Positive Rate')
# show the legend
pyplot.legend()
# show the plot
pyplot.show()
def track_model_metrics(clf, X_test, y_test):
"""Log model metrics on MLFlow UI.
Keyword arguments:
clf -- classifier model
X_test -- ndarray containing all test columns except target column
y_test -- ndarray target column values to test the model
"""
(accuracy, conf_matrix, f_score, precision, rf_roc_auc_score, random_roc_auc_score, rf_probs, random_probs) = \
get_model_evaluation_metrics(clf, X_test, y_test)
mlflow.log_metric('accuracy', accuracy)
mlflow.log_metric('f1_score', f_score)
mlflow.log_metric('precision', precision)
mlflow.log_metric('roc_auc_score', rf_roc_auc_score)
vizualize_roc_curves(rf_roc_auc_score, random_roc_auc_score, y_test, rf_probs, random_probs)
tn, fp, fn, tp = conf_matrix.ravel()
mlflow.log_metric('true_negatives', tn)
mlflow.log_metric('false_positives', fp)
mlflow.log_metric('false_negatives', fn)
mlflow.log_metric('true_positives', tp)
def track_model_version(clf):
"""Version model on MLFlow UI.
Keyword arguments:
clf -- classifier model
"""
tracking_url_type_store = urlparse(mlflow.get_tracking_uri()).scheme
if tracking_url_type_store != 'file':
mlflow.sklearn.log_model(clf, 'model', registered_model_name='RandomForestClassifier')
else:
mlflow.sklearn.log_model(clf, 'model')
def set_mlflow_run_tags():
"""Set current MLFlow run tags."""
tags = {'model_name': 'RandomForestClassifier'}
mlflow.set_tags(tags)
def train_and_track_model_in_mlflow():
"""Train model and track it with MLFLow"""
(X_train, X_test, y_train, y_test) = get_split_train_data()
logger = get_configured_logger()
clf = train_random_forest_classifier(X_train, y_train)
with mlflow.start_run():
track_model_params(clf)
track_model_metrics(clf, X_test, y_test)
track_model_version(clf)
set_mlflow_run_tags()
train_and_track_model_in_mlflow()
```
# Three-dimensional rigid body kinetics
Renato Naville Watanabe
## The Newton-Euler laws
The Newton-Euler laws remain valid for the three-dimensional motion of a rigid body (for a review of the Newton-Euler laws, [see this notebook](newton_euler_equations.ipynb)).
\begin{equation}
\vec{\bf{F}} = m\vec{\bf{a_{cm}}}
\end{equation}
\begin{equation}
\vec{\bf{M_O}} = \frac{d\vec{\bf{H_O}}}{dt}
\end{equation}
## Resultant force and moments
The resultant force and the resultant moment around a point O are computed in the same way as in the two-dimensional case (a small numerical sketch follows the equations below).
\begin{equation}
\vec{\bf{F}} = \sum\limits_{i=1}^n \vec{\bf{F_i}}
\end{equation}
\begin{equation}
\vec{\bf{M_O}} = \sum\limits_{i=1}^n \vec{\bf{r_{i/O}}} \times \vec{\bf{F_i}}
\end{equation}
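A minimal numerical sketch of these two sums; the forces and application points below are arbitrary values, not taken from any example in the text:
```
import numpy as np

# arbitrary example: two forces (N) applied at two points (m), moments taken about the origin O
F1, F2 = np.array([10.0, 0.0, 0.0]), np.array([0.0, 5.0, -2.0])
r1, r2 = np.array([0.0, 0.2, 0.0]), np.array([0.1, 0.0, 0.3])

F = F1 + F2                                # resultant force
M_O = np.cross(r1, F1) + np.cross(r2, F2)  # resultant moment about O
print(F, M_O)
```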
## Deduction of the angular momentum in three-dimensional movement
The major difference that appears when we are dealing with three-dimensional motion is computing the derivative of the angular momentum. For simplicity, the analysis below takes the point O as the center of mass of the body.
We begin computing the angular momentum of a rigid body around its center of mass.
\begin{equation}
\vec{\boldsymbol{H_{cm}}} = \int_B \vec{\boldsymbol{r_{/cm}}} \times \vec{\boldsymbol{v}}\,dm = \int \vec{\boldsymbol{r_{/cm}}} \times (\vec{\boldsymbol{\omega}}\times\vec{\boldsymbol{r_{/cm}}})\,dm
\end{equation}
where $\vec{\boldsymbol{r_{/cm}}}$ is the vector from the point O to the position of the infinitesimal mass considered in the integral. For simplicity of the notation, we will use $\vec{\boldsymbol{r}} = \vec{\boldsymbol{r_{/cm}}}$.
So, the angular momentum is:
$$
\vec{\boldsymbol{H_{cm}}} = \int \vec{\boldsymbol{r}} \times (\vec{\boldsymbol{\omega}}\times\vec{\boldsymbol{r}})\,dm =
\int (r_x\hat{\boldsymbol{i}} + r_y\hat{\boldsymbol{j}}+r_z\hat{\boldsymbol{k}}) \times \left[(\omega_x\hat{\boldsymbol{i}} + \omega_y\hat{\boldsymbol{j}}+\omega_z\hat{\boldsymbol{k}})\times(r_x\hat{\boldsymbol{i}} + r_y\hat{\boldsymbol{j}}+r_z\hat{\boldsymbol{k}})\right]\,dm
$$
$$
\vec{\boldsymbol{H_{cm}}}= \int (r_x\hat{\boldsymbol{i}} + r_y\hat{\boldsymbol{j}}+r_z\hat{\boldsymbol{k}}) \times \left[(\omega_x r_y\hat{\boldsymbol{k}} - \omega_xr_z\hat{\boldsymbol{j}} - \omega_yr_x\hat{\boldsymbol{k}} + \omega_yr_z\hat{\boldsymbol{i}}+ \omega_zr_x\hat{\boldsymbol{j}}- \omega_zr_y\hat{\boldsymbol{i}}\right]\,dm
$$
$$
\vec{\boldsymbol{H_{cm}}}= \int-\omega_x r_xr_y\hat{\boldsymbol{j}} -\omega_x r_xr_z\hat{\boldsymbol{k}} +\omega_y r_x^2\hat{\boldsymbol{j}} + \omega_zr_x^2\hat{\boldsymbol{k}}+ \omega_xr_y^2\hat{\boldsymbol{i}} -\omega_yr_xr_y\hat{\boldsymbol{i}}-\omega_yr_yr_z\hat{\boldsymbol{k}}+\omega_zr_y^2\hat{\boldsymbol{k}}+\omega_x r_z^2\hat{\boldsymbol{i}} +\omega_y r_z^2\hat{\boldsymbol{j}} -\omega_z r_xr_z\hat{\boldsymbol{i}} - \omega_zr_yr_z\hat{\boldsymbol{j}}\,dm
$$
$$
\vec{\boldsymbol{H_{cm}}}=\left(\int \omega_xr_y^2 -\omega_yr_xr_y+\omega_x r_z^2-\omega_z r_xr_z\,dm\right)\;\hat{\boldsymbol{i}}+\left(\int-\omega_x r_xr_y +\omega_y r_x^2 +\omega_y r_z^2 - \omega_zr_yr_z\,dm\right)\hat{\boldsymbol{j}}+\left(\int-\omega_x r_xr_z + \omega_zr_x^2-\omega_yr_yr_z+\omega_zr_y^2 \,dm\right)\hat{\boldsymbol{k}}
$$
$$
\vec{\boldsymbol{H_{cm}}}=\left(\int \omega_x(r_y^2+r_z^2)\,dm+ \int-\omega_yr_xr_y\,dm + \int-\omega_z r_xr_z\,dm\right)\;\hat{\boldsymbol{i}} + \left(\int-\omega_x r_xr_y\,dm +\int\omega_y (r_x^2 +r_z^2)\,dm + \int- \omega_zr_yr_z\,dm\right)\hat{\boldsymbol{j}} + \left(\int-\omega_x r_xr_z\,dm + \int-\omega_yr_yr_z\,dm + \int \omega_z(r_x^2+r_y^2) \,dm\right)\hat{\boldsymbol{k}}
$$
$$
\vec{\boldsymbol{H_{cm}}}=\bigg(\omega_x\int (r_y^2+r_z^2)\,dm+ \omega_y\int-r_xr_y\,dm + \omega_z\int- r_xr_z\,dm\bigg)\;\hat{\boldsymbol{i}}+\bigg(\omega_x\int- r_xr_y\,dm +\omega_y\int (r_x^2 +r_z^2)\,dm + \omega_z\int-r_yr_z\,dm\bigg)\hat{\boldsymbol{j}} + \bigg(\omega_x\int- r_xr_z\,dm + \omega_y\int-r_yr_z\,dm + \omega_z\int (r_x^2+r_y^2) \,dm\bigg)\hat{\boldsymbol{k}}
$$
$$
\vec{\boldsymbol{H_{cm}}}=\left(\omega_xI_{xx}^{cm}+ \omega_yI_{xy}^{cm} + \omega_zI_{xz}^{cm}\right)\;\hat{\boldsymbol{i}} + \left(\omega_xI_{xy}^{cm} +\omega_yI_{yy}^{cm} + \omega_zI_{yz}^{cm}\right)\hat{\boldsymbol{j}}+\left(\omega_xI_{xz}^{cm} + \omega_yI_{yz}^{cm} + \omega_zI_{zz}^{cm}\right)\hat{\boldsymbol{k}}
$$
## Angular momentum in three-dimensional movement
\begin{align}
\begin{split}
\vec{\boldsymbol{H_{cm}}}=\left[\begin{array}{ccc}I_{xx}^{cm}&I_{xy}^{cm}&I_{xz}^{cm}\\
I_{xy}^{cm}&I_{yy}^{cm}&I_{yz}^{cm}\\
I_{xz}^{cm}&I_{yz}^{cm}&I_{zz}^{cm}\end{array}\right]\cdot
\left[\begin{array}{c}\omega_x\\\omega_y\\\omega_z \end{array}\right] = I\vec{\boldsymbol{\omega}}
\end{split}
\label{eq:angmom}
\end{align}
where this matrix is the Matrix of Inertia (or more rigorously, Tensor of Inertia) as defined previously in [this notebook about moment of inertia](CenterOfMassAndMomentOfInertia.ipynb#matrixinertia).
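A minimal numerical sketch of Eq. \eqref{eq:angmom}; the inertia values and angular velocity below are arbitrary:
```
import numpy as np

# arbitrary symmetric inertia matrix about the center of mass (kg·m²) and angular velocity (rad/s)
I = np.array([[0.30, -0.02, 0.01],
              [-0.02, 0.25, -0.03],
              [0.01, -0.03, 0.10]])
omega = np.array([1.0, -2.0, 0.5])

H_cm = I @ omega   # angular momentum about the center of mass
print(H_cm)
```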
## The matrix of inertia is different depending on the body orientation
So, to compute the angular momentum of a body we have to multiply the matrix of inertia $I$ of the body by its angular velocity $\vec{\boldsymbol{\omega}}$. The problem with this approach is that the moments and products of inertia depend on the orientation of the body relative to the frame of reference. Since they depend on the distances of each point of the body to the axes, if the body is rotating the matrix of inertia $I$ will be different at each instant.
<figure><img src="../images/3Dbodyref.png" width=800 /></figure>
## The solution is to attach a frame reference to the body
The solution to this problem is to attach a frame of reference to the body. We will denote this frame of reference as $\hat{\boldsymbol{e_1}}$, $\hat{\boldsymbol{e_2}}$ and $\hat{\boldsymbol{e_3}}$, with origin at the center of mass of the body. As can be noted from the figure below, this frame of reference moves along with the body. Now the matrix of inertia $I$ is constant relative to the new basis $\hat{\boldsymbol{e_1}}$, $\hat{\boldsymbol{e_2}}$ and $\hat{\boldsymbol{e_3}}$.
<figure><img src="../images/3DbodyrefMove.png" width=800 /></figure>
## The angular velocity in the fixed frame
So, we can write the angular momentum vector relative to this new basis. To do this, we must write the angular velocity in this new basis:
\begin{equation}
\vec{\boldsymbol{\omega}} = \omega_1\hat{\boldsymbol{e_1}} + \omega_2\hat{\boldsymbol{e_2}} + \omega_3\hat{\boldsymbol{e_3}}
\end{equation}
Note that this angular velocity is the same vector that we used previously in Eq. \eqref{eq:angmom}. We are just describing it in a basis attached to the body (local basis).
## The body-fixed frame is chosen so that the matrix of inertia is diagonal
As we can choose the basis versors as we want, we can choose them so that the products of inertia are equal to zero. This can always be done. In particular, if the body has axes of symmetry, we can choose these axes (the principal axes) as the directions of the basis, and the products of inertia will be zero. Having chosen the basis $\hat{\boldsymbol{e_1}}$, $\hat{\boldsymbol{e_2}}$ and $\hat{\boldsymbol{e_3}}$ in this way, the matrix of inertia will be a diagonal matrix:
\begin{equation}
I = \left[\begin{array}{ccc}I_1&0&0\\
0&I_2&0\\
0&0&I_3\end{array}\right]
\end{equation}
## The angular momentum in the body-fixed frame
So, using the basis in the directions of the principal axes of the body, the angular momentum simplifies to:
\begin{equation}
\vec{\boldsymbol{H_{cm}}} = I\vec{\boldsymbol{\omega}} = I_1\omega_1 \hat{\boldsymbol{e_1}} + I_2\omega_2 \hat{\boldsymbol{e_2}} +I_3\omega_3 \hat{\boldsymbol{e_3}}
\label{eq:angmomprinc}
\end{equation}
## Derivative of the angular momentum
To apply the second Newton-Euler law, we must compute the derivative of the angular momentum, so we differentiate the angular momentum in Eq. \eqref{eq:angmomprinc}. As the versors $\hat{\boldsymbol{e_1}}$, $\hat{\boldsymbol{e_2}}$ and $\hat{\boldsymbol{e_3}}$ vary in time, we must consider their derivatives.
\begin{equation}
\frac{d\vec{\boldsymbol{H_{cm}}}}{dt} = I_1\dot{\omega_1}\hat{\boldsymbol{e_1}} + I_2\dot{\omega_2}\hat{\boldsymbol{e_2}}+I_3\dot{\omega_3}\hat{\boldsymbol{e_3}} + I_1\omega_1\frac{d\hat{\boldsymbol{e_1}}}{dt} + I_2\omega_2\frac{d\hat{\boldsymbol{e_2}}}{dt}+I_3\omega_3\frac{d\hat{\boldsymbol{e_3}}}{dt}
\label{eq:derivangmom}
\end{equation}
Now it only remains to find an expression for the derivative of the versors $\frac{d\hat{\boldsymbol{e_1}}}{dt}$, $\frac{d\hat{\boldsymbol{e_2}}}{dt}$ and $\frac{d\hat{\boldsymbol{e_3}}}{dt}$.
## Angular velocity (from Kane and Levinson (1985))
It would be interesting to find some relation between these derivatives and the angular velocity of the body.
First we will express the angular velocity $\vec{\boldsymbol{\omega}}$ in terms of these derivatives.
Remember that each component of the angular velocity is described as a vector orthogonal to the plane of the corresponding rotation ($\vec{\boldsymbol{\omega_1}} = \frac{d\theta_1}{dt}\hat{\boldsymbol{e_1}}$, $\vec{\boldsymbol{\omega_2}} = \frac{d\theta_2}{dt}\hat{\boldsymbol{e_2}}$ and $\vec{\boldsymbol{\omega_3}} = \frac{d\theta_3}{dt}\hat{\boldsymbol{e_3}}$).
Note also that the derivative of the angle $\theta_1$ can be described as the projection of the vector $\frac{d\hat{\boldsymbol{e_2}}}{dt}$ on the vector $\hat{\boldsymbol{e_3}}$.
This can be written by using the scalar product between these vectors: $\frac{d\theta_1}{dt} = \frac{d\hat{\boldsymbol{e_2}}}{dt}\cdot \hat{\boldsymbol{e_3}}$. The same holds for the angles in the other two directions: $\frac{d\theta_2}{dt} = \frac{d\hat{\boldsymbol{e_3}}}{dt}\cdot \hat{\boldsymbol{e_1}}$ and $\frac{d\theta_3}{dt} = \frac{d\hat{\boldsymbol{e_1}}}{dt}\cdot \hat{\boldsymbol{e_2}}$.
<figure><img src="../images/derivVersor.png\" width=400 />
So, we can write the angular velocity as:
\begin{equation}
\vec{\boldsymbol{\omega}} = \left(\frac{d\hat{\boldsymbol{e_2}}}{dt}\cdot \hat{\boldsymbol{e_3}}\right) \hat{\boldsymbol{e_1}} + \left(\frac{d\hat{\boldsymbol{e_3}}}{dt}\cdot \hat{\boldsymbol{e_1}}\right) \hat{\boldsymbol{e_2}} + \left(\frac{d\hat{\boldsymbol{e_1}}}{dt}\cdot \hat{\boldsymbol{e_2}}\right) \hat{\boldsymbol{e_3}}
\label{eq:angvel}
\end{equation}
## The derivative of the versors
Now, we must isolate the derivatives of the versors to substitute them in Eq. \eqref{eq:derivangmom}. To isolate the derivative of the versor $\hat{\boldsymbol{e_1}}$, first we take the cross product of both sides of the equation above with $\hat{\boldsymbol{e_1}}$:
\begin{equation}
\vec{\boldsymbol{\omega}} \times \hat{\boldsymbol{e_1}} = - \left(\frac{d\hat{\boldsymbol{e_3}}}{dt}\cdot \hat{\boldsymbol{e_1}}\right) \hat{\boldsymbol{e_3}} + \left(\frac{d\hat{\boldsymbol{e_1}}}{dt}\cdot \hat{\boldsymbol{e_2}}\right) \hat{\boldsymbol{e_2}}
\end{equation}
If we note that the term multiplying $\hat{\boldsymbol{e_3}}$ on the right side of the identity can be rewritten using $\frac{d(\hat{\boldsymbol{e_1}}\cdot\hat{\boldsymbol{e_3}})}{dt} = \frac{d\hat{\boldsymbol{e_1}}}{dt}\cdot\hat{\boldsymbol{e_3}} + \frac{d\hat{\boldsymbol{e_3}}}{dt}\cdot\hat{\boldsymbol{e_1}} \rightarrow 0 = \frac{d\hat{\boldsymbol{e_1}}}{dt}\cdot\hat{\boldsymbol{e_3}} + \frac{d\hat{\boldsymbol{e_3}}}{dt}\cdot\hat{\boldsymbol{e_1}} \rightarrow \frac{d\hat{\boldsymbol{e_3}}}{dt}\cdot\hat{\boldsymbol{e_1}} = - \frac{d\hat{\boldsymbol{e_1}}}{dt}\cdot\hat{\boldsymbol{e_3}}$ (the scalar product $\hat{\boldsymbol{e_1}}\cdot\hat{\boldsymbol{e_3}}$ is zero because these vectors are orthogonal, so its derivative is also zero), the equation above becomes:
\begin{equation}
\vec{\boldsymbol{\omega}} \times \hat{\boldsymbol{e_1}} = \left(\frac{d\hat{\boldsymbol{e_1}}}{dt}\cdot \hat{\boldsymbol{e_3}}\right) \hat{\boldsymbol{e_3}} + \left(\frac{d\hat{\boldsymbol{e_1}}}{dt}\cdot \hat{\boldsymbol{e_2}}\right) \hat{\boldsymbol{e_2}}
\end{equation}
Finally, we can note that $\frac{d\hat{\boldsymbol{e_1}}\cdot\hat{\boldsymbol{e_1}} }{dt} = \frac{d\hat{\boldsymbol{e_1}}}{dt}\cdot\hat{\boldsymbol{e_1}} + \frac{d\hat{\boldsymbol{e_1}}}{dt}\cdot\hat{\boldsymbol{e_1}} \rightarrow \frac{d(1)}{dt} = 2\frac{d\hat{\boldsymbol{e_1}}}{dt}\cdot\hat{\boldsymbol{e_1}} \rightarrow \frac{d\hat{\boldsymbol{e_1}}}{dt}\cdot\hat{\boldsymbol{e_1}} = 0 $. As this term is equal to zero, we can add it to the expression above:
\begin{equation}
\vec{\boldsymbol{\omega}} \times \hat{\boldsymbol{e_1}} = \left(\frac{d\hat{\boldsymbol{e_1}}}{dt}\cdot\hat{\boldsymbol{e_1}}\right)\hat{\boldsymbol{e_1}} + \left(\frac{d\hat{\boldsymbol{e_1}}}{dt}\cdot \hat{\boldsymbol{e_3}}\right) \hat{\boldsymbol{e_3}} + \left(\frac{d\hat{\boldsymbol{e_1}}}{dt}\cdot \hat{\boldsymbol{e_2}}\right) \hat{\boldsymbol{e_2}}
\end{equation}
Note that the expression above is just another way to write the vector $\frac{d\hat{\boldsymbol{e_1}}}{dt}$, since any vector can be described as the sum of its projections on each of the versors forming a basis.
So, the derivative of the versor $\hat{\boldsymbol{e_1}}$ can be written as:
\begin{equation}
\frac{d\hat{\boldsymbol{e_1}}}{dt} = \vec{\boldsymbol{\omega}} \times \hat{\boldsymbol{e_1}}
\end{equation}
Similarly, the derivatives of the versors $\hat{\boldsymbol{e_2}}$ and $\hat{\boldsymbol{e_3}}$ can be written as:
\begin{equation}
\frac{d\hat{\boldsymbol{e_2}}}{dt} = \vec{\boldsymbol{\omega}} \times \hat{\boldsymbol{e_2}} ~~~~~~~~\text{and} ~~~~~~ \frac{d\hat{\boldsymbol{e_3}}}{dt} = \vec{\boldsymbol{\omega}} \times \hat{\boldsymbol{e_3}}
\end{equation}
## The derivative of the angular momentum
Now we can get back to the equation describing the derivative of the angular momentum (Eq.\eqref{eq:derivangmom}):
\begin{align}
\begin{split}
\frac{d\vec{\boldsymbol{H_{cm}}}}{dt} =&I_1\dot{\omega_1}\hat{\boldsymbol{e_1}} + I_2\dot{\omega_2}\hat{\boldsymbol{e_2}}+I_3\dot{\omega_3}\hat{\boldsymbol{e_3}} + I_1\omega_1\frac{d\hat{\boldsymbol{e_1}}}{dt} + I_2\omega_2\frac{d\hat{\boldsymbol{e_2}}}{dt}+I_3\omega_3\frac{d\hat{\boldsymbol{e_3}}}{dt}=\\
=& I_1\dot{\omega_1}\hat{\boldsymbol{e_1}} + I_2\dot{\omega_2}\hat{\boldsymbol{e_2}}+I_3\dot{\omega_3}\hat{\boldsymbol{e_3}} + I_1\omega_1(\vec{\boldsymbol{\omega}} \times \hat{\boldsymbol{e_1}}) + I_2\omega_2(\vec{\boldsymbol{\omega}} \times \hat{\boldsymbol{e_2}})+I_3\omega_3(\vec{\boldsymbol{\omega}} \times \hat{\boldsymbol{e_3}}) = \\
=& I_1\dot{\omega_1}\hat{\boldsymbol{e_1}} + I_2\dot{\omega_2}\hat{\boldsymbol{e_2}}+I_3\dot{\omega_3}\hat{\boldsymbol{e_3}} + \vec{\boldsymbol{\omega}} \times I_1\omega_1\hat{\boldsymbol{e_1}} + \vec{\boldsymbol{\omega}} \times I_2\omega_2\hat{\boldsymbol{e_2}}+\vec{\boldsymbol{\omega}} \times I_3\omega_3\hat{\boldsymbol{e_3}} = \\
=& I_1\dot{\omega_1}\hat{\boldsymbol{e_1}} + I_2\dot{\omega_2}\hat{\boldsymbol{e_2}}+I_3\dot{\omega_3}\hat{\boldsymbol{e_3}} + \vec{\boldsymbol{\omega}} \times (I_1\omega_1\hat{\boldsymbol{e_1}} + I_2\omega_2\hat{\boldsymbol{e_2}} + I_3\omega_3\hat{\boldsymbol{e_3}})=\\
=&I\vec{\dot{\boldsymbol{\omega}}} + \vec{\boldsymbol{\omega}} \times (I\vec{\boldsymbol{\omega}})
\end{split}
\label{eq:derivangmomVec}
\end{align}
Performing the cross product, we get the expression for each of the components in the basis attached to the body:
\begin{align}
\begin{split}
\frac{d\vec{\boldsymbol{H_{cm}}}}{dt} =\left[\begin{array}{c}I_1\dot{\omega_1}\\I_2\dot{\omega_2}\\I_3\dot{\omega_3}\end{array}\right] + \left[\begin{array}{c}\omega_1\\\omega_2\\\omega_3\end{array}\right] \times \left[\begin{array}{c}I_1\omega_1\\I_2\omega_2\\I_3\omega_3\end{array}\right] = \left[\begin{array}{c}I_1\dot{\omega_1} + \omega_2\omega_3(I_3-I_2)\\I_2\dot{\omega_2}+\omega_1\omega_3(I_1-I_3)\\I_3\dot{\omega_3}+\omega_1\omega_2(I_2-I_1)\end{array}\right]
\end{split}
\end{align}
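The vector form $I\vec{\dot{\boldsymbol{\omega}}} + \vec{\boldsymbol{\omega}} \times (I\vec{\boldsymbol{\omega}})$ is straightforward to code. Below is a minimal sketch (the helper name `dH_cm_dt` and the numerical values are only illustrative):
```
import numpy as np

def dH_cm_dt(I1, I2, I3, omega, alpha):
    """Derivative of the angular momentum in the principal-axes basis.
    omega = [w1, w2, w3], alpha = [dw1/dt, dw2/dt, dw3/dt]."""
    I = np.diag([I1, I2, I3])
    return I @ alpha + np.cross(omega, I @ omega)

# illustrative (made-up) values
print(dH_cm_dt(0.1, 0.2, 0.2,
               omega=np.array([1.0, 0.5, -0.3]),
               alpha=np.array([0.0, 0.1, 0.2])))
```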
## Newton-Euler laws
Having computed the derivative of the angular momentum, we have the final forms of the Newton-Euler laws:
\begin{equation}
F_x = ma_{cm_x}
\end{equation}
\begin{equation}
F_y = ma_{cm_y}
\end{equation}
\begin{equation}
F_z = ma_{cm_z}
\end{equation}
\begin{equation}
M_{cm_1} = I_1\dot{\omega_1} + \omega_2\omega_3(I_3-I_2)
\label{eq:M1}
\end{equation}
\begin{equation}
M_{cm_2} = I_2\dot{\omega_2}+\omega_1\omega_3(I_1-I_3)
\label{eq:M2}
\end{equation}
\begin{equation}
M_{cm_3} = I_3\dot{\omega_3}+\omega_1\omega_2(I_2-I_1)
\label{eq:M3}
\end{equation}
Note that the equations of the forces are written in the global frame of reference and the equations of the moments are described in the frame of reference of the body. So, before using Eq. \eqref{eq:derivangmomVec} or Eqs. \eqref{eq:M1}, \eqref{eq:M2} and \eqref{eq:M3} you must transform all the forces and moment-arms to the frame of reference of the body by using rotation matrices.
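For reference, this change of basis amounts to stacking the basis versors as the rows of a rotation matrix and multiplying it by the vector expressed in the global frame, which is exactly what is done in the examples below. A minimal sketch with made-up versors:
```
import numpy as np

# Made-up basis attached to the body, expressed in the global frame
e1 = np.array([1.0, 0.0, 0.0])
e2 = np.array([0.0, np.cos(0.3), np.sin(0.3)])
e3 = np.cross(e1, e2)
R = np.vstack((e1, e2, e3))   # each row is one basis versor

M_global = np.array([0.0, 2.0, -1.0])  # a moment described in the global basis
M_local = R @ M_global                 # the same moment in the body basis
print(M_local)
```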
Below are shown some examples with three-dimensional kinematic data to find the forces and moments acting on the body.
## Examples
### 1 ) 3D pendulum bar
The file '../data/3Dpendulum.txt' contains 3 seconds of data of 3 points of a three-dimensional cylindrical pendulum, very similar to the pendulum shown in the [notebook about free-body diagrams](FreeBodyDiagramForRigidBodies.ipynb#pendulum), except that it can move in every direction. Also, it has a motor at the upper part of the cylindrical bar producing torques to move the bar. It has mass $m=1$ kg, length $l=1$ m and radius $r=0.1$ m.
The point m1 is at the upper part of the cylinder and is the origin of the system.
The point m2 is at the center of mass of the cylinder.
The point m3 is a point at the surface of the cylinder.
The free-body diagram of the 3D pendulum is depicted below. There is the gravitational force acting at the center of mass of the body, the torque $M_1$ due to the motor acting on the pendulum, and the force $F_1$ due to the restraint at the upper part of the cylinder. Together with the forces, the local basis $\hat{\boldsymbol{e_1}}$, $\hat{\boldsymbol{e_2}}$ and $\hat{\boldsymbol{e_3}}$, in the direction of the principal axes and with origin at the center of mass of the body, is also shown.
<figure><img src="../images/3DpendulumFBD.png\" width=400 />
The resultant force acting on the cylinder is:
\begin{equation}
\vec{\boldsymbol{F}} = \vec{\boldsymbol{F_O}} - mg\hat{\boldsymbol{k}}
\end{equation}
So, the first Newton-Euler law, at each component of the global basis, is written as:
\begin{align}
\begin{split}
F_{O_x} &= ma_{cm_x} &\rightarrow F_{O_x} &= ma_{cm_x} \\
F_{O_y} &= ma_{cm_y} &\rightarrow F_{O_y} &= ma_{cm_y}\\
F_{O_z} - mg &= ma_{cm_z} &\rightarrow F_{O_z} &= ma_{cm_z} + mg
\end{split}
\label{eq:fnependulum}
\end{align}
Now, the resultant moment applied to the body, computed relative to the center of mass, is:
\begin{equation}
\vec{\boldsymbol{M}} = \vec{\boldsymbol{M_O}} + \vec{\boldsymbol{r_{O/cm}}} \times \vec{\boldsymbol{F_O}}
\end{equation}
So, the second Newton-Euler law, at each of the components at the local basis of the body, is written as:
\begin{align}
\begin{split}
M_{O_1} + MFocm_1 &= I_1\dot{\omega_1} + \omega_2\omega_3(I_3-I_2) \rightarrow M_{O_1} &= I_1\dot{\omega_1} + \omega_2\omega_3(I_3-I_2) - MFocm_1\\
M_{O_2} + MFocm_2 &= I_2\dot{\omega_2} + \omega_1\omega_3(I_1-I_3) \rightarrow M_{O_2} &= I_2\dot{\omega_2} + \omega_1\omega_3(I_1-I_3) - MFocm_2\\
M_{O_3} + MFocm_3 &= I_3\dot{\omega_3} + \omega_1\omega_2(I_2-I_1) \rightarrow M_{O_3} &= I_3\dot{\omega_3} + \omega_1\omega_2(I_2-I_1) - MFocm_3
\end{split}
\end{align}
where $\vec{\boldsymbol{MFocm}} = \vec{\boldsymbol{r_{O/cm}}} \times \vec{\boldsymbol{F_O}}$.
The moments of inertia in the directions of $\hat{\boldsymbol{e_1}}$, $\hat{\boldsymbol{e_2}}$ and $\hat{\boldsymbol{e_3}}$ are $I_1 = \frac{mR^2}{12}$ and $I_2=I_3=\frac{m(3R^2+l^2)}{12}$. Now, to compute the moment $\vec{\boldsymbol{M_O}}$ and the force $\vec{\boldsymbol{F_O}}$, we will need to find the acceleration of the center of mass $\vec{\boldsymbol{a_{cm}}}$, the angular velocity $\vec{\boldsymbol{\omega}}$, the time-derivatives of each component of the angular velocity, and the moment-arm $\vec{\boldsymbol{r_{O/cm}}}$ to compute the torque due to the force $\vec{\boldsymbol{F_O}}$. These signals will come from the kinematic data file.
First, we need to open the file with the data:
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib notebook
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
data = np.loadtxt('../data/3dPendulum.txt', skiprows=1, delimiter = ',')
m = 1
g= 9.81
l = 1
r = 0.1
I1 = m*r**2/12
I2 = m*(3*r**2+l**2)/12
I3 = I2
```
Now, we assign the proper columns to variables:
```
t = data[:,0]
m1 = data[:,1:4]
m2 = data[:,4:7]
m3 = data[:,7:]
```
As the center of mass corresponds to the data contained in m2, we can find the acceleration of the center of mass $\vec{\boldsymbol{a_{cm}}}$. This will be performed by numerically differentiating the position of the center of mass twice. The numerical derivative of a function $f(t)$ with samples $f(i)$ can be obtained by taking the forward difference of the values $f(i)$:
\begin{equation}
\frac{df}{dt}(i) = \frac{f(i+1)-f(i)}{\Delta t}
\end{equation}
The numerical derivative could be obtained by taking the backward differences as well:
\begin{equation}
\frac{df}{dt}(i) = \frac{f(i)-f(i-1)}{\Delta t}
\end{equation}
A better estimate of the time derivative of the function $f(t)$ is obtained by averaging the estimates from the backward and forward differences (this subject is treated [in this notebook about data filtering](DataFiltering.ipynb#numdiff)):
\begin{equation}
\frac{df}{dt}(i) = \frac{\frac{f(i+1)-f(i)}{\Delta t} + \frac{f(i)-f(i-1)}{\Delta t}}{2} = \frac{f(i+1)-f(i-1)}{2\Delta t}
\label{eq:centralderiv}
\end{equation}
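This central difference could be wrapped in a small helper (a minimal sketch, equivalent to the expressions used in the cells below; `np.gradient(f, dt, axis=0)` is an alternative that also keeps the end points):
```
import numpy as np

def central_diff(f, dt):
    """Central difference of the rows of a 2D array, Eq. (centralderiv).
    The first and last samples are lost, as in the cells below."""
    return (f[2:, :] - f[0:-2, :]) / (2 * dt)
```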
So, the acceleration of the center of mass is:
```
dt = t[1]-t[0]
rcm = m2
vcm = (rcm[2:,:]-rcm[0:-2,:])/(2*dt)
acm = (vcm[2:,:]-vcm[0:-2,:])/(2*dt)
```
Now we can find the force $\vec{\boldsymbol{F_O}}$ using the Eq. \eqref{eq:fnependulum}.
```
Fox = m*acm[:,0]
Foy = m*acm[:,1]
Foz = m*acm[:,2] + m*g
Fo=np.hstack((Fox.reshape(-1,1),Foy.reshape(-1,1),Foz.reshape(-1,1)))
plt.figure()
plt.plot(t[0:acm.shape[0]], Fox)
plt.plot(t[0:acm.shape[0]], Foy,'--')
plt.plot(t[0:acm.shape[0]], Foz)
plt.legend(('x','y','z'))
plt.title('Force (N)')
plt.show()
```
Now, to find the moment being applied to the body, we need to compute a basis attached to the body:
```
e1 = m2 - m1
e1 = e1/np.linalg.norm(e1,axis=1,keepdims=True)
e2 = m3-m2
e2 = e2/np.linalg.norm(e2,axis=1,keepdims=True)
e3 = np.cross(e1,e2,axis=1)
e3 = e3/np.linalg.norm(e3,axis=1,keepdims=True)
e2 = np.cross(e3,e1, axis=1)
e2 = e2/np.linalg.norm(e2,axis=1,keepdims=True)
```
To compute the moment applied to the body, we need the angular velocity described in the basis attached to the body. The easiest way to find this angular velocity is to use Eq. \eqref{eq:angvel}, repeated here.
\begin{equation}
\vec{\boldsymbol{\omega}} = \left(\frac{d\hat{\boldsymbol{e_2}}}{dt}\cdot \hat{\boldsymbol{e_3}}\right) \hat{\boldsymbol{e_1}} + \left(\frac{d\hat{\boldsymbol{e_3}}}{dt}\cdot \hat{\boldsymbol{e_1}}\right) \hat{\boldsymbol{e_2}} + \left(\frac{d\hat{\boldsymbol{e_1}}}{dt}\cdot \hat{\boldsymbol{e_2}}\right) \hat{\boldsymbol{e_3}}
\end{equation}
To do this we need the derivatives of the basis versors. This will also be performed with Eq. \eqref{eq:centralderiv}.
To perform the computation of the angular velocity, remember that the scalar product between two vectors is given by:
\begin{equation}
\vec{\bf{v}}\cdot\vec{\bf{w}} = \left[\begin{array}{c}v_x\\v_y\\v_z \end{array}\right]\cdot \left[\begin{array}{c}w_x\\w_y\\w_z \end{array}\right] = v_xw_x+v_yw_y+v_zw_z
\end{equation}
```
de1dt = (e1[2:,:]-e1[0:-2,:])/(2*dt)
de2dt = (e2[2:,:]-e2[0:-2,:])/(2*dt)
de3dt = (e3[2:,:]-e3[0:-2,:])/(2*dt)
omega = np.hstack((np.sum(de2dt*e3[1:-1,:], axis = 1).reshape(-1,1),
np.sum(de3dt*e1[1:-1,:], axis = 1).reshape(-1,1),
np.sum(de1dt*e2[1:-1,:], axis = 1).reshape(-1,1)))
```
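As a side note, the row-wise scalar products above can also be written with `np.einsum` (a minimal sketch reusing the arrays from the previous cell; the result should match `omega` up to floating-point error):
```
omega_alt = np.stack((np.einsum('ij,ij->i', de2dt, e3[1:-1, :]),
                      np.einsum('ij,ij->i', de3dt, e1[1:-1, :]),
                      np.einsum('ij,ij->i', de1dt, e2[1:-1, :])), axis=1)
print(np.allclose(omega, omega_alt))
```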
From the angular velocity vector we can obtain the derivatives of each component of it, also needed to compute the moment applied to the body. To do this we will use Eq. \eqref{eq:centralderiv}:
```
alpha = (omega[2:,:]-omega[0:-2,:])/(2*dt)
```
It remains to find the moment caused by the force $\vec{\boldsymbol{F_O}}$, $\vec{\boldsymbol{MFocm}} = \vec{\boldsymbol{r_{O/cm}}} \times \vec{\boldsymbol{F_O}}$. The moment-arm $\vec{\boldsymbol{r_{O/cm}}} =-\vec{\boldsymbol{r_{cm}}}$.
```
MFocm = np.cross(-rcm[2:-2], Fo, axis = 1)
```
The problem is that this moment is in the global basis. We need to transform it to the local basis. This will be performed using the rotation matrix of the bar. Each row of this matrix is one of the basis versors. Note that at each instant the matrix of rotation $R$ will be different. After the matrix is formed, we can find the components of the moment $\vec{\boldsymbol{MFocm}}$ in the local basis by multiplying the matrix of rotation $R$ by the vector $\vec{\boldsymbol{MFocm}}$.
```
MFocmRotated = np.zeros_like(MFocm)
for i in range(MFocm.shape[0]):
R = np.vstack((e1[i,:],e2[i,:],e3[i,:]))
MFocmRotated[i,:]=R@MFocm[i,:]
Mo1 = I1*alpha[:,0] + omega[0:alpha.shape[0],1]*omega[0:alpha.shape[0],2]*(I3-I2) - MFocmRotated[:,0]
Mo2 = I2*alpha[:,1] + omega[0:alpha.shape[0],0]*omega[0:alpha.shape[0],2]*(I1-I3) - MFocmRotated[:,1]
Mo3 = I3*alpha[:,2] + omega[0:alpha.shape[0],0]*omega[0:alpha.shape[0],1]*(I2-I1) - MFocmRotated[:,2]
plt.figure()
plt.plot(t[2:-2], Mo1)
plt.plot(t[2:-2], Mo2)
plt.plot(t[2:-2], Mo3)
plt.legend(('$e_1$','$e_2$','$e_3$'))
plt.show()
```
We could also have used the vectorial form of the derivative of the angular momentum (Eq. \eqref{eq:derivangmomVec}) and instead of writing three lines of code, write only one. The result is the same.
```
I = np.array([[I1,0,0],[0,I2,0],[0,0,I3]])
Mo = ([email protected]).T + np.cross(omega[0:alpha.shape[0],:], (I@omega[0:alpha.shape[0],:].T).T,axis=1) - MFocmRotated
plt.figure()
plt.plot(t[2:-2], Mo)
plt.legend(('$e_1$','$e_2$','$e_3$'))
plt.show()
```
### 2 ) Data from postural control
This example will use real data from a subject during quiet standing for 60 seconds. These data come from the database freely available at [https://github.com/demotu/datasets/tree/master/PDS](https://github.com/demotu/datasets/tree/master/PDS). The data of this subject are in the file '../data/postureData.txt'.
The mass of the subject was $m = 53$ kg and her height was $h= 1.65 $ m.
The free-body diagram is very similar to the free-body diagram shown [in the notebook about free-body diagrams](FreeBodyDiagramForRigidBodies.ipynb#quietstanding), except that the force $\vec{\boldsymbol{F_A}}$ and the moment $\vec{\boldsymbol{M_A}}$ have components in all 3 directions.
So, the first Newton-Euler law, at each component of the global basis, is written as (note that in these data, the vertical direction is the y coordinate):
\begin{align}
\begin{split}
F_{A_x} &= ma_{cm_x} &\rightarrow F_{A_x} &= ma_{cm_x} \\
F_{A_y} - mg &= ma_{cm_y} &\rightarrow F_{A_y} &= ma_{cm_y} + mg\\
F_{A_z} &= ma_{cm_z} &\rightarrow F_{A_z} &= ma_{cm_z}
\end{split}
\label{eq:fnequiet}
\end{align}
Now, the resultant moment applied to the body, computed relative to the center of mass, is:
\begin{equation}
\vec{\boldsymbol{M}} = \vec{\boldsymbol{M_A}} + \vec{\boldsymbol{r_{A/cm}}} \times \vec{\boldsymbol{F_A}}
\end{equation}
So, the second Newton-Euler law, at each of the components at the local basis of the body, is written as:
\begin{align}
\begin{split}
\vec{\boldsymbol{M_A}} + \vec{\boldsymbol{MFAcm}} &= I\left[\begin{array}{c}\dot{\omega_1}\\\dot{\omega_2}\\\dot{\omega_3}\end{array}\right] + \vec{\boldsymbol{\omega}}\times I\vec{\boldsymbol{\omega}} \rightarrow \vec{\boldsymbol{M_A}} = I\left[\begin{array}{c}\dot{\omega_1}\\\dot{\omega_2}\\\dot{\omega_3}\end{array}\right] + \vec{\boldsymbol{\omega}} \times I\vec{\boldsymbol{\omega}}- \vec{\boldsymbol{MFAcm}}
\end{split}
\end{align}
where $\vec{\boldsymbol{MFAcm}} = \vec{\boldsymbol{r_{A/cm}}} \times \vec{\boldsymbol{F_A}}$.
Now we open the data and assign the coordinates of each marker to a variable.
```
data = np.loadtxt('../data/postureData.txt', skiprows=1, delimiter = ',')
t = data[:,0]
dt = t[1]-t[0]
rcm = data[:,1:4] #center of mass
rrA = data[:,4:7] # Right lateral malleolus
rlA = data[:,7:] # Left lateral malleolus
```
The body will be approximated by a cylinder with the height of the subject and radius equal to half of the mean distance between the right and left lateral malleoli.
```
m = 53
h = 1.65
r = np.mean(np.linalg.norm(rrA-rlA, axis = 1))/2
I1 = m*r**2/12 # longitudinal
I2 = m*(3*r**2+h**2)/12 # sagittal
I3 = I2 # transversal
# acceleration of the center of mass by deriving the center of mass twice
vcm = (rcm[2:,:]-rcm[0:-2,:])/(2*dt)
acm = (vcm[2:,:]-vcm[0:-2,:])/(2*dt)
FAx = m*acm[:,0]
FAy = m*acm[:,1] + m*g
FAz = m*acm[:,2]
FA=np.hstack((FAx.reshape(-1,1),FAy.reshape(-1,1),FAz.reshape(-1,1)))
```
Now we form the basis attached to the body. The first versor $\hat{\boldsymbol{e_1}}$ will be a versor from the midpoint between the lateral malleoli to the center of mass of the body. The second versor $\hat{\boldsymbol{e_2}}$ will be a versor from the right to the left malleolus. The third versor $\hat{\boldsymbol{e_3}}$ will be the cross product between $\hat{\boldsymbol{e_1}}$ and $\hat{\boldsymbol{e_2}}$.
```
e1 = rcm - (rlA+rrA)/2
e1 = e1/np.linalg.norm(e1,axis=1,keepdims=True)
e2 = rlA-rrA
e2 = e2/np.linalg.norm(e2,axis=1,keepdims=True)
e3 = np.cross(e1,e2,axis=1)
e3 = e3/np.linalg.norm(e3,axis=1,keepdims=True)
e2 = np.cross(e3,e1, axis=1)
e2 = e2/np.linalg.norm(e2,axis=1,keepdims=True)
```
Now we can find the angular velocity $\vec{\boldsymbol{\omega}}$ in the basis attached to the body using Eq. \eqref{eq:angvel}, as well as the time derivatives of its components.
```
de1dt = (e1[2:,:]-e1[0:-2,:])/(2*dt)
de2dt = (e2[2:,:]-e2[0:-2,:])/(2*dt)
de3dt = (e3[2:,:]-e3[0:-2,:])/(2*dt)
omega = np.hstack((np.sum(de2dt*e3[1:-1,:], axis = 1).reshape(-1,1),
np.sum(de3dt*e1[1:-1,:], axis = 1).reshape(-1,1),
np.sum(de1dt*e2[1:-1,:], axis = 1).reshape(-1,1)))
alpha = (omega[2:,:]-omega[0:-2,:])/(2*dt)
```
Now we need to find the moment caused by the force at the ankles $\vec{\boldsymbol{F_A}}$, $\vec{\boldsymbol{MFAcm}} = \vec{\boldsymbol{r_{A/cm}}} \times \vec{\boldsymbol{F_A}}$. The moment-arm $\vec{\boldsymbol{r_{A/cm}}}$ is the vector from the center of mass to the midpoint of the lateral malleoli.
Besides the description of the moment due to the force $\vec{\boldsymbol{F_A}}$ in the basis attached to the body, we will also describe the force $\vec{\boldsymbol{F_A}}$ in the local basis. This is useful because it has an anatomical meaning. After this we can use the Newton-Euler equations to obtain the moment at the ankle.
After having all signals described in the basis of the body, the moment being applied by the muscles of the ankle is computed using Eq. \eqref{eq:derivangmomVec}.
```
#computing the moment due to the ankle force
racm = (rlA+rrA)/2-rcm
MFAcm = np.cross(racm[0:FA.shape[0],:], FA, axis =1)
MFAcmRotated = np.zeros_like(MFAcm)
FARotated = np.zeros_like(MFAcm)
# rotation matrix and description of the ankle force and moment due to the ankle force
for i in range(MFAcm.shape[0]):
R = np.vstack((e1[i,:],e2[i,:],e3[i,:]))
MFAcmRotated[i,:]=R@MFAcm[i,:]
FARotated[i,:]=R@FA[i,:]
# Second Newton-Euler law to obtain the ankle moment
I = np.array([[I1,0,0],[0,I2,0],[0,0,I3]])
MA = ([email protected]).T + np.cross(omega[0:alpha.shape[0],:], (I@omega[0:alpha.shape[0],:].T).T,axis=1) - MFAcmRotated
plt.figure()
plt.plot(t[2:-2], MA)
plt.legend(('longitudinal','sagittal','mediolateral'))
plt.title('Ankle Torque')
plt.xlabel('t (s)')
plt.ylabel('M (N.m)')
plt.show()
plt.figure()
plt.plot(t[2:-2], FARotated[:,0])
plt.plot(t[2:-2], FARotated[:,1])
plt.plot(t[2:-2], FARotated[:,2])
plt.title('Ankle Force')
plt.legend(('longitudinal','sagittal','mediolateral'))
plt.xlabel('t (s)')
plt.ylabel('F (N)')
plt.show()
```
## Problems
Compute the derivative of the angular momentum of the foot and the leg using one of the following data files acquired during the gait of a subject: ['../data/BiomecII2018_gait_d.txt'](../data/BiomecII2018_gait_d.txt) or ['../data/BiomecII2018_gait_n.txt'](../data/BiomecII2018_gait_n.txt). A possible starting skeleton is sketched below.
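The sketch assumes the gait files follow the same comma-separated layout as the files used above (time in the first column, marker coordinates in the following columns); this is only an assumption, so adapt the loading and slicing to the actual file structure.
```
import numpy as np

# Skeleton only: adapt the column slicing to the actual marker layout of the file.
data_gait = np.loadtxt('../data/BiomecII2018_gait_d.txt', skiprows=1, delimiter=',')
t = data_gait[:, 0]
dt = t[1] - t[0]
# 1) build a basis (e1, e2, e3) attached to the segment from three of its markers;
# 2) compute omega with Eq. (angvel) and alpha with central differences;
# 3) evaluate dH/dt = I.alpha + omega x (I.omega) with the segment's matrix of inertia.
```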
## References
- Beer, F. P.; Johnston, E. R.; Cornwell, P. J. (2010) Vector Mechanics for Engineers: Dynamics.
- Kane, T.; Levinson, D. (1985) [Dynamics: Theory and Applications](https://ecommons.cornell.edu/handle/1813/638). McGraw-Hill, Inc.
- Hibbeler, R. C. (2005) Engineering Mechanics: Dynamics.
- Taylor, J. R. (2005) Classical Mechanics.
- Winter, D. A. (2009) Biomechanics and motor control of human movement. John Wiley and Sons.
- Santos, D. A.; Fukuchi, C. A.; Fukuchi, R. K.; Duarte, M. (2017) A data set with kinematic and ground reaction forces of human balance. PeerJ Preprints.
<a href="https://colab.research.google.com/github/AliaksandrSiarohin/first-order-model/blob/master/demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a><a href="https://kaggle.com/kernels/welcome?src=https://github.com/AliaksandrSiarohin/first-order-model/blob/master/demo.ipynb" target="_parent"><img alt="Kaggle" title="Open in Kaggle" src="https://kaggle.com/static/images/open-in-kaggle.svg"></a>
# Demo for paper "First Order Motion Model for Image Animation"
To try the demo, press the 2 play buttons in order and scroll to the bottom. Note that it may take several minutes to load.
```
!pip install ffmpy &> /dev/null
!git init -q .
!git remote add origin https://github.com/AliaksandrSiarohin/first-order-model
!git pull -q origin master
!git clone -q https://github.com/graphemecluster/first-order-model-demo demo
import IPython.display
import PIL.Image
import cv2
import imageio
import io
import ipywidgets
import numpy
import os.path
import requests
import skimage.transform
import warnings
from base64 import b64encode
from demo import load_checkpoints, make_animation
from ffmpy import FFmpeg
from google.colab import files, output
from IPython.display import HTML, Javascript
from skimage import img_as_ubyte
warnings.filterwarnings("ignore")
display(HTML("""
<style>
.widget-box > * {
flex-shrink: 0;
}
.widget-tab {
min-width: 0;
flex: 1 1 auto;
}
.widget-tab .p-TabBar-tabLabel {
font-size: 15px;
}
.widget-upload {
background-color: tan;
}
.widget-button {
font-size: 18px;
width: 160px;
height: 34px;
line-height: 34px;
}
.widget-dropdown {
width: 250px;
}
.widget-checkbox {
width: 650px;
}
.widget-checkbox + .widget-checkbox {
margin-top: -6px;
}
.input-widget .output_html {
text-align: center;
width: 266px;
height: 266px;
line-height: 266px;
color: lightgray;
font-size: 72px;
}
div.stream {
display: none;
}
.title {
font-size: 20px;
font-weight: bold;
margin: 12px 0 6px 0;
}
.warning {
display: none;
color: red;
margin-left: 10px;
}
.warn {
display: initial;
}
.resource {
cursor: pointer;
border: 1px solid gray;
margin: 5px;
width: 160px;
height: 160px;
min-width: 160px;
min-height: 160px;
max-width: 160px;
max-height: 160px;
-webkit-box-sizing: initial;
box-sizing: initial;
}
.resource:hover {
border: 6px solid crimson;
margin: 0;
}
.selected {
border: 6px solid seagreen;
margin: 0;
}
.input-widget {
width: 266px;
height: 266px;
border: 1px solid gray;
}
.input-button {
width: 268px;
font-size: 15px;
margin: 2px 0 0;
}
.output-widget {
width: 256px;
height: 256px;
border: 1px solid gray;
}
.output-button {
width: 258px;
font-size: 15px;
margin: 2px 0 0;
}
.uploaded {
width: 256px;
height: 256px;
border: 6px solid seagreen;
margin: 0;
}
.label-or {
align-self: center;
font-size: 20px;
margin: 16px;
}
.loading {
align-items: center;
width: fit-content;
}
.loader {
margin: 32px 0 16px 0;
width: 48px;
height: 48px;
min-width: 48px;
min-height: 48px;
max-width: 48px;
max-height: 48px;
border: 4px solid whitesmoke;
border-top-color: gray;
border-radius: 50%;
animation: spin 1.8s linear infinite;
}
.loading-label {
color: gray;
}
.comparison-widget {
width: 256px;
height: 256px;
border: 1px solid gray;
margin-left: 2px;
}
.comparison-label {
color: gray;
font-size: 14px;
text-align: center;
position: relative;
bottom: 3px;
}
@keyframes spin {
from { transform: rotate(0deg); }
to { transform: rotate(360deg); }
}
</style>
"""))
def thumbnail(file):
return imageio.get_reader(file, mode='I', format='FFMPEG').get_next_data()
def create_image(i, j):
image_widget = ipywidgets.Image(
value=open('demo/images/%d%d.png' % (i, j), 'rb').read(),
format='png'
)
image_widget.add_class('resource')
image_widget.add_class('resource-image')
image_widget.add_class('resource-image%d%d' % (i, j))
return image_widget
def create_video(i):
video_widget = ipywidgets.Image(
value=cv2.imencode('.png', cv2.cvtColor(thumbnail('demo/videos/%d.mp4' % i), cv2.COLOR_RGB2BGR))[1].tostring(),
format='png'
)
video_widget.add_class('resource')
video_widget.add_class('resource-video')
video_widget.add_class('resource-video%d' % i)
return video_widget
def create_title(title):
title_widget = ipywidgets.Label(title)
title_widget.add_class('title')
return title_widget
def download_output(button):
complete.layout.display = 'none'
loading.layout.display = ''
files.download('output.mp4')
loading.layout.display = 'none'
complete.layout.display = ''
def convert_output(button):
complete.layout.display = 'none'
loading.layout.display = ''
FFmpeg(inputs={'output.mp4': None}, outputs={'scaled.mp4': '-vf "scale=1080x1080:flags=lanczos,pad=1920:1080:420:0" -y'}).run()
files.download('scaled.mp4')
loading.layout.display = 'none'
complete.layout.display = ''
def back_to_main(button):
complete.layout.display = 'none'
main.layout.display = ''
label_or = ipywidgets.Label('or')
label_or.add_class('label-or')
image_titles = ['Peoples', 'Cartoons', 'Dolls', 'Game of Thrones', 'Statues']
image_lengths = [8, 4, 8, 9, 4]
image_tab = ipywidgets.Tab()
image_tab.children = [ipywidgets.HBox([create_image(i, j) for j in range(length)]) for i, length in enumerate(image_lengths)]
for i, title in enumerate(image_titles):
image_tab.set_title(i, title)
input_image_widget = ipywidgets.Output()
input_image_widget.add_class('input-widget')
upload_input_image_button = ipywidgets.FileUpload(accept='image/*', button_style='primary')
upload_input_image_button.add_class('input-button')
image_part = ipywidgets.HBox([
ipywidgets.VBox([input_image_widget, upload_input_image_button]),
label_or,
image_tab
])
video_tab = ipywidgets.Tab()
video_tab.children = [ipywidgets.HBox([create_video(i) for i in range(5)])]
video_tab.set_title(0, 'All Videos')
input_video_widget = ipywidgets.Output()
input_video_widget.add_class('input-widget')
upload_input_video_button = ipywidgets.FileUpload(accept='video/*', button_style='primary')
upload_input_video_button.add_class('input-button')
video_part = ipywidgets.HBox([
ipywidgets.VBox([input_video_widget, upload_input_video_button]),
label_or,
video_tab
])
model = ipywidgets.Dropdown(
description="Model:",
options=[
'vox',
'vox-adv',
'taichi',
'taichi-adv',
'nemo',
'mgif',
'fashion',
'bair'
]
)
warning = ipywidgets.HTML('<b>Warning:</b> Upload your own images and videos (see README)')
warning.add_class('warning')
model_part = ipywidgets.HBox([model, warning])
relative = ipywidgets.Checkbox(description="Relative keypoint displacement (Inherit object proportions from the video)", value=True)
adapt_movement_scale = ipywidgets.Checkbox(description="Adapt movement scale (Don’t touch unless you know what you are doing)", value=True)
generate_button = ipywidgets.Button(description="Generate", button_style='primary')
main = ipywidgets.VBox([
create_title('Choose Image'),
image_part,
create_title('Choose Video'),
video_part,
create_title('Settings'),
model_part,
relative,
adapt_movement_scale,
generate_button
])
loader = ipywidgets.Label()
loader.add_class("loader")
loading_label = ipywidgets.Label("This may take several minutes to process…")
loading_label.add_class("loading-label")
loading = ipywidgets.VBox([loader, loading_label])
loading.add_class('loading')
output_widget = ipywidgets.Output()
output_widget.add_class('output-widget')
download = ipywidgets.Button(description='Download', button_style='primary')
download.add_class('output-button')
download.on_click(download_output)
convert = ipywidgets.Button(description='Convert to 1920×1080', button_style='primary')
convert.add_class('output-button')
convert.on_click(convert_output)
back = ipywidgets.Button(description='Back', button_style='primary')
back.add_class('output-button')
back.on_click(back_to_main)
comparison_widget = ipywidgets.Output()
comparison_widget.add_class('comparison-widget')
comparison_label = ipywidgets.Label('Comparison')
comparison_label.add_class('comparison-label')
complete = ipywidgets.HBox([
ipywidgets.VBox([output_widget, download, convert, back]),
ipywidgets.VBox([comparison_widget, comparison_label])
])
display(ipywidgets.VBox([main, loading, complete]))
display(Javascript("""
var images, videos;
function deselectImages() {
images.forEach(function(item) {
item.classList.remove("selected");
});
}
function deselectVideos() {
videos.forEach(function(item) {
item.classList.remove("selected");
});
}
function invokePython(func) {
google.colab.kernel.invokeFunction("notebook." + func, [].slice.call(arguments, 1), {});
}
setTimeout(function() {
(images = [].slice.call(document.getElementsByClassName("resource-image"))).forEach(function(item) {
item.addEventListener("click", function() {
deselectImages();
item.classList.add("selected");
invokePython("select_image", item.className.match(/resource-image(\d\d)/)[1]);
});
});
images[0].classList.add("selected");
(videos = [].slice.call(document.getElementsByClassName("resource-video"))).forEach(function(item) {
item.addEventListener("click", function() {
deselectVideos();
item.classList.add("selected");
invokePython("select_video", item.className.match(/resource-video(\d)/)[1]);
});
});
videos[0].classList.add("selected");
}, 1000);
"""))
selected_image = None
def select_image(filename):
global selected_image
selected_image = resize(PIL.Image.open('demo/images/%s.png' % filename).convert("RGB"))
input_image_widget.clear_output(wait=True)
with input_image_widget:
display(HTML('Image'))
input_image_widget.remove_class('uploaded')
output.register_callback("notebook.select_image", select_image)
selected_video = None
def select_video(filename):
global selected_video
selected_video = 'demo/videos/%s.mp4' % filename
input_video_widget.clear_output(wait=True)
with input_video_widget:
display(HTML('Video'))
input_video_widget.remove_class('uploaded')
output.register_callback("notebook.select_video", select_video)
def resize(image, size=(256, 256)):
w, h = image.size
d = min(w, h)
r = ((w - d) // 2, (h - d) // 2, (w + d) // 2, (h + d) // 2)
return image.resize(size, resample=PIL.Image.LANCZOS, box=r)
def upload_image(change):
global selected_image
for name, file_info in upload_input_image_button.value.items():
content = file_info['content']
if content is not None:
selected_image = resize(PIL.Image.open(io.BytesIO(content)).convert("RGB"))
input_image_widget.clear_output(wait=True)
with input_image_widget:
display(selected_image)
input_image_widget.add_class('uploaded')
display(Javascript('deselectImages()'))
upload_input_image_button.observe(upload_image, names='value')
def upload_video(change):
global selected_video
for name, file_info in upload_input_video_button.value.items():
content = file_info['content']
if content is not None:
selected_video = content
preview = resize(PIL.Image.fromarray(thumbnail(selected_video)).convert("RGB"))
input_video_widget.clear_output(wait=True)
with input_video_widget:
display(preview)
input_video_widget.add_class('uploaded')
display(Javascript('deselectVideos()'))
upload_input_video_button.observe(upload_video, names='value')
def change_model(change):
if model.value.startswith('vox'):
warning.remove_class('warn')
else:
warning.add_class('warn')
model.observe(change_model, names='value')
def generate(button):
main.layout.display = 'none'
loading.layout.display = ''
filename = model.value + ('' if model.value == 'fashion' else '-cpk') + '.pth.tar'
if not os.path.isfile(filename):
download = requests.get(requests.get('https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key=https://yadi.sk/d/lEw8uRm140L_eQ&path=/' + filename).json().get('href'))
with open(filename, 'wb') as checkpoint:
checkpoint.write(download.content)
reader = imageio.get_reader(selected_video, mode='I', format='FFMPEG')
fps = reader.get_meta_data()['fps']
driving_video = []
for frame in reader:
driving_video.append(frame)
generator, kp_detector = load_checkpoints(config_path='config/%s-256.yaml' % model.value, checkpoint_path=filename)
predictions = make_animation(
skimage.transform.resize(numpy.asarray(selected_image), (256, 256)),
[skimage.transform.resize(frame, (256, 256)) for frame in driving_video],
generator,
kp_detector,
relative=relative.value,
adapt_movement_scale=adapt_movement_scale.value
)
if selected_video == 'demo/videos/0.mp4':
imageio.mimsave('temp.mp4', [img_as_ubyte(frame) for frame in predictions], fps=fps)
FFmpeg(inputs={'temp.mp4': None, selected_video: None}, outputs={'output.mp4': '-c copy -y'}).run()
else:
imageio.mimsave('output.mp4', [img_as_ubyte(frame) for frame in predictions], fps=fps)
loading.layout.display = 'none'
complete.layout.display = ''
with output_widget:
display(HTML('<video id="left" controls src="data:video/mp4;base64,%s" />' % b64encode(open('output.mp4', 'rb').read()).decode()))
with comparison_widget:
display(HTML('<video id="right" src="data:video/mp4;base64,%s" />' % b64encode(open(selected_video, 'rb').read() if type(selected_video) is str else selected_video).decode()))
display(Javascript("""
(function(left, right) {
left.addEventListener("play", function() {
right.play();
});
left.addEventListener("pause", function() {
right.pause();
});
left.addEventListener("seeking", function() {
right.currentTime = left.currentTime;
});
})(document.getElementById("left"), document.getElementById("right"));
"""))
generate_button.on_click(generate)
loading.layout.display = 'none'
complete.layout.display = 'none'
select_image('00')
select_video('0')
```
# Notebook01: Compute Order Parameters
In this notebook, we'll show you how to do the complete pipeline from downloading a united-atom trajectory file (Berger POPC) from Zenodo, reconstructing hydrogens and calculating the order parameters using **buildH**. In addition, we show how to plot the calculated order parameters using matplotlib.
## Foreword
### Launching Unix commands within a Jupyter Notebook
Even though **buildH** can be used as a module (see [Notebook04](Notebook_04_library.ipynb)), it is mainly intended to be used on the Unix command line in a terminal. Thus, many cells in this notebook run Unix commands instead of Python instructions. All Unix commands are prefixed with the symbol `!`. For example:
```
!ls
```
### Checking buildH installation
First, you have to install **buildH**. For that you can follow the instructions on the main [README](https://github.com/patrickfuchs/buildH/blob/master/README.md) of the **buildH** repository or the [installation help page](../installation.md). If you did not install **buildH**, this notebook will not work.
Before launching this notebook, you should have activated the **buildH** conda environment (the `$` below corresponds to your prompt):
```
$ conda activate buildh
```
If this command worked, `(buildh)` should appear at the beginning of your prompt, indicating that the buildh environment has been properly activated.
Once done, you can launch this Jupyter notebook:
```
$ jupyter lab
```
At this point, we should be able to launch **buildH** within this notebook:
```
!buildH
```
If you do not get this usage message and instead see an error, you probably did not activate the **buildH** conda environment correctly before launching this notebook. Another possibility is that the installation of the **buildH** environment did not succeed. In either case, please refer to the **buildH** installation instructions.
## Trajectory download
Most of the time, you will use **buildH** on trajectories that you have produced. Here, for the sake of learning, let us download a [trajectory](https://doi.org/10.5281/zenodo.13279) of a Berger POPC membrane from Zenodo (taken from the NMRlipidsI project). Downloading the trr file should take a while, so be patient!
```
!wget https://zenodo.org/record/13279/files/popc0-25ns.trr
!wget https://zenodo.org/record/13279/files/endCONF.gro
!ls
```
OK, the two freshly downloaded files are here. Now we need one last file: a definition file relating a generic name for each C-H bond to the atom names in the PDB file. In **buildH**, there is a specific directory [def_files](https://github.com/patrickfuchs/buildH/tree/master/def_files) from which you can download such a def file (note that you can also find some on the [NMRlipids repo](https://github.com/NMRLipids/MATCH/tree/master/scripts/orderParm_defs)). Here we download the `.def` file corresponding to Berger POPC:
```
!wget https://raw.githubusercontent.com/patrickfuchs/buildH/master/def_files/Berger_POPC.def
!ls
```
Let us have a look at this def file:
```
!head Berger_POPC.def
```
The first column is the generic C-H name, the second the residue (lipid) name, the third the carbon PDB name, and the last the hydrogen PDB name. This file tells **buildH** which hydrogens to rebuild and on which C-H bonds to calculate the order parameters. This means it is possible to provide only a subset of the possible C-Hs if, for example, you only want those from the polar heads. Note that if you request **buildH** to output a trajectory (not done in this notebook), the def file must contain **all** possible C-Hs of the lipid considered. In that case, the newly built hydrogens take the names written in the 4th column of this def file.
Now, let's have a quick look at the `.gro` file:
```
!head endCONF.gro
```
We see that in this `.gro` file containing POPC lipids, the residue name is `PLA`. This residue name will be important when launching **buildH**.
Great, all files were successfully downloaded and we can now proceed. Let us launch **buildH**!
## Launching **buildH** for order parameter calculation
Now, we can launch **buildH** on this downloaded trajectory. For that we need a few arguments:
- the `-c` argument corresponding to the coordinate file: a pdb or gro file, here `endCONF.gro`, from which **buildH** infers the topology of the system;
- the `-l` argument which tells **buildH** which lipid we want to analyze, here `Berger_PLA` (`Berger` corresponds to the force field name used for this simulation, `PLA` is the residue name of the lipid we want to analyze in the pdb/gro file); note that if you simulated a mixture of different lipids, you will have to launch **buildH** separately for each lipid (see [Notebook03](Notebook_03_mixture_POPC_POPE.ipynb) and [Notebook05](Notebook_05_mixture_POPC_cholesterol.ipynb));
- the `-d` argument corresponding to the def file we just downloaded, here `Berger_POPC.def`;
- the `-t` argument which is the trajectory file, here `popc0-25ns.trr`;
- the `-o` argument which tells **buildH** the output file name for storing the calculated order parameters.
Please be patient, this will take a while since we have 2500 frames. Fortunately, **buildH** geometrical calculations are accelerated using [Numba](https://numba.pydata.org/). On a single core of a Xeon @ 3.60 GHz, it took ~7 minutes.
```
!buildH -c endCONF.gro -l Berger_PLA -d Berger_POPC.def -t popc0-25ns.trr -o OP_POPC_Berger_25ns.out
```
In the cell above, we launched **buildH** to show you how it works. For each frame handled by **buildH**, a message `Dealing with frame XXX at YYY ps.` is printed to the output. Because this takes some time and would have printed 2500 `Dealing with frame...` lines, we simply pressed Ctrl-C so that this notebook is not too polluted by **buildH** output. This explains the Python `KeyboardInterrupt` message.
Note that you can also launch **buildH** in a regular shell this way:
```
$ buildH -c endCONF.gro -l Berger_PLA -d Berger_POPC.def -t popc0-25ns.trr -o OP_POPC_Berger_25ns.out
```
Or this way:
```
$ buildH -c endCONF.gro -l Berger_PLA -d Berger_POPC.def -t popc0-25ns.trr -o OP_POPC_Berger_25ns.out > /dev/null
```
In this case, all the `Dealing with frame...` messages go to `/dev/null`, so there is no more lengthy output ;-)
Once completed, we should have a new file called `OP_POPC_Berger_25ns.out` containing the order parameters. You can choose any name with the option `-o` when you launch **buildH**. If you did not run **buildH** on the full trajectory, you can find that file [here](data/OP_POPC_Berger_25ns.out).
```
!ls
```
## Analyzing results
First, let's have a look at what this output file `OP_POPC_Berger_25ns.out` contains.
```
!head OP_POPC_Berger_25ns.out
```
For each C-H we get the mean order parameter `OP_mean` (average over frames and over residues), its standard deviation `OP_stddev` (standard deviation over residues), and its standard error of the mean `OP_stem` (`OP_stddev` divided by the square root of the number of residues). More information about how `OP_mean`, `OP_stddev` and `OP_stem` are computed can be found in the documentation: [Order parameters and statistics](../order_parameter.md)
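To make these definitions concrete, here is a minimal sketch of how such statistics could be computed from per-frame, per-residue order parameters. The array shape and the 128 lipids are assumptions for illustration only; the real values come from **buildH**, not from random numbers.
```
import numpy as np

# Hypothetical per-frame, per-residue order parameters for one C-H bond
# (2501 frames x 128 lipids) -- made-up numbers just to illustrate the statistics.
op = -0.2 + 0.02 * np.random.randn(2501, 128)

op_per_residue = op.mean(axis=0)              # average over frames for each lipid
OP_mean = op_per_residue.mean()               # average over residues
OP_stddev = op_per_residue.std()              # standard deviation over residues
OP_stem = OP_stddev / np.sqrt(op.shape[1])    # standard error of the mean
print(OP_mean, OP_stddev, OP_stem)
```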
Now we can make a plot of the order parameters. For that, we can use [Pandas](https://pandas.pydata.org/) and its very convenient [dataframes](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) to load the **buildH** output. Pandas dataframes allow the selection of any row or column in a very powerful way. If you are not familiar with pandas dataframes, you can take a look [here](https://realpython.com/pandas-dataframe/). To read the **buildH** output and generate a dataframe, we use the Pandas function [.read_csv()](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html), which has a great deal of options. Here we tell it to use a specific separator (`\s+`, which means any run of whitespace), to skip the 2 comment lines at the beginning of the file (those beginning with a `#`), and to assign specific names to each column.
```
import pandas as pd
df = pd.read_csv("OP_POPC_Berger_25ns.out", sep="\s+", skiprows=2,
names=["CH_name", "res", "carbon", "hydrogen", "OP", "STD", "STEM"])
df
```
If we want only the order parameters for the polar head, we can use the following. Indexing the dataframe with a list of column names, as in `df[["CH_name", "OP"]]`, selects the columns we are interested in. Then, with the `.iloc[]` indexer we can get the first 18 rows corresponding to the polar head (beware, counting starts at 0 as for all sequential objects in Python).
```
df[["CH_name", "OP"]].iloc[:18]
```
Let us do the same for the palmitoyl chain.
```
df[["CH_name", "OP"]].iloc[18:49]
```
And now, for the oleoyl chain.
```
df[["CH_name", "OP"]].iloc[49:]
```
## Plot of the order parameter
Now that we have loaded the data with pandas and can select any part of the lipid, we can use matplotlib to make a nice plot. For example, we can plot the order parameters of the palmitoyl chain. Note that we select rows 18 to 45 to show only the first 15 carbons, since we do not want the final CH3 of carbon 16.
```
import numpy as np
import matplotlib.pyplot as plt
OPs = df["OP"].iloc[18:46]
x = np.arange(len(OPs))
plt.scatter(x, -OPs)
plt.xticks(x, df["CH_name"].iloc[18:46], rotation='vertical')
plt.xlabel("C-H name")
plt.ylabel("-S_CH")
plt.title("Order parameter palmitoyl chain, Berger POPC")
```
Another example with the polar head, showing the alpha and beta carbons of the choline and the glycerol C-Hs.
```
import numpy as np
import matplotlib.pyplot as plt
OPs = df["OP"].iloc[9:18]
STEMs = df["STEM"].iloc[9:18]
# These 2 avoid plotting a line between the points.
lines = {'linestyle': 'None'}
plt.rc('lines', **lines)
x = np.arange(len(OPs))
plt.errorbar(x, -OPs, STEMs, fmt='', marker='.')
plt.xticks(x, df["CH_name"].iloc[9:18], rotation='vertical')
plt.xlabel("C-H name")
plt.ylabel("-S_CH")
plt.title("Order parameter polar head, Berger POPC")
```
Since the error bars are very small, we used `marker='.'` to draw tiny points. If we use the standard `marker='o'`, the error bars are often not seen because they are smaller than the point itself.
## Conclusion
In this notebook, we showed you how to use **buildH** to build hydrogens from a united-atom trajectory and calculate the order parameters from it. Then, we showed some convenient uses of the pandas and matplotlib Python modules to select the data we are interested in and plot the corresponding results.
|
github_jupyter
|
!ls
$ conda activate buildh
$ jupyter lab
!buildH
!wget https://zenodo.org/record/13279/files/popc0-25ns.trr
!wget https://zenodo.org/record/13279/files/endCONF.gro
!ls
!wget https://raw.githubusercontent.com/patrickfuchs/buildH/master/def_files/Berger_POPC.def
!ls
!head Berger_POPC.def
!head endCONF.gro
!buildH -c endCONF.gro -l Berger_PLA -d Berger_POPC.def -t popc0-25ns.trr -o OP_POPC_Berger_25ns.out
$ buildH -c endCONF.gro -l Berger_PLA -d Berger_POPC.def -t popc0-25ns.trr -o OP_POPC_Berger_25ns.out
$ buildH -c endCONF.gro -l Berger_PLA -d Berger_POPC.def -t popc0-25ns.trr -o OP_POPC_Berger_25ns.out > /dev/null
!ls
!head OP_POPC_Berger_25ns.out
import pandas as pd
df = pd.read_csv("OP_POPC_Berger_25ns.out", sep="\s+", skiprows=2,
names=["CH_name", "res", "carbon", "hydrogen", "OP", "STD", "STEM"])
df
df[["CH_name", "OP"]].iloc[:18]
df[["CH_name", "OP"]].iloc[18:49]
df[["CH_name", "OP"]].iloc[49:]
import numpy as np
import matplotlib.pyplot as plt
OPs = df["OP"].iloc[18:46]
x = np.arange(len(OPs))
plt.scatter(x, -OPs)
plt.xticks(x, df["CH_name"].iloc[18:46], rotation='vertical')
plt.xlabel("C-H name")
plt.ylabel("-S_CH")
plt.title("Order parameter palmitoyl chain, Berger POPC")
import numpy as np
import matplotlib.pyplot as plt
OPs = df["OP"].iloc[9:18]
STEMs = df["STEM"].iloc[9:18]
# These 2 avoid plotting a line between the points.
lines = {'linestyle': 'None'}
plt.rc('lines', **lines)
x = np.arange(len(OPs))
plt.errorbar(x, -OPs, STEMs, fmt='', marker='.')
plt.xticks(x, df["CH_name"].iloc[9:18], rotation='vertical')
plt.xlabel("C-H name")
plt.ylabel("-S_CH")
plt.title("Order parameter polar head, Berger POPC")
| 0.547948 | 0.983231 |
```
import json
import pickle
from indra.databases.hgnc_client import get_hgnc_name
from indra.literature.adeft_tools import universal_extract_text
from indra_db.util.content_scripts import get_text_content_from_pmids
from indra_db.util.content_scripts import get_stmts_with_agent_text_like
from indra_db.util.content_scripts import get_text_content_from_stmt_ids
from adeft.discover import AdeftMiner
from adeft.gui import ground_with_gui
from adeft.modeling.label import AdeftLabeler
from adeft.modeling.classify import AdeftClassifier
from adeft.disambiguate import AdeftDisambiguator
from adeft_indra.ground import gilda_ground
shortform = 'BI'
stmts = get_stmts_with_agent_text_like(shortform)[shortform]
ids, content = get_text_content_from_stmt_ids(stmts)
shortform_texts = [universal_extract_text(text, contains=[shortform]) for text in content.values() if text]
miner = AdeftMiner(shortform)
miner.process_texts(shortform_texts)
```
It is then necessary to check whether Acromine produced the correct results. We must fix errors manually.
```
shortform_texts[5]
top = miner.top(15)
top
longforms = miner.get_longforms(cutoff=2.9)
longforms
longforms = [lf for i, lf in enumerate(longforms) if i in [1, 2, 3, 4, 5, 6, 7]]
longforms.extend([top[3], top[7], top[11]])
longforms.sort(key=lambda x: -x[1])
longforms
longforms, scores = zip(*longforms)
grounding_map = {}
for longform in longforms:
grounding = gilda_ground(longform)
if grounding[0]:
grounding_map[longform] = f'{grounding[0]}:{grounding[1]}'
grounding_map
result = ground_with_gui(longforms, scores, grounding_map=grounding_map)
result
grounding_map, names, pos_labels = ({'medetomidine': 'CHEBI:CHEBI:48552',
'mediator': 'ungrounded',
'mediterranean': 'ungrounded',
'metathesis electrodialysis': 'ungrounded',
'microendoscopic discectomy': 'ungrounded',
'minimal effective dose': 'ungrounded',
'minimal erythema dose': 'ungrounded',
'morphine equivalent dose': 'ungrounded',
'multiple epiphyseal dysplasia': 'MESH:D010009',
'mycoepoxydiene': 'PUBCHEM:11300750'},
{'MESH:D010009': 'Osteochondrodysplasias',
'PUBCHEM:11300750': 'Mycoepoxydiene'},
['CHEBI:CHEBI:48552', 'PUBCHEM:11300750'])
grounding_dict = {'MED': grounding_map}
classifier = AdeftClassifier('MED', pos_labels)
param_grid = {'C': [100.0], 'max_features': [1000]}
labeler = AdeftLabeler(grounding_dict)
corpus = labeler.build_from_texts(shortform_texts)
texts, labels = zip(*corpus)
len(texts)
classifier.cv(texts, labels, param_grid, cv=5, n_jobs=8)
classifier.stats
disamb = AdeftDisambiguator(classifier, grounding_dict, names)
disamb.disambiguate(texts[2])
disamb.dump('MED', '../results')
from adeft.disambiguate import load_disambiguator
d = load_disambiguator('HIR', '../results')
d.disambiguate(texts[0])
a = load_disambiguator('AR')
a.disambiguate('Androgen')
logit = d.classifier.estimator.named_steps['logit']
logit.classes_
```
|
github_jupyter
|
import json
import pickle
from indra.databases.hgnc_client import get_hgnc_name
from indra.literature.adeft_tools import universal_extract_text
from indra_db.util.content_scripts import get_text_content_from_pmids
from indra_db.util.content_scripts import get_stmts_with_agent_text_like
from indra_db.util.content_scripts import get_text_content_from_stmt_ids
from adeft.discover import AdeftMiner
from adeft.gui import ground_with_gui
from adeft.modeling.label import AdeftLabeler
from adeft.modeling.classify import AdeftClassifier
from adeft.disambiguate import AdeftDisambiguator
from adeft_indra.ground import gilda_ground
shortform = 'BI'
stmts = get_stmts_with_agent_text_like(shortform)[shortform]
ids, content = get_text_content_from_stmt_ids(stmts)
shortform_texts = [universal_extract_text(text, contains=[shortform]) for text in content.values() if text]
miner = AdeftMiner(shortform)
miner.process_texts(shortform_texts)
shortform_texts[5]
top = miner.top(15)
top
longforms = miner.get_longforms(cutoff=2.9)
longforms
longforms = [lf for i, lf in enumerate(longforms) if i in [1, 2, 3, 4, 5, 6, 7]]
longforms.extend([top[3], top[7], top[11]])
longforms.sort(key=lambda x: -x[1])
longforms
longforms, scores = zip(*longforms)
grounding_map = {}
for longform in longforms:
grounding = gilda_ground(longform)
if grounding[0]:
grounding_map[longform] = f'{grounding[0]}:{grounding[1]}'
grounding_map
result = ground_with_gui(longforms, scores, grounding_map=grounding_map)
result
grounding_map, names, pos_labels = ({'medetomidine': 'CHEBI:CHEBI:48552',
'mediator': 'ungrounded',
'mediterranean': 'ungrounded',
'metathesis electrodialysis': 'ungrounded',
'microendoscopic discectomy': 'ungrounded',
'minimal effective dose': 'ungrounded',
'minimal erythema dose': 'ungrounded',
'morphine equivalent dose': 'ungrounded',
'multiple epiphyseal dysplasia': 'MESH:D010009',
'mycoepoxydiene': 'PUBCHEM:11300750'},
{'MESH:D010009': 'Osteochondrodysplasias',
'PUBCHEM:11300750': 'Mycoepoxydiene'},
['CHEBI:CHEBI:48552', 'PUBCHEM:11300750'])
grounding_dict = {'MED': grounding_map}
classifier = AdeftClassifier('MED', pos_labels)
len(texts)
param_grid = {'C': [100.0], 'max_features': [1000]}
labeler = AdeftLabeler(grounding_dict)
corpus = labeler.build_from_texts(shortform_texts)
texts, labels = zip(*corpus)
classifier.cv(texts, labels, param_grid, cv=5, n_jobs=8)
classifier.stats
disamb = AdeftDisambiguator(classifier, grounding_dict, names)
disamb.disambiguate(texts[2])
disamb.dump('MED', '../results')
from adeft.disambiguate import load_disambiguator
d = load_disambiguator('HIR', '../results')
d.disambiguate(texts[0])
a = load_disambiguator('AR')
a.disambiguate('Androgen')
logit = d.classifier.estimator.named_steps['logit']
logit.classes_
| 0.282988 | 0.257158 |
This is an MRI-based reconstruction demo for 2D MRI data. The network is relatively similar to the AutoMap technique (https://arxiv.org/abs/1704.08841). This is a relatively 'brute force' approach to image reconstruction in which the transform is given no direct knowledge of the physics (although the network architecture is a bit tuned to the problem). In this work, we assume one direction is fully sampled (i.e. frequency encoded).
# MRI Sampling
In MRI, the data is often discretely Fourier transformed in one direction, leading to the discretized signal model:
$s(k)=\sum_{j=1}^{N}\rho (x_j)e^{i2\pi k x_j}$
The expected reconstruction for fully sampled data is an inverse discrete Fourier transform:
$\rho(x)=\sum_{j=1}^{N}s(k_j)e^{-i2\pi k_j x}$
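As a sanity check of these formulas, here is a minimal NumPy sketch (with an arbitrary small `N`, purely for illustration) showing that both the forward model and the reconstruction are just square matrix multiplies, which is also a hint for question 1 below:
```
import numpy as np

N = 8
x = np.arange(N)                                      # pixel indices
k = np.arange(N) / N                                  # discrete k-space locations
rho = np.random.randn(N) + 1j * np.random.randn(N)    # toy 1D "image"

F = np.exp(1j * 2 * np.pi * np.outer(k, x))           # forward DFT matrix
s = F @ rho                                           # simulated k-space signal
rho_hat = np.conj(F).T @ s / N                        # reconstruction: another matrix multiply

print(np.allclose(rho, rho_hat))                      # True: the transform is exactly invertible
```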
# Questions to think about:
1) What is the minimal network architecture to compute a DFT? It's a square matrix multiply.
2) What is the appropriate loss function if we wish to train an image reconstruction?
3) What is the role of the convolutional layers? When are they needed?
4) What is the network learning if you train on simulated images?
# PyTorch
This exercise is written in PyTorch (https://pytorch.org/docs/stable/index.html). There is also a Keras version available online. PyTorch has some different coding constructs, but the ideas are similar to those in Keras.
# Setup
```
'''
In python you need to import libraries in order to use them.
'''
# Using PyTorch for this code
import torch
# Utilities
import numpy as np
import math
# Plotting
import matplotlib.pyplot as plt
from IPython.display import clear_output
'''
We will define some functions to use later. Normally you might put these in another file and import them
'''
# Some support functions
def montage( img_in, size=(3,5) ):
for j in range(size[0]):
plot_image = img_in[0+size[1]*j,:,:]
for i in range(size[1]-1):
plot_image = np.concatenate((plot_image, img_in[1+i+size[1]*j,:,:]), axis=1)
if j == 0:
img = plot_image
else:
img = np.concatenate((img,plot_image),axis=0)
return img
def complex_to_channels( img_in):
return(np.stack(img_in.real,img_in.imag))
def channels_to_complex( img_in):
return(img_in[...,0]+1j*img_in[...,1])
'''
Mount your google drive, we'll grab data from the shared folder
'''
from google.colab import drive
drive.mount('/content/drive')
```
# Training Images
We train this network using a simulation environment. Images are grabbed from a set of MRI brain images. This example uses the output DICOMs, which have been preprocessed. We then simulate the conversion from image to MRI raw data.
```
# load training, validation, and testing data
import h5py
with h5py.File('/content/drive/MyDrive/ML4MI_BOOTCAMP_DATA/ImageReconstructionData.h5','r') as hf:
x_train = np.array(hf['x_train'])
x_val = np.array(hf['x_val'])
print(f'Validate Dataset Size {x_val.shape}')
print(f'Train Dataset Size {x_train.shape}')
N = x_train.shape[-2]
```
# Simulate Sampling
MRI data generation is approximately a discrete sampling of the continuous Fourier transform of the data. In this example, we use a discrete Fourier transform to approximate this. We also consider the case where we randomly remove data points. This would allow us to scan faster and is used in compressed sensing applications (e.g. https://onlinelibrary.wiley.com/doi/pdf/10.1002/mrm.21391 ). Noise is added as complex, white, Gaussian noise (MRI noise is so-called Johnson/Nyquist noise). Things to try:
1) Add higher levels of noise. What happens to the training rate and output images?
2) Increase the undersampling rate. How does the neural network compare to traditional aproaches?
3) Comment out the FFT shift; does the network still learn the transform?
```
'''
This creates a sampling mask which can be used to subsample the data.
'''
# Get the number of phase encodes
undersample_factor = 1.5
noise_level = 0.001
number_phase_encodes = int(N/undersample_factor)
print('Using ' + str(number_phase_encodes) + ' phase encode')
# Create a random mask to resample the data
idx = np.full(N, False)
idx[:number_phase_encodes] = True
np.random.seed(1) # Keep this one so code is reproducible
np.random.shuffle(idx)
sampling_mask = idx
'''
Fourier transform, subsample, and add noise (Train Data)
'''
Nexamples = x_train.shape[0]
kspace_train = np.zeros((Nexamples,N,number_phase_encodes,2),x_train.dtype)
for example in range(x_train.shape[0]):
if example % 1000 == 0:
print(f'Working on example {example} of {Nexamples}')
# Grab one image
temp = x_train[example,:,:,0] + 1j*x_train[example,:,:,1]
# Fourier Transform
kspace_temp = np.fft.fftn(temp,axes=(1,))/N
kspace_temp = np.fft.fftshift(kspace_temp,axes=(1,))
kspace_temp =np.stack( (kspace_temp.real, kspace_temp.imag), axis=-1)
# Subsample
kspace_temp = kspace_temp[:,sampling_mask,:]
# Add noise
kspace_temp += noise_level*np.random.randn(*kspace_temp.shape)
# Put back
kspace_train[example,:,:,:] = kspace_temp
print('Dimensions of training data are ' + str(kspace_train.shape) + '[ Examples x Nx x Ny x Channels]')
'''
Fourier transform, subsample, and add noise (Validate Data)
'''
Nexamples = x_val.shape[0]
kspace_val = np.zeros((Nexamples,N,number_phase_encodes,2),x_train.dtype)
for example in range(x_val.shape[0]):
if example % 1000 == 0:
print(f'Working on example {example} of {Nexamples}')
# Grab one image
temp = x_val[example,:,:,0] + 1j*x_val[example,:,:,1]
# Fourier Transform
kspace_temp = np.fft.fftn(temp,axes=(1,))/N
kspace_temp = np.fft.fftshift(kspace_temp,axes=(1,))
kspace_temp =np.stack( (kspace_temp.real, kspace_temp.imag), axis=-1)
# Subsample
kspace_temp = kspace_temp[:,sampling_mask,:]
# Add noise
kspace_temp += noise_level*np.random.randn(*kspace_temp.shape)
# Put back
kspace_val[example,:,:,:] = kspace_temp
print('Dimensions of validation data are ' + str(kspace_val.shape) + '[ Examples x Nx x Ny x Channels]')
example = 400
# Show one image and k-space pair
img = x_train[example,:,:,0] + 1j*x_train[example,:,:,1]
plt.figure()
plt.subplot(121)
plt.imshow(np.abs(img),cmap='gray')
plt.title('Grayscale')
img = kspace_train[example,:,:,0] + 1j*kspace_train[example,:,:,1]
plt.subplot(122)
plt.imshow(np.abs(img),cmap='gray')
plt.title('K-Space (1D FFT)')
plt.show()
```
# Build the network architecture
In PyTorch, we need to create a model just as we do in Keras. However, models are usually defined as a class. A class is a structure that holds data and functions. We base our classes on torch.nn.Module and only modify the __init__ and forward functions. The __init__ function defines the modules (convolutions, activations) that we will use in the forward pass. This method is very flexible, and PyTorch's autograd functionality will generally handle all the backpropagation.
**FYI** PyTorch defaults to channels first. So the images will be [batch index, channels, x, y]
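For example, a minimal sketch (the array name and sizes are made up for illustration) of moving a channels-last NumPy array like the ones built above into PyTorch's channels-first layout:
```
import numpy as np
import torch

batch = np.zeros((32, 120, 80, 2), dtype=np.float32)     # [batch, Nx, Ny, channels]
batch_torch = torch.tensor(np.moveaxis(batch, -1, 1))    # -> [batch, channels, Nx, Ny]
print(batch_torch.shape)                                 # torch.Size([32, 2, 120, 80])
```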
We will define the following functions.
* FullyConnected, a module that performs the fully connected layers
* Denoiser, a module that performs denoising using convolutions
* AutoMap, a simple module which sequentially applies FullyConnected and Denoiser
```
import torch
import torch.nn as nn
import torch.nn.functional as F
class FullyConnected(torch.nn.Module):
"""A fully connected network to transform from k-space to images.
"""
def __init__(self, input_phase_encodes, image_size):
super(FullyConnected, self).__init__()
self.input_phase_encodes = input_phase_encodes
self.image_size = image_size
        # These two convolutions act as fully connected layers along one dimension. We are not
        # using a fully connected network in 2D due to memory constraints and the fact
        # that the data is undersampled in only one dimension.
self.conv1 = nn.Conv2d(in_channels=2, out_channels=2*image_size, kernel_size=(1, input_phase_encodes), padding='valid')
self.conv2 = nn.Conv2d(in_channels=2, out_channels=2*image_size, kernel_size=(1, image_size), padding='valid')
def forward(self, data):
# This is a fully connected network followed by reshaping into an image
layer = self.conv1(data)
layer = layer.view((-1,2,self.image_size,self.image_size))
layer = torch.moveaxis(layer,-2,-1)
# This is a fully connected network followed by reshaping into an image
layer = self.conv2(layer)
layer = layer.view((-1,2,self.image_size,self.image_size))
layer = torch.moveaxis(layer,-2,-1)
return layer
class Denoiser(torch.nn.Module):
"""An image to image denoiser
"""
def __init__(self, depth=3, initial_features=8):
super(Denoiser, self).__init__()
# The channels start as complex (2 channel) and we can increase them during the denoiser
in_channels = 2
out_channels = initial_features
        # This builds the list of operators, similar to Keras Sequential, with each being a convolution followed by an activation
layers = []
for l in range(depth):
layers.append( nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(3,3), padding='same'))
in_channels = out_channels
layers.append(torch.nn.ReLU())
        # Last layer is a convolution without activation
layers.append( nn.Conv2d(in_channels=in_channels, out_channels=2, kernel_size=(3,3), padding='same'))
        # This stores the layers in a list that will be tracked for backpropagation
self.convolutions = nn.ModuleList(layers)
def forward(self, image):
for l in self.convolutions:
image = l(image)
return image
class AutoMap(torch.nn.Module):
"""A network combining the fully connected network with the image denoiser.
"""
def __init__(self, input_phase_encodes, image_size, depth=3):
super(AutoMap, self).__init__()
self.fc_net = FullyConnected( input_phase_encodes, image_size)
self.denoiser = Denoiser(depth)
def forward(self, data):
image = self.fc_net(data)
image = self.denoiser(image)
return image
# To run the model we need to create an object based on those class definitions above
recon_model = AutoMap(input_phase_encodes=number_phase_encodes, image_size=N)
# Models have a device they run on. We need to put the model on the gpu
recon_model = recon_model.cuda()
# Torch summary enables similar formatting to Keras
from torchsummary import summary
summary(recon_model, (2,120,120))
```
# Data Loader
In PyTorch, it is very useful to have data as a torch Dataset. This could select all the images in a folder, generate data on the fly, etc. In this case, we will write a loader which just grabs data. Following this, we define a data loader which handles batching, shuffling, etc.
```
class Dataset(torch.utils.data.Dataset):
def __init__(self, kspace_data, image_data):
self.kspace_data = np.moveaxis( kspace_data, -1, 1)
self.image_data = np.moveaxis( image_data, -1, 1)
def __len__(self):
return self.kspace_data.shape[0]
def __getitem__(self, idx):
return self.kspace_data[idx], self.image_data[idx]
# Create datasets
dataset_train = Dataset( kspace_train, x_train)
dataset_val = Dataset( kspace_val, x_val)
# Create data loader to handle shuffling, batching, etc
train_generator = torch.utils.data.DataLoader(dataset_train, batch_size=32, shuffle=True)
val_generator = torch.utils.data.DataLoader(dataset_val, batch_size=32, shuffle=False)
```
# Training
Training in PyTorch is a bit more transparent than in Keras. The cell below will train the model, with the key steps being:
* Define an optimizer which will optimize the parameters we pass to it
* Define a loss function. This could be anything written in PyTorch but we will use a predefined function for mean squared error
* Loop over the data with a for loop.
For each batch, we will:
* Zero the gradients (gradients accumulate over multiple passes)
* Perform a forward pass
* Calculate the loss
* Perform a backwards pass
* Increment the weights using optimizer.step()
```
# Define an optimizer
optimizer = torch.optim.Adam( recon_model.parameters(), lr=1e-3)
# Define a loss function
loss_fcn = nn.MSELoss()
# Get a device
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# Ensure the model weights are on the desired device
recon_model.to(device)
# Define number of epochs
n_epochs = 20
# Empty list to store losses over epochs
train_losses = []
val_losses = []
# Epoch loop
for epoch in range(n_epochs):
# Put the model in train mode
recon_model.train()
# Loop over training batches
train_loss_avg = 0.0
for kspace, image in train_generator:
# Move data to the GPU
image = image.to(device)
kspace = kspace.to(device)
# Zero out the gradients
optimizer.zero_grad()
# Forward pass
image_guess = recon_model(kspace)
# Loss
loss = loss_fcn( image_guess, image)
# Store loss
train_loss_avg += loss.detach()
        # Backward pass performs backpropagation to compute the gradients
loss.backward()
# Take a gradient descent step
optimizer.step()
# Store the average loss
train_loss_avg /= len(train_generator)
train_losses.append(train_loss_avg)
# Switch to eval mode (weights fixed)
recon_model.eval()
# Loop eval batches
val_loss_avg = 0.0
for count, (kspace, image) in enumerate(val_generator):
# Move data to the GPU
image = image.to(device)
kspace = kspace.to(device)
# Forward pass
image_guess = recon_model(kspace)
# Loss
loss = loss_fcn( image_guess, image)
val_loss_avg += loss.detach()
# Plotting images
if count == 0:
image_mag = torch.abs(image[0,0,...] + 1j*image[0,1,...])
image_guess_mag = torch.abs(image_guess[0,0,...] + 1j*image_guess[0,1,...])
clear_output(wait=True)
plt.figure(figsize=(10,3))
plt.subplot(131)
plt.imshow(image_mag.detach().cpu().numpy(),cmap='gray')
plt.title('Truth')
plt.subplot(132)
plt.imshow(image_guess_mag.detach().cpu().numpy(),cmap='gray')
plt.title('NN Guess')
# Store the average loss
val_loss_avg /= len(val_generator)
val_losses.append(val_loss_avg)
# Plotting loss curve
plt.subplot(133)
plt.semilogy(train_losses, label="Loss")
plt.semilogy(val_losses, label="Loss (validation)")
plt.legend()
plt.show()
print(f'Epoch = {epoch} Loss = {train_loss_avg}, Val Loss = {val_loss_avg}')
```
# Test the trained model
```
example = 400
# Test with synthetic data
kspace = kspace_val[example,...]
kspace = np.expand_dims(kspace,0)
image = x_val[example,...]
image = np.expand_dims(image,0)
# Get the prediction
kspace_torch = torch.tensor(np.moveaxis( kspace, -1, 1))
predicted_image = recon_model(kspace_torch.to(device))
predicted_image = predicted_image.detach().cpu().numpy()
predicted_image = np.moveaxis(predicted_image,1,-1)
# Convert to complex
predicted_image = np.squeeze(channels_to_complex(predicted_image))
act_image = np.squeeze(channels_to_complex(x_val[example,...]))
# Plot
plt.figure(figsize=(11, 3), dpi=80, facecolor='w', edgecolor='k')
plt.subplot(131)
plt.imshow(np.abs(predicted_image),cmap='gray',vmin=0,vmax=1)
plt.axis('off')
plt.colorbar()
plt.title('Predicted')
plt.subplot(132)
plt.imshow(np.abs(act_image),cmap='gray',vmin=0,vmax=1)
plt.axis('off')
plt.colorbar()
plt.title('True Image')
plt.subplot(133)
plt.imshow(np.abs(act_image-predicted_image),cmap='gray',vmin=0)
plt.axis('off')
plt.colorbar()
plt.title('Difference Image')
plt.show()
```
# Compare to least squares solution with data
Here we compare to an alternative approach, regularized least squares. In this technique, we build an encoding matrix which simulates the data acquisition. Then we minimize:
$\parallel Ex-d \parallel_2^2 + \lambda \parallel x \parallel_2^2$
where $\lambda$ is a factor that regularizes the solution when it is ill-posed (see https://en.wikipedia.org/wiki/Tikhonov_regularization ). Setting the gradient $2E^h(Ex-d) + 2\lambda x$ to zero gives the solution:
$ \widetilde{x} = (E^hE + \lambda I)^{-1}E^hd$
where $I$ is the identity matrix. Similar to the neural network, this is an approximate solution.
```
# Lets also solve this a different way using a matrix inverse
def DFT_matrix(N):
i, j = np.meshgrid(np.arange(N), np.arange(N))
omega = np.exp( 2 * math.pi * 1J / N )
W = np.power( omega, i * j ) / N #math.sqrt(N)
return W
E = DFT_matrix(N)
E = np.fft.fftshift(E,axes=(0,))
E = E[idx,:]
# Grab the data
D = np.matrix.getH(channels_to_complex(kspace_val[400,...]))
# Solve for psuedo inverse
Eh = np.matrix.getH(E)
EhE = np.matmul(Eh,E)
Ei = np.linalg.inv(EhE + 0.000001*np.identity(N))
EiEh = np.matmul(Ei,Eh)
linear_algebra_prediction = np.transpose(np.matmul(EiEh,D))
plt.figure(figsize=(11, 11), dpi=80, facecolor='w', edgecolor='k')
plt.subplot(231)
plt.imshow(np.abs(linear_algebra_prediction),cmap='gray',vmin=0)
plt.axis('off')
plt.title('Least Squares Solution')
plt.subplot(234)
plt.imshow(np.abs(linear_algebra_prediction-act_image),cmap='gray',vmin=0,vmax=0.2)
plt.axis('off')
plt.title('Difference Least Squares')
plt.subplot(232)
plt.imshow(np.abs(predicted_image),cmap='gray',vmin=0,vmax=1)
plt.axis('off')
plt.title('Neural Net Prediction')
plt.subplot(235)
plt.imshow(np.abs(predicted_image-act_image),cmap='gray',vmin=0,vmax=0.2)
plt.axis('off')
plt.title('Difference Neural Net')
plt.subplot(233)
plt.imshow(np.abs(act_image),cmap='gray',vmin=0,vmax=1)
plt.axis('off')
plt.title('Actual Image')
plt.show()
print('Image Domain Mean Squared Error NN = ' + str(np.sum(np.square(abs(np.squeeze(predicted_image) - act_image)))) )
print('Image Domain Mean Squared Error LS = ' + str(np.sum(np.square(abs(linear_algebra_prediction - act_image)))) )
# Lets also get the kspace error
kspace_NN = np.matmul(E,np.squeeze(predicted_image))
kspace_LA = np.matmul(E,linear_algebra_prediction)
# Difference
diff_kspace_NN = kspace_NN - D
diff_kspace_LA = kspace_LA - D
print('Kspace Mean Squared Error NN = ' + str(np.sum(np.square(abs(diff_kspace_NN)))) )
print('Kspace Mean Squared Error LS = ' + str(np.sum(np.square(abs(diff_kspace_LA)))) )
```
# Load real MRI data to test
This is actual acquired MRI data from a brain scan. The data size is larger, so we crop in k-space. Just to make things doable in a short time, we keep everything 1D, as above.
```
# Load a Kspace dataset from an actual acquisition
with h5py.File('/content/drive/MyDrive/ML4MI_BOOTCAMP_DATA/Example_MRI_Data.h5','r') as hf:
kspace_mri = np.array(hf['Kspace'])
#Crop Kspace
crop = ( kspace_mri.shape[-2] - N ) // 2
kspace_mri = kspace_mri[...,::2,crop:-crop]
print(f'Kspace size = {kspace_mri.shape} [ channels, slices, Nx, Ny], type = {kspace_mri.dtype}')
coils = kspace_mri.shape[0]
slices = kspace_mri.shape[1]
```
# Run a traditional reconstruction
The most common reconstruction on MRI scanners is to just do a discrete Fourier transform of the data. Just a note: the data actually has 48 receivers of the signal. We take a sum-of-squares combination to average these signals.
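For reference, a minimal sketch of the conventional root-sum-of-squares coil combination; the array name and shape are made up for illustration, and the cell below uses a closely related variant (square root of the summed magnitudes):
```
import numpy as np

# Hypothetical complex coil images, shape [coils, Nx, Ny]
coil_images = np.random.randn(48, 256, 120) + 1j * np.random.randn(48, 256, 120)
rss = np.sqrt(np.sum(np.abs(coil_images) ** 2, axis=0))   # combine the 48 receiver images
print(rss.shape)                                          # (256, 120)
```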
```
# Traditional recon of fully sampled data
image_full = np.fft.ifftn(kspace_mri,axes=(-1,))
# do sum of squares to average coils (detectors)
image_full = np.sum(abs(image_full),axis=0)
image_full = np.sqrt(image_full)
# Make a montage (there are other options)
plot_image = montage(image_full[8::2,:,:])
# Show the image
plt.figure(figsize=(20,20))
plt.imshow(plot_image,aspect=1,interpolation='bilinear',cmap='gray')
plt.axis('off')
plt.title('DFT of Kspace')
plt.show()
```
# Do inference on the real MRI data
Machine Learning Based Reconstruction
```
# Subsample kspace and convert to channels
kspace_mri2 = kspace_mri[:,:,:,sampling_mask]
kspace_mri2 = np.stack((kspace_mri2.real,kspace_mri2.imag),axis=-1)
kspace_mri2 = np.reshape(kspace_mri2,(-1,N,number_phase_encodes,2))
print(kspace_mri2.shape)
# Run model
kspace_torch = torch.tensor(np.moveaxis( kspace_mri2, -1, 1))
predicted_image = recon_model(kspace_torch.to(device))
predicted_image = predicted_image.detach().cpu().numpy()
image_NN = np.moveaxis(predicted_image,1,-1)
print(image_NN.shape)
# Reshape
image_NN = np.reshape( image_NN,(coils,slices,N,N,2))
image_NN = channels_to_complex(image_NN)
# do sum of squares to average coils (detectors)
image_NN = np.sum(abs(image_NN),axis=0)
image_NN = np.sqrt(image_NN)
# Make a montage (there are other options)
plot_image = montage( image_NN[8::2,:,:])
# Show the image
plt.figure(figsize=(20,20))
plt.imshow(plot_image,aspect=1,interpolation='bilinear',cmap='gray')
plt.axis('off')
plt.title('Neural network prediction from Kspace')
plt.show()
```
Linear algebra based solution
```
image_LA = np.zeros(image_full.shape,dtype=image_full.dtype)
for k in range(slices):
# Subsample kspace and convert to channels
kspace_mri2 = np.squeeze(kspace_mri[:,k,:,:])
kspace_mri2 = kspace_mri2[:,:,sampling_mask]
kspace_mri2 = np.reshape(kspace_mri2,(-1,number_phase_encodes))
kspace_mri2 = np.expand_dims(kspace_mri2,-1)
# Also do for Least squares estimate
image = np.matmul(EiEh,kspace_mri2)
image = np.reshape(image,newshape=(coils,N,N))
# do sum of squares to average coils (detectors)
image = np.sum(abs(image),axis=0)
image = np.sqrt(image)
image_LA[k,:,:] = np.fliplr(image)
# Make a montage (there are other options)
plot_image = montage( image_LA[8::2,:,:])
# Show the image
plt.figure(figsize=(20,20))
plt.imshow(plot_image,aspect=1,interpolation='bilinear',cmap='gray')
plt.axis('off')
plt.title('Linear algebra prediction from Kspace')
plt.show()
```
# Now compare the solutions
```
slice = 24
print(image_LA.shape)
print(image_NN.shape)
print(image_full.shape)
plt.figure(figsize=(20,20))
plt.subplot(131)
plt.imshow(abs(image_LA[slice,:,:]),cmap='gray')
plt.axis('off')
plt.title('Linear Algebra')
plt.subplot(132)
plt.imshow(abs(image_NN[slice,:,:]),cmap='gray')
plt.axis('off')
plt.title('Neural Net')
plt.subplot(133)
plt.imshow(abs(image_full[slice,:,:]),cmap='gray')
plt.axis('off')
plt.title('Ground Truth')
plt.show()
# Slice for viewing
slice = 24
# Scale to minimize difference (scaling unimportant in MRI)
scale_LA = np.sum( image_full*np.conj(image_LA)) /np.sum(image_LA**2)
scale_NN = np.sum( image_full*np.conj(image_NN)) /np.sum(image_NN**2)
diff_LA = scale_LA*image_LA - image_full
diff_NN = scale_NN*image_NN - image_full
# Print Error
error_LA = np.linalg.norm(diff_LA)/np.linalg.norm(image_full)
error_NN = np.linalg.norm(diff_NN)/np.linalg.norm(image_full)
print(f'Image MSE Linear Algebra = {error_LA}')
print(f'Image MSE Neural Network = {error_NN}')
plt.figure(figsize=(20,20))
plt.subplot(131)
plt.imshow(abs(diff_LA[slice,:,:]),cmap='gray')
plt.axis('off')
plt.title('Linear Algebra')
plt.subplot(132)
plt.imshow(abs(diff_NN[slice,:,:]),cmap='gray')
plt.axis('off')
plt.title('Neural Net')
plt.subplot(133)
plt.imshow(abs(image_full[slice,:,:]),cmap='gray')
plt.axis('off')
plt.title('Ground Truth')
plt.show()
```
# Image Recon Challenge
Can you fix the image reconstruction example? The challenge is to reconstruct the images with the following parameters:
undersample_factor = 1.5
noise_level = 0.001;
|
github_jupyter
|
'''
In python you need to import libraries in order to use them.
'''
# Using PyTorch for this code
import torch
# Utilities
import numpy as np
import math
# Plotting
import matplotlib.pyplot as plt
from IPython.display import clear_output
'''
We will define some functions to use later. Normally you might put these in another file and import them
'''
# Some support functions
def montage( img_in, size=(3,5) ):
for j in range(size[0]):
plot_image = img_in[0+size[1]*j,:,:]
for i in range(size[1]-1):
plot_image = np.concatenate((plot_image, img_in[1+i+size[1]*j,:,:]), axis=1)
if j == 0:
img = plot_image
else:
img = np.concatenate((img,plot_image),axis=0)
return img
def complex_to_channels( img_in):
return(np.stack(img_in.real,img_in.imag))
def channels_to_complex( img_in):
return(img_in[...,0]+1j*img_in[...,1])
'''
Mount your google drive, we'll grab data from the shared folder
'''
from google.colab import drive
drive.mount('/content/drive')
# load training, validation, and testing data
import h5py
with h5py.File('/content/drive/MyDrive/ML4MI_BOOTCAMP_DATA/ImageReconstructionData.h5','r') as hf:
x_train = np.array(hf['x_train'])
x_val = np.array(hf['x_val'])
print(f'Validate Dataset Size {x_val.shape}')
print(f'Train Dataset Size {x_train.shape}')
N = x_train.shape[-2]
'''
The creates a sampling mask which can be used to subsample the data.
'''
# Get the number of phase encodes
undersample_factor = 1.5
noise_level = 0.001
number_phase_encodes = int(N/undersample_factor)
print('Using ' + str(number_phase_encodes) + ' phase encode')
# Create a random mask to resample the data
idx = np.full(N, False)
idx[:number_phase_encodes] = True
np.random.seed(1) # Keep this one so code is reproducible
np.random.shuffle(idx)
sampling_mask = idx
'''
Fourier transform, subsample, and add noise (Train Data)
'''
Nexamples = x_train.shape[0]
kspace_train = np.zeros((Nexamples,N,number_phase_encodes,2),x_train.dtype)
for example in range(x_train.shape[0]):
if example % 1000 == 0:
print(f'Working on example {example} of {Nexamples}')
# Grab one image
temp = x_train[example,:,:,0] + 1j*x_train[example,:,:,1]
# Fourier Transform
kspace_temp = np.fft.fftn(temp,axes=(1,))/N
kspace_temp = np.fft.fftshift(kspace_temp,axes=(1,))
kspace_temp =np.stack( (kspace_temp.real, kspace_temp.imag), axis=-1)
# Subsample
kspace_temp = kspace_temp[:,sampling_mask,:]
# Add noise
kspace_temp += noise_level*np.random.randn(*kspace_temp.shape)
# Put back
kspace_train[example,:,:,:] = kspace_temp
print('Dimensions of training data are ' + str(kspace_train.shape) + '[ Examples x Nx x Ny x Channels]')
'''
Fourier transform, subsample, and add noise (Validate Data)
'''
Nexamples = x_val.shape[0]
kspace_val = np.zeros((Nexamples,N,number_phase_encodes,2),x_train.dtype)
for example in range(x_val.shape[0]):
if example % 1000 == 0:
print(f'Working on example {example} of {Nexamples}')
# Grab one image
temp = x_val[example,:,:,0] + 1j*x_val[example,:,:,1]
# Fourier Transform
kspace_temp = np.fft.fftn(temp,axes=(1,))/N
kspace_temp = np.fft.fftshift(kspace_temp,axes=(1,))
kspace_temp =np.stack( (kspace_temp.real, kspace_temp.imag), axis=-1)
# Subsample
kspace_temp = kspace_temp[:,sampling_mask,:]
# Add noise
kspace_temp += noise_level*np.random.randn(*kspace_temp.shape)
# Put back
kspace_val[example,:,:,:] = kspace_temp
print('Dimensions of validation data are ' + str(kspace_val.shape) + '[ Examples x Nx x Ny x Channels]')
example = 400
# Show one image and k-space pair
img = x_train[example,:,:,0] + 1j*x_train[example,:,:,1]
plt.figure()
plt.subplot(121)
plt.imshow(np.abs(img),cmap='gray')
plt.title('Grayscale')
img = kspace_train[example,:,:,0] + 1j*kspace_train[example,:,:,1]
plt.subplot(122)
plt.imshow(np.abs(img),cmap='gray')
plt.title('K-Space (1D FFT)')
plt.show()
import torch
import torch.nn as nn
import torch.nn.functional as F
class FullyConnected(torch.nn.Module):
"""A fully connected network to transform from k-space to images.
"""
def __init__(self, input_phase_encodes, image_size):
super(FullyConnected, self).__init__()
self.input_phase_encodes = input_phase_encodes
self.image_size = image_size
# These two convolution will be fully connected along one dimension. We are not
# using a fully connected network in 2D due to memory contraints and the fact
# that the data is undersampling in one dimension.
self.conv1 = nn.Conv2d(in_channels=2, out_channels=2*image_size, kernel_size=(1, input_phase_encodes), padding='valid')
self.conv2 = nn.Conv2d(in_channels=2, out_channels=2*image_size, kernel_size=(1, image_size), padding='valid')
def forward(self, data):
# This is a fully connected network followed by reshaping into an image
layer = self.conv1(data)
layer = layer.view((-1,2,self.image_size,self.image_size))
layer = torch.moveaxis(layer,-2,-1)
# This is a fully connected network followed by reshaping into an image
layer = self.conv2(layer)
layer = layer.view((-1,2,self.image_size,self.image_size))
layer = torch.moveaxis(layer,-2,-1)
return layer
class Denoiser(torch.nn.Module):
"""An image to image denoiser
"""
def __init__(self, depth=3, initial_features=8):
super(Denoiser, self).__init__()
# The channels start as complex (2 channel) and we can increase them during the denoiser
in_channels = 2
out_channels = initial_features
# This will build the list of operators, similar to keras sequential which each being a convolution followed by activation
layers = []
for l in range(depth):
layers.append( nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(3,3), padding='same'))
in_channels = out_channels
layers.append(torch.nn.ReLU())
# Last layer is a convolution without activiation
layers.append( nn.Conv2d(in_channels=in_channels, out_channels=2, kernel_size=(3,3), padding='same'))
# This stores the layers in a list that will be tracked for backpropogation
self.convolutions = nn.ModuleList(layers)
def forward(self, image):
for l in self.convolutions:
image = l(image)
return image
class AutoMap(torch.nn.Module):
"""A network combining the fully connected network with the image denoiser.
"""
def __init__(self, input_phase_encodes, image_size, depth=3):
super(AutoMap, self).__init__()
self.fc_net = FullyConnected( input_phase_encodes, image_size)
self.denoiser = Denoiser(depth)
def forward(self, data):
image = self.fc_net(data)
image = self.denoiser(image)
return image
# To run the model we need to create an object based on those class definitions above
recon_model = AutoMap(input_phase_encodes=number_phase_encodes, image_size=N)
# Models have a device they run on. We need to put the model on the gpu
recon_model = recon_model.cuda()
# Torch summary enables similar formatting to Keras
from torchsummary import summary
summary(recon_model, (2,120,120))
class Dataset(torch.utils.data.Dataset):
def __init__(self, kspace_data, image_data):
self.kspace_data = np.moveaxis( kspace_data, -1, 1)
self.image_data = np.moveaxis( image_data, -1, 1)
def __len__(self):
return self.kspace_data.shape[0]
def __getitem__(self, idx):
return self.kspace_data[idx], self.image_data[idx]
# Create datasets
dataset_train = Dataset( kspace_train, x_train)
dataset_val = Dataset( kspace_val, x_val)
# Create data loader to handle shuffling, batching, etc
train_generator = torch.utils.data.DataLoader(dataset_train, batch_size=32, shuffle=True)
val_generator = torch.utils.data.DataLoader(dataset_val, batch_size=32, shuffle=False)
# Define an optimizer
optimizer = torch.optim.Adam( recon_model.parameters(), lr=1e-3)
# Define a loss function
loss_fcn = nn.MSELoss()
# Get a device
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# Ensure the model weights are on the desired device
recon_model.to(device)
# Define number of epochs
n_epochs = 20
# Empty list to store losses over epochs
train_losses = []
val_losses = []
# Epoch loop
for epoch in range(n_epochs):
# Put the model in train mode
recon_model.train()
# Loop over training batches
train_loss_avg = 0.0
for kspace, image in train_generator:
# Move data to the GPU
image = image.to(device)
kspace = kspace.to(device)
# Zero out the gradients
optimizer.zero_grad()
# Forward pass
image_guess = recon_model(kspace)
# Loss
loss = loss_fcn( image_guess, image)
# Store loss
train_loss_avg += loss.detach()
# Backwards with perform back propogation to get weights
loss.backward()
# Take a gradient descent step
optimizer.step()
# Store the average loss
train_loss_avg /= len(train_generator)
train_losses.append(train_loss_avg)
# Switch to eval mode (weights fixed)
recon_model.eval()
# Loop eval batches
val_loss_avg = 0.0
for count, (kspace, image) in enumerate(val_generator):
# Move data to the GPU
image = image.to(device)
kspace = kspace.to(device)
# Forward pass
image_guess = recon_model(kspace)
# Loss
loss = loss_fcn( image_guess, image)
val_loss_avg += loss.detach()
# Plotting images
if count == 0:
image_mag = torch.abs(image[0,0,...] + 1j*image[0,1,...])
image_guess_mag = torch.abs(image_guess[0,0,...] + 1j*image_guess[0,1,...])
clear_output(wait=True)
plt.figure(figsize=(10,3))
plt.subplot(131)
plt.imshow(image_mag.detach().cpu().numpy(),cmap='gray')
plt.title('Truth')
plt.subplot(132)
plt.imshow(image_guess_mag.detach().cpu().numpy(),cmap='gray')
plt.title('NN Guess')
# Store the average loss
val_loss_avg /= len(val_generator)
val_losses.append(val_loss_avg)
# Plotting loss curve
plt.subplot(133)
plt.semilogy(train_losses, label="Loss")
plt.semilogy(val_losses, label="Loss (validation)")
plt.legend()
plt.show()
print(f'Epoch = {epoch} Loss = {train_loss_avg}, Val Loss = {val_loss_avg}')
example = 400
# Test with synthetic data
kspace = kspace_val[example,...]
kspace = np.expand_dims(kspace,0)
image = x_val[example,...]
image = np.expand_dims(image,0)
# Get the prediction
kspace_torch = torch.tensor(np.moveaxis( kspace, -1, 1))
predicted_image = recon_model(kspace_torch.to(device))
predicted_image = predicted_image.detach().cpu().numpy()
predicted_image = np.moveaxis(predicted_image,1,-1)
# Convert to complex
predicted_image = np.squeeze(channels_to_complex(predicted_image))
act_image = np.squeeze(channels_to_complex(x_val[example,...]))
# Plot
plt.figure(figsize=(11, 3), dpi=80, facecolor='w', edgecolor='k')
plt.subplot(131)
plt.imshow(np.abs(predicted_image),cmap='gray',vmin=0,vmax=1)
plt.axis('off')
plt.colorbar()
plt.title('Predicted')
plt.subplot(132)
plt.imshow(np.abs(act_image),cmap='gray',vmin=0,vmax=1)
plt.axis('off')
plt.colorbar()
plt.title('True Image')
plt.subplot(133)
plt.imshow(np.abs(act_image-predicted_image),cmap='gray',vmin=0)
plt.axis('off')
plt.colorbar()
plt.title('Difference Image')
plt.show()
# Lets also solve this a different way using a matrix inverse
def DFT_matrix(N):
i, j = np.meshgrid(np.arange(N), np.arange(N))
omega = np.exp( 2 * math.pi * 1J / N )
W = np.power( omega, i * j ) / N #math.sqrt(N)
return W
E = DFT_matrix(N)
E = np.fft.fftshift(E,axes=(0,))
E = E[idx,:]
# Grab the data
D = np.matrix.getH(channels_to_complex(kspace_val[400,...]))
# Solve for psuedo inverse
Eh = np.matrix.getH(E)
EhE = np.matmul(Eh,E)
Ei = np.linalg.inv(EhE + 0.000001*np.identity(N))
EiEh = np.matmul(Ei,Eh)
linear_algebra_prediction = np.transpose(np.matmul(EiEh,D))
plt.figure(figsize=(11, 11), dpi=80, facecolor='w', edgecolor='k')
plt.subplot(231)
plt.imshow(np.abs(linear_algebra_prediction),cmap='gray',vmin=0)
plt.axis('off')
plt.title('Least Squares Solution')
plt.subplot(234)
plt.imshow(np.abs(linear_algebra_prediction-act_image),cmap='gray',vmin=0,vmax=0.2)
plt.axis('off')
plt.title('Difference Least Squares')
plt.subplot(232)
plt.imshow(np.abs(predicted_image),cmap='gray',vmin=0,vmax=1)
plt.axis('off')
plt.title('Neural Net Prediction')
plt.subplot(235)
plt.imshow(np.abs(predicted_image-act_image),cmap='gray',vmin=0,vmax=0.2)
plt.axis('off')
plt.title('Difference Neural Net')
plt.subplot(233)
plt.imshow(np.abs(act_image),cmap='gray',vmin=0,vmax=1)
plt.axis('off')
plt.title('Actual Image')
plt.show()
print('Image Domain Mean Squared Error NN = ' + str(np.sum(np.square(abs(np.squeeze(predicted_image) - act_image)))) )
print('Image Domain Mean Squared Error LS = ' + str(np.sum(np.square(abs(linear_algebra_prediction - act_image)))) )
# Lets also get the kspace error
kspace_NN = np.matmul(E,np.squeeze(predicted_image))
kspace_LA = np.matmul(E,linear_algebra_prediction)
# Difference
diff_kspace_NN = kspace_NN - D
diff_kspace_LA = kspace_LA - D
print('Kspace Mean Squared Error NN = ' + str(np.sum(np.square(abs(diff_kspace_NN)))) )
print('Kspace Mean Squared Error LS = ' + str(np.sum(np.square(abs(diff_kspace_LA)))) )
# Load a Kspace dataset from an actual acquisition
with h5py.File('/content/drive/MyDrive/ML4MI_BOOTCAMP_DATA/Example_MRI_Data.h5','r') as hf:
kspace_mri = np.array(hf['Kspace'])
#Crop Kspace
crop = ( kspace_mri.shape[-2] - N ) // 2
kspace_mri = kspace_mri[...,::2,crop:-crop]
print(f'Kspace size = {kspace_mri.shape} [ channels, slices, Nx, Ny], type = {kspace_mri.dtype}')
coils = kspace_mri.shape[0]
slices = kspace_mri.shape[1]
# Traditional recon of fully sampled data
image_full = np.fft.ifftn(kspace_mri,axes=(-1,))
# do sum of squares to average coils (detectors)
image_full = np.sum(abs(image_full),axis=0)
image_full = np.sqrt(image_full)
# Make a montage (there are other options)
plot_image = montage(image_full[8::2,:,:])
# Show the image
plt.figure(figsize=(20,20))
plt.imshow(plot_image,aspect=1,interpolation='bilinear',cmap='gray')
plt.axis('off')
plt.title('DFT of Kspace')
plt.show()
# Subsample kspace and convert to channels
kspace_mri2 = kspace_mri[:,:,:,sampling_mask]
kspace_mri2 = np.stack((kspace_mri2.real,kspace_mri2.imag),axis=-1)
kspace_mri2 = np.reshape(kspace_mri2,(-1,N,number_phase_encodes,2))
print(kspace_mri2.shape)
# Run model
kspace_torch = torch.tensor(np.moveaxis( kspace_mri2, -1, 1))
predicted_image = recon_model(kspace_torch.to(device))
predicted_image = predicted_image.detach().cpu().numpy()
image_NN = np.moveaxis(predicted_image,1,-1)
print(image_NN.shape)
# Reshape
image_NN = np.reshape( image_NN,(coils,slices,N,N,2))
image_NN = channels_to_complex(image_NN)
# do sum of squares to average coils (detectors)
image_NN = np.sum(abs(image_NN),axis=0)
image_NN = np.sqrt(image_NN)
# Make a montage (there are other options)
plot_image = montage( image_NN[8::2,:,:])
# Show the image
plt.figure(figsize=(20,20))
plt.imshow(plot_image,aspect=1,interpolation='bilinear',cmap='gray')
plt.axis('off')
plt.title('Neural network prediction from Kspace')
plt.show()
image_LA = np.zeros(image_full.shape,dtype=image_full.dtype)
for k in range(slices):
# Subsample kspace and convert to channels
kspace_mri2 = np.squeeze(kspace_mri[:,k,:,:])
kspace_mri2 = kspace_mri2[:,:,sampling_mask]
kspace_mri2 = np.reshape(kspace_mri2,(-1,number_phase_encodes))
kspace_mri2 = np.expand_dims(kspace_mri2,-1)
# Also do for Least squares estimate
image = np.matmul(EiEh,kspace_mri2)
image = np.reshape(image,newshape=(coils,N,N))
# do sum of squares to average coils (detectors)
image = np.sum(abs(image),axis=0)
image = np.sqrt(image)
image_LA[k,:,:] = np.fliplr(image)
# Make a montage (there are other options)
plot_image = montage( image_LA[8::2,:,:])
# Show the image
plt.figure(figsize=(20,20))
plt.imshow(plot_image,aspect=1,interpolation='bilinear',cmap='gray')
plt.axis('off')
plt.title('Linear algebra prediction from Kspace')
plt.show()
slice = 24
print(image_LA.shape)
print(image_NN.shape)
print(image_full.shape)
plt.figure(figsize=(20,20))
plt.subplot(131)
plt.imshow(abs(image_LA[slice,:,:]),cmap='gray')
plt.axis('off')
plt.title('Linear Algebra')
plt.subplot(132)
plt.imshow(abs(image_NN[slice,:,:]),cmap='gray')
plt.axis('off')
plt.title('Neural Net')
plt.subplot(133)
plt.imshow(abs(image_full[slice,:,:]),cmap='gray')
plt.axis('off')
plt.title('Ground Truth')
plt.show()
# Slice for viewing
slice = 24
# Scale to minimize difference (scaling unimportant in MRI)
scale_LA = np.sum( image_full*np.conj(image_LA)) /np.sum(image_LA**2)
scale_NN = np.sum( image_full*np.conj(image_NN)) /np.sum(image_NN**2)
diff_LA = scale_LA*image_LA - image_full
diff_NN = scale_NN*image_NN - image_full
# Print Error
error_LA = np.linalg.norm(diff_LA)/np.linalg.norm(image_full)
error_NN = np.linalg.norm(diff_NN)/np.linalg.norm(image_full)
print(f'Image MSE Linear Algebra = {error_LA}')
print(f'Image MSE Neural Network = {error_NN}')
plt.figure(figsize=(20,20))
plt.subplot(131)
plt.imshow(abs(diff_LA[slice,:,:]),cmap='gray')
plt.axis('off')
plt.title('Linear Algebra')
plt.subplot(132)
plt.imshow(abs(diff_NN[slice,:,:]),cmap='gray')
plt.axis('off')
plt.title('Neural Net')
plt.subplot(133)
plt.imshow(abs(image_full[slice,:,:]),cmap='gray')
plt.axis('off')
plt.title('Ground Truth')
plt.show()
| 0.764804 | 0.9879 |
```
# default_exp calibrate
```
# Calibrate
> Calibration for the Open Source DIY Hyperspectral Imager.
First, the use of a common file format for storing calibration data is discussed, followed by functions for calibration.
```
#hide
from nbdev.showdoc import *
```
## Common File Format
First, we convert all calibration files to a common format (HDF5). NumPy arrays inside these files can be accessed like entries in a dictionary. We then only need to package the HDF5 calibration files for PyPI.
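For reference, a minimal sketch of that dictionary-style access (toy in-memory data, not one of the calibration files):

```
import io
import h5py
import numpy as np

# minimal sketch: datasets in an HDF5 file are addressed by name, like dictionary keys
with h5py.File(io.BytesIO(), "w") as f:
    f["example"] = np.arange(5)
    print(list(f.keys()))      # ['example']
    print(f["example"][...])   # [0 1 2 3 4]
```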
### wavesoln.npz $\rightarrow$ wave_soln.hdf5
`wavesoln.npz` contains `wavecal` and `newwave`.
```
import numpy as np
import matplotlib.pyplot as plt
npzfile = np.load("cal_files/wavesoln.npz")
wavecal = npzfile["wavecal"]
newwave = npzfile["newwave"]
import h5py
with h5py.File("cal_files/wave_soln.hdf5", "w") as f:
dset = f.create_dataset("wavecal",shape=wavecal.shape,dtype=np.float64)
dset[...] = wavecal
print(f'wavecal has shape {wavecal.shape}')
dset2 = f.create_dataset("newwave",shape=newwave.shape,dtype=np.float64)
dset2[...] = newwave
print(f'newwave has shape {newwave.shape}')
# check if we can open the HDF5 files
with h5py.File("cal_files/wave_soln.hdf5", "r") as f:
print(f'File has entries: {list(f.keys())}')
plt.subplots(nrows=1,ncols=2,figsize=(10,4))
plt.subplot(1,2,1); plt.title('wavecal')
plt.imshow(f['wavecal'])
plt.colorbar()
plt.xlabel('wavelength (nm) ???'); plt.ylabel('line pixels ???')
plt.subplot(1,2,2); plt.title('newwave')
plt.plot(f['newwave'])
plt.xlabel('array index'); plt.ylabel('???')
```
### arc.fits $\rightarrow$ arc.hdf5
`arc.fits` contains `hdulist[0].data` which is called `arcimg`. `hdulist` has length 1.
```
from astropy.io import fits as fitsio
hdulist = fitsio.open("cal_files/arc.fits")
arcimg = np.rot90(hdulist[0].data, -1)
import h5py
with h5py.File("cal_files/arc.hdf5", "w") as f:
dset = f.create_dataset("arc_img",shape=arcimg.shape,dtype=np.float64)
dset[...] = arcimg
print(f'arcimg has shape {arcimg.shape}')
# check if we can open the HDF5 files
with h5py.File("cal_files/arc.hdf5", "r") as f:
print(f'File has entries: {list(f.keys())}')
plt.title('arc_img')
plt.imshow(f['arc_img'])
plt.xlabel('wavelength (nm) ???'); plt.ylabel('line pixels ???')
```
### hgar_linelist_cfht.mat $\rightarrow$ HgAr_lines.hdf5
`hgar_linelist_cfht.mat` contains `#refs#`, `Species`, and `wavelength`. I don't know how to deal with `#refs#` and `Species`.
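Should `Species` be needed later, here is a hedged, untested sketch of how it might be dereferenced, assuming MATLAB v7.3 stores it as a cell array of character arrays (each entry an object reference to a uint16 array):

```
import h5py

# hedged sketch: dereference a MATLAB v7.3 cell array of strings (assumed shape (1, N) of object references)
with h5py.File('cal_files/hgar_linelist_cfht.mat', 'r') as f:
    refs = f['Species'][0, :]
    species = [''.join(map(chr, f[ref][:].ravel())) for ref in refs]
print(species[:5])
```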
```
with h5py.File('cal_files/hgar_linelist_cfht.mat','r') as f:
print(f'File has entries: {list(f.keys())}')
with h5py.File('cal_files/HgAr_lines.hdf5','w') as f2:
wavelength = f.get('wavelength')[0,:] # original shape is (1,252)
dset = f2.create_dataset("wavelength",shape=wavelength.shape,dtype=np.float64)
dset[...] = wavelength
print(f'wavelength has shape {wavelength.shape}')
with h5py.File('cal_files/HgAr_lines.hdf5','r') as f:
plt.plot(np.array(f['wavelength'])/10)
plt.xlabel('array index')
plt.ylabel('wavelength (nm) ???')
#data = np.array( f.get('hypercube') )
A = np.array([[1,2,3,4],[5,6,7,8]])
print(A)
print(f'shape is {A.shape}')
print(f'first row is {A[0,:]}')
```
## The Calibration Module
```
#export
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import find_peaks, savgol_filter
from scipy.optimize import curve_fit
from scipy import interpolate
from fastprogress.fastprogress import master_bar, progress_bar
import h5py
def NGaussFunc(x, *params): # x0 pk width
y = np.zeros_like(x)
for i in range(0, len(params) - 1, 3):
ctr = params[i]
amp = params[i + 1]
wid = params[i + 2]
y = y + amp * np.exp(-((x - ctr) / wid) ** 2)
return y + params[-1]
def fit_arc_lines(arcimg, spatial_col_skip=1, wavecalfile=""):
arcimg = arcimg * 1.0 / np.max(arcimg, axis=1)[:, None]
spatialaxis = np.arange(0, arcimg.shape[0])
waveaxis = np.arange(0, arcimg.shape[1])
arcgausfits = np.zeros((len(range(1, arcimg.shape[0], spatial_col_skip)) + 1, 28))
spec = arcimg[0, :]
peaks, properties = find_peaks(spec, height=0.01, width=1.5, prominence=0.01)
y0 = np.zeros((peaks.size * 3))
y0[0::3] = peaks
y0[1::3] = properties["peak_heights"]
y0[2::3] = properties["widths"] * 0.5
y0 = np.append(y0, 0.02)
arcgausfits[0, :], pcov = curve_fit(NGaussFunc, waveaxis, spec, p0=y0)
i = 0
print("Fitting Arc Lines in each col...")
for col in progress_bar(range(1, arcimg.shape[0], spatial_col_skip)):
i += 1
spec = arcimg[col, :]
y0 = arcgausfits[i - 1, :]
arcgausfits[i, :], pcov = curve_fit(NGaussFunc, waveaxis, spec, p0=y0)
    pos = arcgausfits[:, range(0, arcgausfits.shape[1] - 1, 3)]
    # amplitudes sit at indices 1, 4, 7, ... of the interleaved (ctr, amp, wid) parameter vector
    peakheight = arcgausfits[:, range(1, arcgausfits.shape[1] - 1, 3)]
    width = arcgausfits[:, range(2, arcgausfits.shape[1] - 1, 3)]
smoothed_pos = np.zeros_like(pos)
for row in progress_bar(range(0, pos.shape[1])):
smoothed_pos[:, row] = savgol_filter(pos[:, row], 21, 3)
spectral_lines = np.asarray([4358.328, 5460.735, 5769.598, 5790.663, 6965.4307,
7067.2175, 7272.9359, 7383.9805, 7503.8691]) / 10
wavecal = np.zeros((smoothed_pos.shape[0], waveaxis.shape[0]))
p = []
for i in range(0, smoothed_pos.shape[0]):
z = np.polyfit(smoothed_pos[i, :], spectral_lines, 4)
p.append(np.poly1d(z))
wavecal[i, :] = p[-1](waveaxis)
if True:
plt.imshow(wavecal)
plt.xlabel('wavelength (nm) with some offset ???')
plt.ylabel('spatial pixels')
plt.colorbar()
plt.show()
minwave = wavecal.min(axis=1).max()
maxwave = wavecal.max(axis=1).min()
minwavedelta = np.diff(wavecal, axis=1).min()
newwave = np.arange(minwave, maxwave, minwavedelta)
interpimg = np.zeros((wavecal.shape[0], newwave.shape[0]))
for col in range(0, wavecal.shape[0]):
f = interpolate.interp1d(wavecal[col, :], arcimg[col, :])
interpimg[col, :] = f(newwave)
result = {
"wavecal": wavecal,
"pos": pos,
"peakheight": peakheight,
"width": width,
"smoothed_pos": smoothed_pos,
"newwave": newwave,
}
if len(wavecalfile):
np.savez(wavecalfile, **result)
return result
with h5py.File("cal_files/arc.hdf5", "r") as f:
arcimg = np.array(f['arc_img'])
result_orig = fit_arc_lines(arcimg)
```
### Partly refactored functions
```
def linearise_wavelength(raw:np.ndarray, cal_file:h5py.File) -> np.ndarray:
"""Linearise `raw` by wavelength ??? Defined but not used."""
with h5py.File('cal_files/wave_soln.hdf5','r') as f:
wavecal = f['wavecal']
newwave = f['newwave']
rows = wavecal.shape[0]; cols = newwave.shape[0]
interp_img = np.zeros((rows,cols))
        print(f'interp_img has shape {interp_img.shape}')
for i in range(rows):
g = interpolate.interp1d( wavecal[i,:], raw[i,:] )
interp_img[i,:] = g(newwave)
return interp_img
#export
def sum_gaussians(x:"wavelength np.array",
*args:"amplitude, peak position, peak width, constant") -> np.array:
split = len(args)//3
A = args[0:split] # amplitude
mu = args[split:2*split] # peak position
sigma = args[split*2:-1] # peak stdev
c = args[-1] # offset
return np.array( [A[i] * np.exp( - np.square( (x - mu[i])/sigma[i] ) )
for i in range(len(A))] ).sum(axis=0) + c
def fit_arc_lines2(arc_file:str = "cal_files/arc.hdf5", wave_save_file:str = None, skip:int = 1,
show:bool = True) -> dict:
"""Fit a bunch of guassians on top of a spectrum. ???"""
with h5py.File(arc_file, "r") as f:
arc_img = np.array(f['arc_img'],dtype=np.float64)
# normalise the image?
arc_img /= np.max(arc_img, axis = 1)[:, None]
# rows -> spatial axis, cols -> wavelength axis
rows, cols = arc_img.shape
x_array, wavelengths = np.arange(rows), np.arange(cols)
# init arrays. why 28 cols?
# took a while, but this is rows*(9peaks*3arrays+1constant)
# the assumption here is that there will only be 9 peaks.
arc_gauss_fit = np.zeros((rows//skip,28))
# init with first pixel's spectrum
# does spec mean species?
spec = arc_img[0,:]
mu, props = find_peaks(spec, height = 0.01, width = 1.5, prominence = 0.01)
A = props["peak_heights"]
sigma = 0.5 * props["widths"]
c = 0.02
params0 = [*A,*mu,*sigma,c]
if show:
plt.subplots(nrows=1,ncols=2,figsize=(10,5))
plt.subplot(1,2,1)
plt.plot(wavelengths,spec)
plt.plot(mu, A, 'r*')
plt.xlabel('wavelength (nm) with some offset ???')
plt.ylabel('normalised amplitude')
#plt.show()
#breakpoint()
#arc_gauss_fit[0,:], _ = curve_fit(sum_gaussians, wavelengths, spec, p0=params)
print('Fit arc lines for each spatial pixel')
#skip = rows
for i in progress_bar(range(0,rows,skip)):
params = params0 if i == 0 else arc_gauss_fit[i-1,:]
arc_gauss_fit[i,:], _ = curve_fit(sum_gaussians, wavelengths, arc_img[i,:], p0=params)
split = len(params0)//3
A = arc_gauss_fit[:,:split]
mu = arc_gauss_fit[:,split:2*split]
sigma = arc_gauss_fit[:,2*split:-1]
# why smooth the peak centres?
# shape is (spatial pixels,9 peaks found)
smooth_mu = np.zeros_like(mu)
for j in range(split):
smooth_mu[:,j] = savgol_filter(mu[:,j], 21, 3)
spectral_lines = np.asarray([4358.328, 5460.735, 5769.598, 5790.663, 6965.4307,
7067.2175, 7272.9359, 7383.9805, 7503.8691]) / 10
wave_cal = np.zeros((rows,cols))
poly_funcs = [np.poly1d( np.polyfit(smooth_mu[i,:], spectral_lines, 4) ) for i in range(rows)]
#breakpoint()
wave_cal = np.array([p(wavelengths) for p in poly_funcs])
# what is the reasoning behind .max() after min()? and vice versa
min_wavelength = wave_cal.min(axis=1).max()
max_wavelength = wave_cal.max(axis=1).min()
delta_wavelength = np.diff(wave_cal, axis=1).min()
#breakpoint()
newwave = np.arange(min_wavelength, max_wavelength, delta_wavelength)
interp_img = np.zeros((rows,len(newwave)))
f = [interpolate.interp1d(wave_cal[i,:], arc_img[i,:]) for i in range(rows)]
for i in range(rows):
interp_img[i,:] = f[i](newwave)
if show:
plt.subplot(1,2,2)
plt.imshow(wave_cal)
plt.xlabel('wavelength (nm) with some offset ???')
plt.ylabel('spatial pixels')
plt.colorbar()
plt.show()
result = { "wavecal": wave_cal,
"pos": mu,
"peakheight": A,
"width": sigma,
"smoothed_pos": smooth_mu,
"newwave": newwave}
if wave_save_file:
with h5py.File(wave_save_file, "w") as f:
for k, v in result.items():
f.create_dataset(k, data=np.array(v, dtype=np.float64))
return result
result_new = fit_arc_lines2()
```
### Testing equality of the original vs refactored.
Outputs are the same
```
result_orig['pos'], result_new['pos']
sum_gaussians(np.arange(100),*np.array([10,10,20,20,10,10,10])) # amp, ctr, width, const
NGaussFunc(np.arange(100),*np.array([20,10,10,20,10,10])) # ctr, amp, width, const
```
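Beyond eyeballing `pos`, a hedged numerical check of the remaining keys (assuming both result dictionaries are still in memory; small differences are possible since the two fits use different parameter orderings):

```
import numpy as np

# hedged sketch: compare the original and refactored outputs key by key
for key in ['wavecal', 'pos', 'peakheight', 'width', 'smoothed_pos', 'newwave']:
    a, b = np.asarray(result_orig[key]), np.asarray(result_new[key])
    same = a.shape == b.shape and np.allclose(a, b, rtol=1e-4, atol=1e-6)
    print(f'{key}: {a.shape} vs {b.shape} -> allclose={same}')
```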
```
import os
import json
import pickle
import random
from collections import defaultdict, Counter
from indra.literature.adeft_tools import universal_extract_text
from indra.databases.hgnc_client import get_hgnc_name, get_hgnc_id
from adeft.discover import AdeftMiner
from adeft.gui import ground_with_gui
from adeft.modeling.label import AdeftLabeler
from adeft.modeling.classify import AdeftClassifier
from adeft.disambiguate import AdeftDisambiguator
from adeft_indra.ground.ground import AdeftGrounder
from adeft_indra.model_building.s3 import model_to_s3
from adeft_indra.model_building.escape import escape_filename
from adeft_indra.db.content import get_pmids_for_agent_text, get_pmids_for_entity, \
get_plaintexts_for_pmids
adeft_grounder = AdeftGrounder()
shortforms = ['AE', 'AEs']
model_name = ':'.join(sorted(escape_filename(shortform) for shortform in shortforms))
results_path = os.path.abspath(os.path.join('../..', 'results', model_name))
miners = dict()
all_texts = {}
for shortform in shortforms:
pmids = get_pmids_for_agent_text(shortform)
if len(pmids) > 10000:
pmids = random.choices(pmids, k=10000)
text_dict = get_plaintexts_for_pmids(pmids, contains=shortforms)
text_dict = {pmid: text for pmid, text in text_dict.items() if len(text) > 5}
miners[shortform] = AdeftMiner(shortform)
miners[shortform].process_texts(text_dict.values())
all_texts.update(text_dict)
longform_dict = {}
for shortform in shortforms:
longforms = miners[shortform].get_longforms()
longforms = [(longform, count, score) for longform, count, score in longforms
if count*score > 2]
longform_dict[shortform] = longforms
combined_longforms = Counter()
for longform_rows in longform_dict.values():
combined_longforms.update({longform: count for longform, count, score
in longform_rows})
grounding_map = {}
names = {}
for longform in combined_longforms:
groundings = adeft_grounder.ground(longform)
if groundings:
grounding = groundings[0]['grounding']
grounding_map[longform] = grounding
names[grounding] = groundings[0]['name']
longforms, counts = zip(*combined_longforms.most_common())
pos_labels = []
list(zip(longforms, counts))
list(zip(longforms, counts))
grounding_map, names, pos_labels = ground_with_gui(longforms, counts,
grounding_map=grounding_map,
names=names, pos_labels=pos_labels, no_browser=True, port=8891)
result = [grounding_map, names, pos_labels]
result
grounding_map, names, pos_labels = [{'abelmoschus esculentus': 'ungrounded',
'absence epilepsy': 'MESH:D004827',
'absolute error': 'ungrounded',
'absorption efficiency': 'ungrounded',
'acetate': 'CHEBI:CHEBI:30089',
'acetone extract': 'ungrounded',
'acetone water extract': 'ungrounded',
'acetylesterase': 'ungrounded',
'acid etch': 'ungrounded',
'acid extract': 'ungrounded',
'acoustic emission': 'ungrounded',
'acoustoelectric': 'ungrounded',
'acquired epilepsy': 'MESH:D004827',
'acridinium ester': 'ungrounded',
'acrodermatitis enteropathica': 'MESH:C538178',
'acrosomal exocytosis': 'ungrounded',
'activity element': 'ungrounded',
'activity enediyne chromophore': 'ungrounded',
'activity expiration': 'ungrounded',
'acute esophagitis': 'MESH:D004941',
'acute exacerbation': 'ungrounded',
'adapted epitopes': 'MESH:D000939',
'adverse effects': 'NCIT:C41331',
'adverse events': 'NCIT:C41331',
'aerobic exercise': 'MESH:D000076663',
'aerobic exercise training': 'ungrounded',
'aeromedical evacuation': 'ungrounded',
'aerosol': 'ungrounded',
'african eggplant': 'ungrounded',
'after effects': 'ungrounded',
'after excision': 'ungrounded',
'after exercise': 'ungrounded',
'aftereffects': 'ungrounded',
'afterload enhanced': 'ungrounded',
'agronomic efficiency': 'ungrounded',
'airway epithelium': 'MESH:D004848',
'alcohol expectancies': 'ungrounded',
'alcohol exposed': 'ungrounded',
'alcohol exposure': 'ungrounded',
'aldrin epoxidase': 'ungrounded',
'alkylethoxylate': 'ungrounded',
'aloe emodin': 'CHEBI:CHEBI:2607',
'alveolar echinococcosis': 'MESH:C536591',
'amenorrhoeic exercise': 'ungrounded',
'aminoestrogens': 'ungrounded',
'aml1 eto': 'MESH:C107844',
'amla extract': 'ungrounded',
'amniotic epithelial': 'ungrounded',
'anaplastic ependymoma': 'MESH:D004806',
'and effacing': 'ungrounded',
'androstenedione': 'CHEBI:CHEBI:16422',
'anejaculation': 'HP:HP:0012879',
'angioedema': 'MESH:D000799',
'angioembolization': 'ungrounded',
'anhydrous ethanol': 'CHEBI:CHEBI:16236',
'anion exchanger': 'ungrounded',
'anther extrusion': 'ungrounded',
'antiestrogen': 'CHEBI:CHEBI:50739',
'antiestrogens': 'CHEBI:CHEBI:50739',
'antioxidant enzymes': 'ungrounded',
'any events': 'ungrounded',
'aortic endarterectomy': 'MESH:D004691',
'apple extract': 'ungrounded',
'approximately entropy': 'ungrounded',
'aqueous': 'ungrounded',
'aqueous extract': 'ungrounded',
'aralia elata miq seem': 'ungrounded',
'arnebia euchroma': 'ungrounded',
'arylesterase': 'ungrounded',
'ascaris extract': 'ungrounded',
'assimilation efficiency': 'ungrounded',
'asthma exacerbation': 'ungrounded',
'atopic eczema': 'MESH:D003876',
'atrial endocardium': 'MESH:D004699',
'attaching effacing': 'ungrounded',
'auto encoder': 'ungrounded',
'autoencoder': 'ungrounded',
'autoimmune encephalitis': 'MESH:C535841',
'autoimmune epilepsy': 'MESH:D004827',
'axial element': 'GO:GO:0000800',
'axillaris × p exserta': 'ungrounded',
'serious adverse effects': 'ungrounded'},
{'CHEBI:CHEBI:30089': 'acetate',
'MESH:D004827': 'Epilepsy',
'MESH:C538178': 'Acrodermatitis enteropathica',
'MESH:D004941': 'Esophagitis',
'MESH:D000939': 'Epitopes',
'NCIT:C41331': 'Adverse Event',
'MESH:D000076663': 'Endurance Training',
'MESH:D004848': 'Epithelium',
'CHEBI:CHEBI:2607': 'Aloe emodin',
'MESH:C536591': 'Alveolar echinococcosis',
'MESH:C107844': 'AML1-ETO fusion protein, human',
'MESH:D004806': 'Ependymoma',
'CHEBI:CHEBI:16422': 'androst-4-ene-3,17-dione',
'HP:HP:0012879': 'Anejaculation',
'MESH:D000799': 'Angioedema',
'CHEBI:CHEBI:16236': 'ethanol',
'CHEBI:CHEBI:50739': 'estrogen receptor modulator',
'MESH:D004691': 'Endarterectomy',
'MESH:D003876': 'Dermatitis, Atopic',
'MESH:D004699': 'Endocardium',
'MESH:C535841': "Hashimoto's encephalitis",
'GO:GO:0000800': 'lateral element'},
['CHEBI:CHEBI:2607',
'CHEBI:CHEBI:50739',
'MESH:C107844',
'MESH:C536591',
'MESH:D000076663',
'MESH:D000799',
'MESH:C538178',
'NCIT:C41331',
'GO:GO:0000800']]
excluded_longforms = []
grounding_dict = {shortform: {longform: grounding_map[longform]
for longform, _, _ in longforms if longform in grounding_map
and longform not in excluded_longforms}
for shortform, longforms in longform_dict.items()}
result = [grounding_dict, names, pos_labels]
if not os.path.exists(results_path):
os.mkdir(results_path)
with open(os.path.join(results_path, f'{model_name}_preliminary_grounding_info.json'), 'w') as f:
json.dump(result, f)
additional_entities = {}
unambiguous_agent_texts = {}
labeler = AdeftLabeler(grounding_dict)
corpus = labeler.build_from_texts((text, pmid) for pmid, text in all_texts.items())
agent_text_pmid_map = defaultdict(list)
for text, label, id_ in corpus:
agent_text_pmid_map[label].append(id_)
entity_pmid_map = {entity: set(get_pmids_for_entity(*entity.split(':', maxsplit=1),
major_topic=True))for entity in additional_entities}
intersection1 = []
for entity1, pmids1 in entity_pmid_map.items():
for entity2, pmids2 in entity_pmid_map.items():
intersection1.append((entity1, entity2, len(pmids1 & pmids2)))
intersection2 = []
for entity1, pmids1 in agent_text_pmid_map.items():
for entity2, pmids2 in entity_pmid_map.items():
intersection2.append((entity1, entity2, len(set(pmids1) & pmids2)))
intersection1
intersection2
all_used_pmids = set()
for entity, agent_texts in unambiguous_agent_texts.items():
used_pmids = set()
for agent_text in agent_texts:
pmids = set(get_pmids_for_agent_text(agent_text))
new_pmids = list(pmids - all_texts.keys() - used_pmids)
text_dict = get_plaintexts_for_pmids(new_pmids, contains=agent_texts)
corpus.extend([(text, entity, pmid) for pmid, text in text_dict.items()])
used_pmids.update(new_pmids)
all_used_pmids.update(used_pmids)
for entity, pmids in entity_pmid_map.items():
new_pmids = list(set(pmids) - all_texts.keys() - all_used_pmids)
if len(new_pmids) > 10000:
new_pmids = random.choices(new_pmids, k=10000)
text_dict = get_plaintexts_for_pmids(new_pmids, contains=['RTCA', 'RTCD1', 'RPC', 'RTC1', 'RTC'])
corpus.extend([(text, entity, pmid) for pmid, text in text_dict.items()])
names.update(additional_entities)
%%capture
classifier = AdeftClassifier(shortforms, pos_labels=pos_labels, random_state=1729)
param_grid = {'C': [100.0], 'max_features': [10000]}
texts, labels, pmids = zip(*corpus)
classifier.cv(texts, labels, param_grid, cv=5, n_jobs=5)
classifier.stats
disamb = AdeftDisambiguator(classifier, grounding_dict, names)
disamb.dump(model_name, results_path)
print(disamb.info())
model_to_s3(disamb)
```
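As a quick sanity check before relying on the uploaded model, a hedged usage sketch (assuming the `disamb` object trained above is still in memory):

```
# hedged sketch: disambiguate "AE" in a new piece of text with the freshly trained model
example = "Treatment-emergent adverse events (AEs) were reported in both arms of the trial."
print(disamb.disambiguate([example])[0])
```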
# Alignment of MusicBrainz and Wikidata instruments
```
%run -i ../startup.py
ENTITY_TYPE = 'instrument'
```
## Instruments from Wikidata
Wikidata entities which are musical instruments or families of musical instruments:
```
# instance of musical instrument
wd_musical_instruments = sparql("""
SELECT ?instrument ?instrumentLabel ?HornbostelSachs
WHERE {
{ ?instrument wdt:P31* wd:Q34379 . }
UNION
{ ?instrument wdt:P31 wd:Q1254773 . }
OPTIONAL
{ ?instrument wdt:P1762 ?HornbostelSachs . }
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
}
""")
wd_musical_instruments.rename(columns={
'instrument': 'wd', 'instrumentLabel': 'name'}, inplace=True)
wd_musical_instruments.head()
```
Entities with "instrumental" links to MB:
```
# linked to MB instrument
links_from_wd = sparql("""
SELECT (?instrument AS ?wd) ?mbid ?instrumentLabel
WHERE {
?instrument wdt:P1330 ?mbid .
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
}
ORDER BY ASC(?instrumentLabel)
""")
links_from_wd.rename(columns={'instrumentLabel': 'name'}, inplace=True)
display_df(links_from_wd.head())
```
### Wikidata instruments with several MusicBrainz links
Probably needs cleanup
```
set([wd for wd in links_from_wd.wd
if links_from_wd.wd.to_list().count(wd) > 1])
set([mbid for mbid in links_from_wd.mbid
if links_from_wd.mbid.to_list().count(mbid) > 1])
```
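The same check can be written more directly with pandas (an equivalent, hedged alternative):

```
# hedged alternative: let pandas flag the duplicated keys directly
display_df(links_from_wd[links_from_wd.duplicated(subset='wd', keep=False)])
display_df(links_from_wd[links_from_wd.duplicated(subset='mbid', keep=False)])
```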
## Instruments from MusicBrainz with wikidata links
```
links_from_mb = sql("""
SELECT
url.url AS wd,
instrument.gid AS mbid,
instrument.name
FROM url
JOIN l_instrument_url AS llu ON llu.entity1 = url.id
JOIN instrument ON llu.entity0 = instrument.id
WHERE
url.url LIKE '%%wikidata.org%%'
ORDER BY instrument.name
;
""")
links_from_mb.wd = links_from_mb.wd.apply(lambda s: s.split('/')[-1])
links_from_mb.mbid = links_from_mb.mbid.apply(str)
display_df(links_from_mb.head())
```
### MusicBrainz instruments with several Wikidata links
Probably needs cleanup
```
set([wd for wd in links_from_mb.wd
if links_from_mb.wd.to_list().count(wd) > 1])
set([mbid for mbid in links_from_mb.mbid
if links_from_mb.mbid.to_list().count(mbid) > 1])
```
## Data alignment
```
merge = pd.merge(links_from_wd, links_from_mb,
on=['wd', 'mbid'], suffixes=('_wd', '_mb'),
how='outer', indicator=True)
display_df(merge.head())
# link in mb but missing in wd
links_to_add_to_wd = merge.loc[lambda x : x['_merge']=='right_only'][['name_mb', 'mbid', 'wd']]
display_df(links_to_add_to_wd)
```
24 links in MB that are not in WD
```
# link in wd but missing in mb
links_to_add_to_mb = merge.loc[lambda x : x['_merge']=='left_only'][['name_wd', 'wd', 'mbid']]
display_df(links_to_add_to_mb)
```
9 links in WD that are not in MB
Among those mismatches, some are not recognized because of redirects on the WD side: Q54995817 redirects to Q4138014, and Q16033036 to Q3181140.
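A hedged sketch to confirm those redirects directly from the query service (assuming WDQS exposes redirects as `owl:sameAs`, and that the `sparql` helper accepts any query):

```
# hedged sketch: resolve the suspected Wikidata redirects
sparql("""
SELECT ?from ?to
WHERE {
  VALUES ?from { wd:Q54995817 wd:Q16033036 }
  ?from owl:sameAs ?to .
}
""")
```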
## Instruments from MusicBrainz without wikidata links
```
no_links_from_mb = sql("""
SELECT
gid AS mbid,
name
FROM
instrument
WHERE
id NOT IN (
SELECT
instrument.id
FROM url
JOIN l_instrument_url AS llu ON llu.entity1 = url.id
JOIN instrument ON llu.entity0 = instrument.id
WHERE
url.url LIKE '%%wikidata.org%%'
)
;
""")
no_links_from_mb.mbid = no_links_from_mb.mbid.apply(str)
display_df(no_links_from_mb)
```
## Alignment suggestions
### Exact match
Exact match between instrument names in WD and MB:
```
no_links_merge = pd.merge(no_links_from_mb, wd_musical_instruments,
on='name', how='inner', indicator=False)
display_df(no_links_merge)
```
### With fuzzy-matching library
Using fuzzy-matching to find close instrument names:
```
import fuzzymatcher
match = fuzzymatcher.fuzzy_left_join(
no_links_from_mb, wd_musical_instruments[['wd', 'name']],
left_on='name', right_on='name')[['best_match_score', 'mbid',
'name_left', 'name_right', 'wd']]
match = match[match['best_match_score'] > 0.09].sort_values(by='best_match_score',
ascending=False)
display_df(match, index=False)
```
### With record linkage library
```
import recordlinkage
# Indexation step
indexer = recordlinkage.SortedNeighbourhoodIndex('name', window=9)
pairs = indexer.index(no_links_from_mb, wd_musical_instruments[['wd', 'name']])
print(len(pairs))
# Comparison step
compare_cl = recordlinkage.Compare()
compare_cl.string('name', 'name', method='jarowinkler',
threshold=0.9, label='name')
features = compare_cl.compute(pairs, no_links_from_mb, wd_musical_instruments[['wd', 'name']])
print(features[features.sum(axis=1) > 0].shape)
# Classification step
linkage = []
for (idx0, idx1) in features[features.sum(axis=1) > 0].index:
linkage.append([
no_links_from_mb.loc[idx0]['mbid'],
no_links_from_mb.loc[idx0]['name'],
wd_musical_instruments.loc[idx1]['name'],
wd_musical_instruments.loc[idx1]['wd'],
])
display_df(pd.DataFrame(linkage, columns=('mbid', 'name_left', 'name_right', 'wd')),
index=False)
```
## Report
```
import jinja2
template = jinja2.Template("""
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Alignment of MusicBrainz and Wikidata Instruments</title>
<link href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
</head>
<body style="margin: 20px;">
<h1>Alignment of MusicBrainz and Wikidata Instruments</h1>
<p>Latest MB database update: {{ MB_DATABASE_VERSION }}</p>
<p>Latest update: {{ date.today().isoformat() }}</p>
<ol>
<li><a href="#wd2mb">Add missing Wikidata links to MusicBrainz</a></li>
<li><a href="#mb2wd">Add missing MusicBrainz links to Wikidata</a></li>
<li><a href="#alignment">Missing alignment suggestions</a>
</ol>
<h2 id="wd2mb">Add missing Wikidata links to MusicBrainz</h2>
{{ df_to_html(links_to_add_to_mb) }}
<h2 id="mb2wd">Add missing MusicBrainz links to Wikidata</h2>
{{ df_to_html(links_to_add_to_wd) }}
<h2 id="alignment">Missing alignment suggestions</h2>
<h3>Alignment on exact names</h3>
{{ df_to_html(no_links_merge) }}
<h3>Alignment on fuzzy matching</h3>
{{ df_to_html(match) }}
</body>
</html>
""")
with open('../docs/wd-instruments-report.html', 'w') as f:
f.write(template.render(**globals())
            .replace('&lt;', '<').replace('&gt;', '>')  # un-escape HTML entities so the tables render as real tags
.replace('class="dataframe"', 'class="table table-striped table-hover table-sm"')
.replace('thead', 'thead class="thead-light"'))
```
# F1 statistics & simulation
\[_In case you’re unable to see the atoti visualizations in GitHub, try viewing the notebook in [nbviewer](https://nbviewer.org/github/atoti/notebooks/blob/master/notebooks/formula-one/main.ipynb)._]
## Introduction
Credit:
The original version of this notebook was created by David Chevrier, Diggers.
More information: https://www.diggers-consulting.com/blog/articles/how-data-science-could-turn-felipe-massa-a-f1-world-champion
<div style="text-align: center;" ><a href="https://www.atoti.io/?utm_source=gallery&utm_content=formula-one" target="_blank" rel="noopener noreferrer"><img src="https://data.atoti.io/notebooks/banners/discover.png" alt="Try atoti"></a></div>
## Dataset
Data from https://ergast.com/mrd/db/#csv
F1 data from 1950 to 2019
## Initialization & creation of the atoti session
```
import atoti as tt
import numpy as np
import pandas as pd
session = tt.create_session()
```
## Creation of the stores
```
drivers_df = pd.read_csv(
"https://data.atoti.io/notebooks/formula-one/drivers.csv", encoding="latin-1"
)
drivers_df.rename(columns={"url": "driver_url"}, inplace=True)
drivers_df.rename(columns={"nationality": "driver_nationality"}, inplace=True)
drivers_table = session.read_pandas(
drivers_df, keys=["driverId"], table_name="F1 drivers"
)
drivers_table.head()
races_df = pd.read_csv(
"https://data.atoti.io/notebooks/formula-one/races.csv", encoding="latin-1"
)
races_df.rename(columns={"url": "race_url"}, inplace=True)
races_df.rename(columns={"name": "race_name"}, inplace=True)
races_df["year"] = races_df["year"].astype("str")
# by setting year as key, it will be converted into hierarchy
races_table = session.read_pandas(
races_df,
keys=["raceId", "year"],
table_name="F1 races",
)
races_table.head()
constructors_df = pd.read_csv(
"https://data.atoti.io/notebooks/formula-one/constructors.csv", encoding="latin-1"
)
constructors_df.rename(columns={"url": "constructor_url"}, inplace=True)
constructors_df.rename(columns={"name": "constructor_name"}, inplace=True)
constructors_df.rename(columns={"nationality": "constructor_nationality"}, inplace=True)
constructors_table = session.read_pandas(
constructors_df,
keys=["constructorId"],
table_name="F1 constructors",
)
constructors_table.head()
results_df = pd.read_csv("https://data.atoti.io/notebooks/formula-one/results.csv")
results_df.loc[results_df["fastestLapSpeed"] == "\\N", "fastestLapSpeed"] = None
results_df.loc[results_df["position"] == "\\N", "position"] = None
# cast position to numeric
results_df["fastestLapSpeed"] = pd.to_numeric(results_df["fastestLapSpeed"])
results_df["position"] = pd.to_numeric(results_df["position"])
resultsTypes = {
"points": tt.type.DOUBLE,
"position": tt.type.INT,
}
results_table = session.read_pandas(
results_df,
keys=["resultId", "raceId", "driverId", "constructorId"],
table_name="F1 results",
types=resultsTypes,
)
results_table.head()
results_table.join(drivers_table, mapping={"driverId": "driverId"})
results_table.join(races_table, mapping={"raceId": "raceId"})
results_table.join(constructors_table, mapping={"constructorId": "constructorId"})
```
## Cube
```
f1cube = session.create_cube(results_table, "F1Cube")
f1cube.schema
h, l, m = f1cube.hierarchies, f1cube.levels, f1cube.measures
```
## Analysis
### Driver with the most races
Barrichello accumulated more than 326 races over his career!
```
session.visualize("Total number of races per driver")
```
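For readers without the interactive widget, a hedged query sketch of the same figure (atoti's default `contributors.COUNT` counts the result rows per driver):

```
# hedged sketch: number of race results per driver, as a plain data frame
races_per_driver = f1cube.query(m["contributors.COUNT"], levels=[l["driverRef"]])
races_per_driver.sort_values("contributors.COUNT", ascending=False).head()
```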
### Fastest drivers
```
m["fastestLapSpeed.MIN"] = tt.agg.min(results_table["fastestLapSpeed"])
m["Fastest driver"] = tt.where(
m["fastestLapSpeed.MIN"] != None,
tt.agg.min_member(m["fastestLapSpeed.MIN"], l["driverRef"]),
)
m["Fastest year"] = tt.where(
m["fastestLapSpeed.MIN"] != None,
tt.agg.min_member(m["fastestLapSpeed.MIN"], l["year"]),
)
m["fastestLapSpeed.MIN"].formatter = "DOUBLE[#.000]"
m["Fastest year"].formatter = "DOUBLE[#]"
session.visualize()
```
### Top driver and constructors
Let's create some measures to return the driver and constructor with the most points.
```
m["Score"] = m["points.SUM"]
m["Top driver"] = tt.agg.max_member(m["Score"], l["driverRef"])
m["Top constructor"] = tt.agg.max_member(m["Score"], l["constructor_name"])
session.visualize("Top driver and constructor by year")
```
Lewis Hamilton and his team Mercedes have been dominating the championship for almost the last 6 years.
Let's compare drivers' scores:
```
session.visualize("Score per drivers")
```
Hamilton is the driver with the most points ever. However, it is unfair to compare points across seasons since the scoring systems are not constant and the most recent seasons award more points to drivers. This explains why drivers like Hamilton and Vettel have many more points than Schumacher while having run and scored in fewer races.
### Different scoring systems
Scoring rules for Formula One have [changed over time](https://en.wikipedia.org/wiki/List_of_Formula_One_World_Championship_points_scoring_systems). To make a fair comparison, let's normalize the points across all years by applying a constant scoring across all seasons.
First, we'll determine the maximum position across all seasons:
```
m["position.MAX"] = tt.agg.max(results_table["position"])
maxPositionDf = f1cube.query(m["position.MAX"])
maxPositionDf
```
This means that there have never been more than 33 classified drivers in one race. We'll use that information to create a data frame representing the points awarded for each position under the different scoring systems:
```
# index = scoring system
scoring_systems = [
"Scoring 1950 to 1953",
"Scoring 1954 to 1957 & 1959",
"Scoring 1958",
"Scoring 1960",
"Scoring 1961 to 1962 & 1966",
"Scoring 1963 to 1965",
"Scoring 1967 & 1969 & 1971",
"Scoring 1968 & 1972",
"Scoring 1970",
"Scoring 1973 to 1974",
"Scoring 1975",
"Scoring 1976 & 1978",
"Scoring 1977",
"Scoring 1979",
"Scoring 1980",
"Scoring 1981 to 1990",
"Scoring 1991 to 2002",
"Scoring 2003 to 2009",
"Scoring 2010 to 2020",
]
scoring_positions = np.arange(1, maxPositionDf["position.MAX"][0] + 1)
# a column for each position
# values = number of points scored for this year's race position
dfscoring = pd.DataFrame(
0,
index=scoring_systems,
columns=scoring_positions,
)
dfscoring.loc["Scoring 1950 to 1953"][1, 2, 3, 4, 5] = [8, 6, 4, 3, 2]
dfscoring.loc["Scoring 1954 to 1957 & 1959"] = dfscoring.loc["Scoring 1950 to 1953"]
dfscoring.loc["Scoring 1958"] = dfscoring.loc["Scoring 1950 to 1953"]
dfscoring.loc["Scoring 1960"][1, 2, 3, 4, 5, 6] = [8, 6, 4, 3, 2, 1]
dfscoring.loc["Scoring 1961 to 1962 & 1966"][1, 2, 3, 4, 5, 6] = [9, 6, 4, 3, 2, 1]
dfscoring.loc["Scoring 1967 & 1969 & 1971"] = dfscoring.loc[
"Scoring 1961 to 1962 & 1966"
]
dfscoring.loc["Scoring 1968 & 1972"] = dfscoring.loc["Scoring 1961 to 1962 & 1966"]
dfscoring.loc["Scoring 1970"] = dfscoring.loc["Scoring 1961 to 1962 & 1966"]
dfscoring.loc["Scoring 1973 to 1974"] = dfscoring.loc["Scoring 1961 to 1962 & 1966"]
dfscoring.loc["Scoring 1975"] = dfscoring.loc["Scoring 1961 to 1962 & 1966"]
dfscoring.loc["Scoring 1976 & 1978"] = dfscoring.loc["Scoring 1961 to 1962 & 1966"]
dfscoring.loc["Scoring 1977"] = dfscoring.loc["Scoring 1961 to 1962 & 1966"]
dfscoring.loc["Scoring 1979"] = dfscoring.loc["Scoring 1961 to 1962 & 1966"]
dfscoring.loc["Scoring 1980"] = dfscoring.loc["Scoring 1961 to 1962 & 1966"]
dfscoring.loc["Scoring 1981 to 1990"] = dfscoring.loc["Scoring 1961 to 1962 & 1966"]
dfscoring.loc["Scoring 1991 to 2002"][1, 2, 3, 4, 5, 6] = [10, 6, 4, 3, 2, 1]
dfscoring.loc["Scoring 2003 to 2009"][1, 2, 3, 4, 5, 6, 7, 8] = [
10,
8,
6,
5,
4,
3,
2,
1,
]
# FIXME 2014 + fastlap 2019+
dfscoring.loc["Scoring 2010 to 2020"][1, 2, 3, 4, 5, 6, 7, 8, 9, 10] = [
25,
18,
15,
12,
10,
8,
6,
4,
2,
1,
]
dfscoring
dfbestresult = pd.DataFrame(0, index=scoring_systems, columns=["Best result"])
dfbestresult.loc["Scoring 1950 to 1953"] = 4
dfbestresult.loc["Scoring 1954 to 1957 & 1959"] = 5
dfbestresult.loc["Scoring 1958"] = 6
dfbestresult.loc["Scoring 1960"] = 6
dfbestresult.loc["Scoring 1961 to 1962 & 1966"] = 5
dfbestresult.loc["Scoring 1963 to 1965"] = 6
# FIXME
dfbestresult.loc["Scoring 1967 & 1969 & 1971"] = 9
dfbestresult.loc["Scoring 1968 & 1972"] = 10
dfbestresult.loc["Scoring 1970"] = 11
dfbestresult.loc["Scoring 1973 to 1974"] = 13
dfbestresult.loc["Scoring 1975"] = 12
dfbestresult.loc["Scoring 1976 & 1978"] = 14
dfbestresult.loc["Scoring 1977"] = 15
dfbestresult.loc["Scoring 1979"] = 8
dfbestresult.loc["Scoring 1980"] = 10
dfbestresult.loc["Scoring 1981 to 1990"] = 11
dfbestresult.loc["Scoring 1991 to 2002"] = 1000
dfbestresult.loc["Scoring 2003 to 2009"] = 1000
dfbestresult.loc["Scoring 2010 to 2020"] = 1000
dfbestresult
```
### Score implementation
Using `atoti.create_parameter_simulation`, we create 2 new measures - `m["Best results"]`, which defaults to 1000, and `m["points substitute"]`, which defaults to None.
These 2 measures will allow us to perform simulations on the scoring systems to see how they impact the championship standings.
```
pointssystem_sim = f1cube.create_parameter_simulation(
"pointssystem_sim",
measures={"Best results": 1000, "points substitute": None},
levels=[l["positionText"], l["resultId"]],
)
m["Best results.MEAN"] = tt.agg.mean(
m["Best results"],
scope=tt.scope.origin(l["race_name"], l["year"], l["positionText"], l["resultId"]),
)
```
As `m["Best results"]` is constant for each positionText of the race year, the above measure will provide us the `Best result` at the total level as we can see in the following visualization.
```
session.visualize()
```
We update the definition of `m["points.SUM"]` to make use of the `m["points substitute"]` parameter created earlier.
Whenever a simulated value is provided for `m["points substitute"]`, it is used in place of the original value from the **results_table**.
```
m["points.SUM"] = tt.agg.sum(
tt.where(
m["points substitute"] == None,
tt.agg.sum(results_table["points"]),
m["points substitute"],
),
scope=tt.scope.origin(l["positionText"], l["resultId"]),
)
```
We can assign a ranking for the race based on `m["points.SUM"]`.
```
m["Result Rank"] = tt.rank(m["points.SUM"], h["race_name"], ascending=False)
```
Different scoring systems take a different number of best results into account.
With `m["Result Rank"]`, we assign the simulated points only when a result is ranked within the accepted Best results range.
Note: we assign `m["points.SUM"]` to the base scenario (original dataset) regardless of the rank, as declared in the `where` clause.
```
m["Score"] = tt.agg.sum(
tt.where(
(l["pointssystem_sim"] == "Base") | (m["Result Rank"] <= m["Best results"]),
m["points.SUM"],
0,
),
scope=tt.scope.origin(l["race_name"], l["positionText"], l["resultId"]),
)
```
### Simulations
Using the dataframe created before, we'll set up one simulation per scoring system:
```
# Feed of the different scenarios with points from related scoring systems
for scoring_name in scoring_systems:
simulated_best_results = int(dfbestresult.loc[scoring_name])
for i in scoring_positions:
simulated_points = float(dfscoring.loc[scoring_name][i])
pointssystem_sim += (
scoring_name,
str(i),
None,
simulated_best_results,
simulated_points,
)
# Example of scoring scenario
pointssystem_sim.head(10)
session.visualize("Top driver by year across scenarios")
# Apply conditional formatting via MDX
# Member [Measures].[Top driver (1)] AS [Measures].[Top driver], FORE_COLOR = iif(
# [Measures].[Top driver] <> (
# [Measures].[Top driver],
# [Measure Simulations].[pointssystem_sim].[pointssystem_sim].[Base]
# ),
# "red",
# NULL
# )
session.visualize("Champion comparison")
```
### Showing the differences for the race result of a famous GP, Brazil 2008
Here we see how the 1991-2002 scoring system makes a big difference between championship rivals Massa & Hamilton.
See race summary here: https://www.youtube.com/watch?v=XHSeGou-pCI ;)
```
session.visualize("Base vs. Simulation - Brazil 2008")
```
### Showing the differences for the 2008 world championship result
Would Felipe Massa have become world champ in 2008 with the 1991-2002 scoring system?? YES!
```
session.visualize("Base vs. Simulation System 1991 to 2002 - 2008 Championship")
```
Massa would have overtaken Hamilton in the championship standings under the 1991-2002 scoring system, and there would have been no tie between Räikkönen and Kubica either.
<div style="text-align: center;" ><a href="https://www.atoti.io/?utm_source=gallery&utm_content=formula-one" target="_blank" rel="noopener noreferrer"><img src="https://data.atoti.io/notebooks/banners/discover-try.png" alt="Try atoti"></a></div>
|
github_jupyter
|
import atoti as tt
import numpy as np
import pandas as pd
session = tt.create_session()
drivers_df = pd.read_csv(
"https://data.atoti.io/notebooks/formula-one/drivers.csv", encoding="latin-1"
)
drivers_df.rename(columns={"url": "driver_url"}, inplace=True)
drivers_df.rename(columns={"nationality": "driver_nationality"}, inplace=True)
drivers_table = session.read_pandas(
drivers_df, keys=["driverId"], table_name="F1 drivers"
)
drivers_table.head()
races_df = pd.read_csv(
"https://data.atoti.io/notebooks/formula-one/races.csv", encoding="latin-1"
)
races_df.rename(columns={"url": "race_url"}, inplace=True)
races_df.rename(columns={"name": "race_name"}, inplace=True)
races_df["year"] = races_df["year"].astype("str")
# by setting year as key, it will be converted into hierarchy
races_table = session.read_pandas(
races_df,
keys=["raceId", "year"],
table_name="F1 races",
)
races_table.head()
constructors_df = pd.read_csv(
"https://data.atoti.io/notebooks/formula-one/constructors.csv", encoding="latin-1"
)
constructors_df.rename(columns={"url": "constructor_url"}, inplace=True)
constructors_df.rename(columns={"name": "constructor_name"}, inplace=True)
constructors_df.rename(columns={"nationality": "constructor_nationality"}, inplace=True)
constructors_table = session.read_pandas(
constructors_df,
keys=["constructorId"],
table_name="F1 constructors",
)
constructors_table.head()
results_df = pd.read_csv("https://data.atoti.io/notebooks/formula-one/results.csv")
results_df.loc[results_df["fastestLapSpeed"] == "\\N", "fastestLapSpeed"] = None
results_df.loc[results_df["position"] == "\\N", "position"] = None
# cast position to numeric
results_df["fastestLapSpeed"] = pd.to_numeric(results_df["fastestLapSpeed"])
results_df["position"] = pd.to_numeric(results_df["position"])
resultsTypes = {
"points": tt.type.DOUBLE,
"position": tt.type.INT,
}
results_table = session.read_pandas(
results_df,
keys=["resultId", "raceId", "driverId", "constructorId"],
table_name="F1 results",
types=resultsTypes,
)
results_table.head()
results_table.join(drivers_table, mapping={"driverId": "driverId"})
results_table.join(races_table, mapping={"raceId": "raceId"})
results_table.join(constructors_table, mapping={"constructorId": "constructorId"})
f1cube = session.create_cube(results_table, "F1Cube")
f1cube.schema
h, l, m = f1cube.hierarchies, f1cube.levels, f1cube.measures
session.visualize("Total number of races per driver")
m["fastestLapSpeed.MIN"] = tt.agg.min(results_table["fastestLapSpeed"])
m["Fastest driver"] = tt.where(
m["fastestLapSpeed.MIN"] != None,
tt.agg.min_member(m["fastestLapSpeed.MIN"], l["driverRef"]),
)
m["Fastest year"] = tt.where(
m["fastestLapSpeed.MIN"] != None,
tt.agg.min_member(m["fastestLapSpeed.MIN"], l["year"]),
)
m["fastestLapSpeed.MIN"].formatter = "DOUBLE[#.000]"
m["Fastest year"].formatter = "DOUBLE[#]"
session.visualize()
m["Score"] = m["points.SUM"]
m["Top driver"] = tt.agg.max_member(m["Score"], l["driverRef"])
m["Top constructor"] = tt.agg.max_member(m["Score"], l["constructor_name"])
session.visualize("Top driver and constructor by year")
session.visualize("Score per drivers")
m["position.MAX"] = tt.agg.max(results_table["position"])
maxPositionDf = f1cube.query(m["position.MAX"])
maxPositionDf
# index = scoring system
scoring_systems = [
"Scoring 1950 to 1953",
"Scoring 1954 to 1957 & 1959",
"Scoring 1958",
"Scoring 1960",
"Scoring 1961 to 1962 & 1966",
"Scoring 1963 to 1965",
"Scoring 1967 & 1969 & 1971",
"Scoring 1968 & 1972",
"Scoring 1970",
"Scoring 1973 to 1974",
"Scoring 1975",
"Scoring 1976 & 1978",
"Scoring 1977",
"Scoring 1979",
"Scoring 1980",
"Scoring 1981 to 1990",
"Scoring 1991 to 2002",
"Scoring 2003 to 2009",
"Scoring 2010 to 2020",
]
scoring_positions = np.arange(1, maxPositionDf["position.MAX"][0] + 1)
# a column for each position
# values = number of points scored for this year's race position
dfscoring = pd.DataFrame(
0,
index=scoring_systems,
columns=scoring_positions,
)
dfscoring.loc["Scoring 1950 to 1953"][1, 2, 3, 4, 5] = [8, 6, 4, 3, 2]
dfscoring.loc["Scoring 1954 to 1957 & 1959"] = dfscoring.loc["Scoring 1950 to 1953"]
dfscoring.loc["Scoring 1958"] = dfscoring.loc["Scoring 1950 to 1953"]
dfscoring.loc["Scoring 1960"][1, 2, 3, 4, 5, 6] = [8, 6, 4, 3, 2, 1]
dfscoring.loc["Scoring 1961 to 1962 & 1966"][1, 2, 3, 4, 5, 6] = [9, 6, 4, 3, 2, 1]
dfscoring.loc["Scoring 1967 & 1969 & 1971"] = dfscoring.loc[
"Scoring 1961 to 1962 & 1966"
]
dfscoring.loc["Scoring 1968 & 1972"] = dfscoring.loc["Scoring 1961 to 1962 & 1966"]
dfscoring.loc["Scoring 1970"] = dfscoring.loc["Scoring 1961 to 1962 & 1966"]
dfscoring.loc["Scoring 1973 to 1974"] = dfscoring.loc["Scoring 1961 to 1962 & 1966"]
dfscoring.loc["Scoring 1975"] = dfscoring.loc["Scoring 1961 to 1962 & 1966"]
dfscoring.loc["Scoring 1976 & 1978"] = dfscoring.loc["Scoring 1961 to 1962 & 1966"]
dfscoring.loc["Scoring 1977"] = dfscoring.loc["Scoring 1961 to 1962 & 1966"]
dfscoring.loc["Scoring 1979"] = dfscoring.loc["Scoring 1961 to 1962 & 1966"]
dfscoring.loc["Scoring 1980"] = dfscoring.loc["Scoring 1961 to 1962 & 1966"]
dfscoring.loc["Scoring 1981 to 1990"] = dfscoring.loc["Scoring 1961 to 1962 & 1966"]
dfscoring.loc["Scoring 1991 to 2002"][1, 2, 3, 4, 5, 6] = [10, 6, 4, 3, 2, 1]
dfscoring.loc["Scoring 2003 to 2009"][1, 2, 3, 4, 5, 6, 7, 8] = [
10,
8,
6,
5,
4,
3,
2,
1,
]
# FIXME 2014 + fastlap 2019+
dfscoring.loc["Scoring 2010 to 2020"][1, 2, 3, 4, 5, 6, 7, 8, 9, 10] = [
25,
18,
15,
12,
10,
8,
6,
4,
2,
1,
]
dfscoring
dfbestresult = pd.DataFrame(0, index=scoring_systems, columns=["Best result"])
dfbestresult.loc["Scoring 1950 to 1953"] = 4
dfbestresult.loc["Scoring 1954 to 1957 & 1959"] = 5
dfbestresult.loc["Scoring 1958"] = 6
dfbestresult.loc["Scoring 1960"] = 6
dfbestresult.loc["Scoring 1961 to 1962 & 1966"] = 5
dfbestresult.loc["Scoring 1963 to 1965"] = 6
# FIXME
dfbestresult.loc["Scoring 1967 & 1969 & 1971"] = 9
dfbestresult.loc["Scoring 1968 & 1972"] = 10
dfbestresult.loc["Scoring 1970"] = 11
dfbestresult.loc["Scoring 1973 to 1974"] = 13
dfbestresult.loc["Scoring 1975"] = 12
dfbestresult.loc["Scoring 1976 & 1978"] = 14
dfbestresult.loc["Scoring 1977"] = 15
dfbestresult.loc["Scoring 1979"] = 8
dfbestresult.loc["Scoring 1980"] = 10
dfbestresult.loc["Scoring 1981 to 1990"] = 11
dfbestresult.loc["Scoring 1991 to 2002"] = 1000
dfbestresult.loc["Scoring 2003 to 2009"] = 1000
dfbestresult.loc["Scoring 2010 to 2020"] = 1000
dfbestresult
pointssystem_sim = f1cube.create_parameter_simulation(
"pointssystem_sim",
measures={"Best results": 1000, "points substitute": None},
levels=[l["positionText"], l["resultId"]],
)
m["Best results.MEAN"] = tt.agg.mean(
m["Best results"],
scope=tt.scope.origin(l["race_name"], l["year"], l["positionText"], l["resultId"]),
)
session.visualize()
m["points.SUM"] = tt.agg.sum(
tt.where(
m["points substitute"] == None,
tt.agg.sum(results_table["points"]),
m["points substitute"],
),
scope=tt.scope.origin(l["positionText"], l["resultId"]),
)
m["Result Rank"] = tt.rank(m["points.SUM"], h["race_name"], ascending=False)
m["Score"] = tt.agg.sum(
tt.where(
(l["pointssystem_sim"] == "Base") | (m["Result Rank"] <= m["Best results"]),
m["points.SUM"],
0,
),
scope=tt.scope.origin(l["race_name"], l["positionText"], l["resultId"]),
)
# Feed of the different scenarios with points from related scoring systems
for scoring_name in scoring_systems:
simulated_best_results = int(dfbestresult.loc[scoring_name])
for i in scoring_positions:
simulated_points = float(dfscoring.loc[scoring_name][i])
pointssystem_sim += (
scoring_name,
str(i),
None,
simulated_best_results,
simulated_points,
)
# Example of scoring scenario
pointssystem_sim.head(10)
session.visualize("Top driver by year across scenarios")
# Apply conditional formatting via MDX
# Member [Measures].[Top driver (1)] AS [Measures].[Top driver], FORE_COLOR = iif(
# [Measures].[Top driver] <> (
# [Measures].[Top driver],
# [Measure Simulations].[pointssystem_sim].[pointssystem_sim].[Base]
# ),
# "red",
# NULL
# )
session.visualize("Champion comparison")
session.visualize("Base vs. Simulation - Brazil 2008")
session.visualize("Base vs. Simulation System 1991 to 2002 - 2008 Championship")
| 0.374676 | 0.893681 |
<h1><center>CS 455/595a: Artificial Neural Network Demonstrations - 2020</center></h1>
<center>Richard S. Stansbury</center>
This notebook applies the ANN techniques for the Titanic Survivors and Boston Housing Prediction models covered in [1] with the [Titanic](https://www.kaggle.com/c/titanic/) and [Boston Housing](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html) data sets.
This demonstration focuses on showcasing the Keras API to implement an ANN classifier and an ANN regressor for each data set.
References:
[1] Aurélien Géron. *Hands-On Machine Learning with Scikit-Learn & TensorFlow*. O'Reilly Media Inc, 2017.
[2] Aurélien Géron. "ageron/handson-ml: A series of Jupyter notebooks that walk you through the fundamentals of Machine Learning and Deep Learning in python using Scikit-Learn and TensorFlow." Github.com, online at: https://github.com/ageron/handson-ml [last accessed 2019-03-01]
[3] Aurélien Géron. *Hands-On Machine Learning with Scikit-Learn, Keras, & TensorFlow*, 2nd Edition. O'Reilly Media Inc, 2019.
[4] Aurélien Géron. "ageron/handson-ml2: A series of Jupyter notebooks that walk you through the fundamentals of Machine Learning and Deep Learning in python using Scikit-Learn and TensorFlow." Github.com, online at: https://github.com/ageron/handson-ml2 [last accessed 2020-04-01]
**Table of Contents**
1. [Titanic Survivor ANN Classifiers](#Titanic-Survivor-Classifier)
2. [Boston Housing Cost ANN Regressor](#Boston-Housing-Cost-Estimator)
3. [MNIST CNN Demonstration](#MNIST-CNN-Demonstration)
## Library and Data Setup
```
from matplotlib import pyplot as plt
%matplotlib inline
import numpy as np
import pandas as pd
import os
# TensorFlow ≥2.0 is required
import tensorflow as tf
from tensorflow import keras
assert tf.__version__ >= "2.0"
assert keras.__version__ >= "2.0"
# From Ageron's demo: if running under Colaboratory, this will remind you to turn on your GPU.
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
IS_COLAB = True
except Exception:
IS_COLAB = False
if not tf.config.list_physical_devices('GPU'):
print("No GPU was detected. CNNs can be very slow without a GPU.")
if IS_COLAB:
print("Go to Runtime > Change runtime and select a GPU hardware accelerator.")
```
Import the data and apply pipelines to pre-process the data.
```
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.model_selection import train_test_split
# Read data from input files into Pandas data frames
data_path = os.path.join("datasets","titanic")
train_filename = "train.csv"
test_filename = "test.csv"
def read_csv(data_path, filename):
joined_path = os.path.join(data_path, filename)
return pd.read_csv(joined_path)
# Read CSV file into Pandas Dataframes
train_df = read_csv(data_path, train_filename)
# Defining Data Pre-Processing Pipelines
class DataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, attributes):
self.attributes = attributes
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attributes]
class MostFrequentImputer(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
self.most_frequent = pd.Series([X[c].value_counts().index[0] for c in X],
index = X.columns)
return self
def transform(self, X):
return X.fillna(self.most_frequent)
numeric_pipe = Pipeline([
("Select", DataFrameSelector(["Age", "Fare", "SibSp", "Parch"])), # Selects Fields from dataframe
("Imputer", SimpleImputer(strategy="median")), # Fills in NaN w/ median value for its column
("Scaler", StandardScaler()),
])
categories_pipe = Pipeline([
("Select", DataFrameSelector(["Pclass", "Sex"])), # Selects Fields from dataframe
("MostFreqImp", MostFrequentImputer()), # Fill in NaN with most frequent
("OneHot", OneHotEncoder(sparse=False, categories='auto')), # Onehot encode
])
preprocessing_pipe = FeatureUnion(transformer_list = [
("numeric pipeline", numeric_pipe),
("categories pipeline", categories_pipe)
])
# Process Input Data Using Pipelines
X_data = preprocessing_pipe.fit_transform(train_df)
y_data = train_df["Survived"].values.reshape(-1,1)
# Process the output data.
feature_names = ["Age", "Fare", "SibSp", "Parch", "Class0", "class1","Sex0", "Sex1"]
print(X_data.shape)
print(y_data.shape)
```
Split the data into a training and validation set.
```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size = 0.20)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.33)
print(X_train.shape)
print(X_test.shape)
print(X_val.shape)
print(y_train.shape)
print(y_test.shape)
print(y_val.shape)
```
## Model Set Up
```
model = keras.models.Sequential([
keras.layers.Input(shape=X_train.shape[1:]),
keras.layers.Dense(50, activation='relu'),
keras.layers.Dense(50, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
])
model.layers
model.summary()
from sklearn.tree import export_graphviz
from IPython.display import Image as PImage
import pydot
keras.utils.plot_model(model, "Titanic survivor classifier.png", show_shapes=True)
```
## Model Training
```
keras.backend.clear_session()
lr = 0.1
model.compile(loss='binary_crossentropy',
optimizer=keras.optimizers.SGD(learning_rate=lr),
metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=20, validation_data=(X_val,y_val))
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 1)
plt.show()
```
## Model Validation
```
from sklearn.metrics import confusion_matrix, precision_score, recall_score, accuracy_score, f1_score
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.metrics import roc_curve, precision_recall_curve
def plot_precision_recall_curve(y, y_score):
"""
Prints a precision vs. recall curve.
"""
precisions, recalls, thresholds = precision_recall_curve(y, y_score)
plt.figure(figsize=(8, 6))
plt.title("Precision-Recall Curve")
plt.plot(recalls, precisions, "b-", linewidth=2)
plt.xlabel("Recall", fontsize=16)
plt.ylabel("Precision", fontsize=16)
plt.axis([0, 1, 0, 1])
plt.show()
def plot_roc(y, y_score):
"""
Prints a Receiver Operating Characteristic (ROC) Curve
"""
fpr, tpr, thresholds = roc_curve(y, y_score)
plt.figure(figsize=(8, 6))
plt.title("ROC Curve")
plt.plot(fpr, tpr, linewidth=2)
plt.plot([0, 1], [0, 1], 'k--')
plt.axis([0,1,0,1])
plt.xlabel("False Positive Rate (FPR)")
plt.ylabel("True Positive Rate (TPR)")
plt.show()
def evaluate_classifier(y, y_pred):
"""
Prints the confusion matrix, precision score, recall score, and f1 score
"""
print("Confusion Matrix:")
print(confusion_matrix(y, y_pred))
print("Pecision Score = " + str(precision_score(y, y_pred)))
print("Recall Score = " + str(recall_score(y,y_pred)))
print("F1 Score = " + str(f1_score(y,y_pred)))
y_pred_prob = model.predict(X_test)
plot_precision_recall_curve(y_test, y_pred_prob)
plot_roc(y_test, y_pred_prob)
y_pred = [1 if y > 0.5 else 0 for y in y_pred_prob]
evaluate_classifier(y_test, y_pred)
```
# Boston Housing Cost Estimator
Building on the classifier example above, this section demonstrates an implementation of a simple ANN regressor for the Boston Housing data set.
## Library and Data Setup
```
# Load Data Set
from sklearn import datasets
from sklearn.preprocessing import MinMaxScaler
boston_housing_data = datasets.load_boston()
scaler = MinMaxScaler()
bouston_housing_data_instances = scaler.fit_transform(boston_housing_data.data)
bouston_housing_data_instances.shape
X_train, X_test, y_train, y_test = train_test_split(bouston_housing_data_instances,
boston_housing_data.target,
test_size=0.20)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.33)
print(X_train.shape)
print(X_test.shape)
print(X_val.shape)
print(y_train.shape)
print(y_test.shape)
print(y_val.shape)
```
## Model Configuration and Training
```
keras.backend.clear_session()
model = keras.models.Sequential([
keras.layers.Input(shape=X_train.shape[1:]),
keras.layers.Dense(97, activation='relu'),
keras.layers.Dense(97, activation='relu'),
keras.layers.Dense(97, activation='relu'),
keras.layers.Dense(1)
])
lr = 0.00059
model.compile(loss='mean_squared_error',
optimizer=keras.optimizers.SGD(learning_rate=lr),
metrics=['mae'])
history = model.fit(X_train, y_train,
epochs = 100,
validation_data = (X_val, y_val),
callbacks=[keras.callbacks.EarlyStopping(patience=10)])
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 100)
plt.show()
```
## Model Validation
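As a quick check, we can score the trained regressor on the held-out test set. The sketch below simply reuses `model`, `X_test`, and `y_test` from the cells above and cross-checks Keras' metrics against scikit-learn's.
```
# Evaluate the trained regressor on the held-out test set.
test_mse, test_mae = model.evaluate(X_test, y_test, verbose=0)
print('Test MSE: %.3f  Test MAE: %.3f' % (test_mse, test_mae))

# Cross-check against scikit-learn's metrics on the raw predictions.
from sklearn.metrics import mean_squared_error, mean_absolute_error
y_pred = model.predict(X_test).flatten()
print('sklearn MSE: %.3f' % mean_squared_error(y_test, y_pred))
print('sklearn MAE: %.3f' % mean_absolute_error(y_test, y_pred))
```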
## Demonstration of Hyperparameter Tuning using RandomizedSearchCV from Scikit-Learn
```
from scipy.stats import reciprocal
from sklearn.model_selection import RandomizedSearchCV
keras.backend.clear_session()
def build_model(n_hidden=1, n_neurons=30, learning_rate=3e-3, input_shape=13):
model = keras.models.Sequential()
model.add(keras.layers.InputLayer(input_shape=input_shape))
for layer in range(n_hidden):
model.add(keras.layers.Dense(n_neurons, activation="relu"))
model.add(keras.layers.Dense(1))
optimizer = keras.optimizers.SGD(lr=learning_rate)
model.compile(loss="mse", optimizer=optimizer)
return model
param_dist = {
'n_hidden': [0,1,2,3],
'n_neurons': np.arange(1,100),
'learning_rate': reciprocal(3e-4,3e-2),
}
lr = 1e-2
model = keras.wrappers.scikit_learn.KerasRegressor(build_model)
rnd_search = RandomizedSearchCV(model,
param_dist,
n_iter=10,
cv=3, verbose=2,
error_score='raise-deprecating')
print(X_train)
rnd_search.fit(X_train,
y_train,
epochs=100,
validation_data=(X_val, y_val),
callbacks=[keras.callbacks.EarlyStopping(patience=10)])
rnd_search.best_params_
```
# MNIST CNN Demonstration
Our final demonstration will use the MNIST image set to demonstrate a CNN.
## Library and Data Setup
```
#(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()
print(X_train_full.shape)
X_train, X_valid = X_train_full[:-5000], X_train_full[-5000:]
y_train, y_valid = y_train_full[:-5000], y_train_full[-5000:]
X_mean = X_train.mean(axis=0, keepdims=True)
X_std = X_train.std(axis=0, keepdims=True) + 1e-7
X_train = (X_train - X_mean) / X_std
X_valid = (X_valid - X_mean) / X_std
X_test = (X_test - X_mean) / X_std
X_train = X_train[..., np.newaxis]
X_valid = X_valid[..., np.newaxis]
X_test = X_test[..., np.newaxis]
```
## Model Set Up
```
from functools import partial
DefaultConv2D = partial(keras.layers.Conv2D,
kernel_size=3, activation='relu', padding="SAME")
model = keras.models.Sequential([
DefaultConv2D(filters=64, kernel_size=7, input_shape=[28, 28, 1]),
keras.layers.MaxPooling2D(pool_size=2),
DefaultConv2D(filters=128),
DefaultConv2D(filters=128),
keras.layers.MaxPooling2D(pool_size=2),
DefaultConv2D(filters=256),
DefaultConv2D(filters=256),
keras.layers.MaxPooling2D(pool_size=2),
keras.layers.Flatten(),
keras.layers.Dense(units=128, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(units=64, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(units=10, activation='softmax'),
])
```
## Model Training
```
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))
score = model.evaluate(X_test, y_test)
X_new = X_test[:10] # pretend we have new images
y_pred = model.predict(X_new)
```
## Model Validation
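As a quick sanity check, the sketch below reuses `score`, `y_pred`, and `y_test` from the cells above to report the test accuracy and to compare the predicted classes for the ten sample images with their true labels.
```
import numpy as np

# model.evaluate returned [loss, accuracy] for the test set.
print('Test loss: %.4f, test accuracy: %.4f' % (score[0], score[1]))

# Compare predicted classes for the 10 sample images with the ground truth.
predicted_classes = np.argmax(y_pred, axis=1)
print('Predicted:', predicted_classes)
print('Actual:   ', y_test[:10])
```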
|
github_jupyter
|
from matplotlib import pyplot as plt
%matplotlib inline
import numpy as np
import pandas as pd
import os
# TensorFlow ≥2.0 is required
import tensorflow as tf
from tensorflow import keras
assert tf.__version__ >= "2.0"
assert keras.__version__ >= "2.0"
# From Ageron's demo: if running under Colaboratory, this will remind you to turn on your GPU.
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
IS_COLAB = True
except Exception:
IS_COLAB = False
if not tf.config.list_physical_devices('GPU'):
print("No GPU was detected. CNNs can be very slow without a GPU.")
if IS_COLAB:
print("Go to Runtime > Change runtime and select a GPU hardware accelerator.")
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.model_selection import train_test_split
# Read data from input files into Pandas data frames
data_path = os.path.join("datasets","titanic")
train_filename = "train.csv"
test_filename = "test.csv"
def read_csv(data_path, filename):
joined_path = os.path.join(data_path, filename)
return pd.read_csv(joined_path)
# Read CSV file into Pandas Dataframes
train_df = read_csv(data_path, train_filename)
# Defining Data Pre-Processing Pipelines
class DataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, attributes):
self.attributes = attributes
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attributes]
class MostFrequentImputer(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
self.most_frequent = pd.Series([X[c].value_counts().index[0] for c in X],
index = X.columns)
return self
def transform(self, X):
return X.fillna(self.most_frequent)
numeric_pipe = Pipeline([
("Select", DataFrameSelector(["Age", "Fare", "SibSp", "Parch"])), # Selects Fields from dataframe
("Imputer", SimpleImputer(strategy="median")), # Fills in NaN w/ median value for its column
("Scaler", StandardScaler()),
])
categories_pipe = Pipeline([
("Select", DataFrameSelector(["Pclass", "Sex"])), # Selects Fields from dataframe
("MostFreqImp", MostFrequentImputer()), # Fill in NaN with most frequent
("OneHot", OneHotEncoder(sparse=False, categories='auto')), # Onehot encode
])
preprocessing_pipe = FeatureUnion(transformer_list = [
("numeric pipeline", numeric_pipe),
("categories pipeline", categories_pipe)
])
# Process Input Data Using Pipelines
X_data = preprocessing_pipe.fit_transform(train_df)
y_data = train_df["Survived"].values.reshape(-1,1)
# Process the output data.
feature_names = ["Age", "Fare", "SibSp", "Parch", "Class0", "class1","Sex0", "Sex1"]
print(X_data.shape)
print(y_data.shape)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size = 0.20)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.33)
print(X_train.shape)
print(X_test.shape)
print(X_val.shape)
print(y_train.shape)
print(y_test.shape)
print(y_val.shape)
model = keras.models.Sequential([
keras.layers.Input(shape=X_train.shape[1:]),
keras.layers.Dense(50, activation='relu'),
keras.layers.Dense(50, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
])
model.layers
model.summary()
from sklearn.tree import export_graphviz
from IPython.display import Image as PImage
import pydot
keras.utils.plot_model(model, "Titanic survivor classifier.png", show_shapes=True)
keras.backend.clear_session()
lr = 0.1
model.compile(loss='binary_crossentropy',
optimizer=keras.optimizers.SGD(learning_rate=lr),
metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=20, validation_data=(X_val,y_val))
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 1)
plt.show()
from sklearn.metrics import confusion_matrix, precision_score, recall_score, accuracy_score, f1_score
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.metrics import roc_curve, precision_recall_curve
def plot_precision_recall_curve(y, y_score):
"""
Prints a precision vs. recall curve.
"""
precisions, recalls, thresholds = precision_recall_curve(y, y_score)
plt.figure(figsize=(8, 6))
plt.title("Precision-Recall Curve")
plt.plot(recalls, precisions, "b-", linewidth=2)
plt.xlabel("Recall", fontsize=16)
plt.ylabel("Precision", fontsize=16)
plt.axis([0, 1, 0, 1])
plt.show()
def plot_roc(y, y_score):
"""
Prints a Receiver Operating Characteristic (ROC) Curve
"""
fpr, tpr, thresholds = roc_curve(y, y_score)
plt.figure(figsize=(8, 6))
plt.title("ROC Curve")
plt.plot(fpr, tpr, linewidth=2)
plt.plot([0, 1], [0, 1], 'k--')
plt.axis([0,1,0,1])
plt.xlabel("False Positive Rate (FPR)")
plt.ylabel("True Positive Rate (TPR)")
plt.show()
def evaluate_classifier(y, y_pred):
"""
Prints the confusion matrix, precision score, recall score, and f1 score
"""
print("Confusion Matrix:")
print(confusion_matrix(y, y_pred))
print("Pecision Score = " + str(precision_score(y, y_pred)))
print("Recall Score = " + str(recall_score(y,y_pred)))
print("F1 Score = " + str(f1_score(y,y_pred)))
y_pred_prob = model.predict(X_test)
plot_precision_recall_curve(y_test, y_pred_prob)
plot_roc(y_test, y_pred_prob)
y_pred = [1 if y > 0.5 else 0 for y in y_pred_prob]
evaluate_classifier(y_test, y_pred)
# Load Data Set
from sklearn import datasets
from sklearn.preprocessing import MinMaxScaler
boston_housing_data = datasets.load_boston()
scaler = MinMaxScaler()
bouston_housing_data_instances = scaler.fit_transform(boston_housing_data.data)
bouston_housing_data_instances.shape
X_train, X_test, y_train, y_test = train_test_split(bouston_housing_data_instances,
boston_housing_data.target,
test_size=0.20)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.33)
print(X_train.shape)
print(X_test.shape)
print(X_val.shape)
print(y_train.shape)
print(y_test.shape)
print(y_val.shape)
keras.backend.clear_session()
model = keras.models.Sequential([
keras.layers.Input(shape=X_train.shape[1:]),
keras.layers.Dense(97, activation='relu'),
keras.layers.Dense(97, activation='relu'),
keras.layers.Dense(97, activation='relu'),
keras.layers.Dense(1)
])
lr = 0.00059
model.compile(loss='mean_squared_error',
optimizer=keras.optimizers.SGD(learning_rate=lr),
metrics=['mae'])
history = model.fit(X_train, y_train,
epochs = 100,
validation_data = (X_val, y_val),
callbacks=[keras.callbacks.EarlyStopping(patience=10)])
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 100)
plt.show()
from scipy.stats import reciprocal
from sklearn.model_selection import RandomizedSearchCV
keras.backend.clear_session()
def build_model(n_hidden=1, n_neurons=30, learning_rate=3e-3, input_shape=13):
model = keras.models.Sequential()
model.add(keras.layers.InputLayer(input_shape=input_shape))
for layer in range(n_hidden):
model.add(keras.layers.Dense(n_neurons, activation="relu"))
model.add(keras.layers.Dense(1))
optimizer = keras.optimizers.SGD(lr=learning_rate)
model.compile(loss="mse", optimizer=optimizer)
return model
param_dist = {
'n_hidden': [0,1,2,3],
'n_neurons': np.arange(1,100),
'learning_rate': reciprocal(3e-4,3e-2),
}
lr = 1e-2
model = keras.wrappers.scikit_learn.KerasRegressor(build_model)
rnd_search = RandomizedSearchCV(model,
param_dist,
n_iter=10,
cv=3, verbose=2,
error_score='raise-deprecating')
print(X_train)
rnd_search.fit(X_train,
y_train,
epochs=100,
validation_data=(X_val, y_val),
callbacks=[keras.callbacks.EarlyStopping(patience=10)])
rnd_search.best_params_
#(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()
print(X_train_full.shape)
X_train, X_valid = X_train_full[:-5000], X_train_full[-5000:]
y_train, y_valid = y_train_full[:-5000], y_train_full[-5000:]
X_mean = X_train.mean(axis=0, keepdims=True)
X_std = X_train.std(axis=0, keepdims=True) + 1e-7
X_train = (X_train - X_mean) / X_std
X_valid = (X_valid - X_mean) / X_std
X_test = (X_test - X_mean) / X_std
X_train = X_train[..., np.newaxis]
X_valid = X_valid[..., np.newaxis]
X_test = X_test[..., np.newaxis]
from functools import partial
DefaultConv2D = partial(keras.layers.Conv2D,
kernel_size=3, activation='relu', padding="SAME")
model = keras.models.Sequential([
DefaultConv2D(filters=64, kernel_size=7, input_shape=[28, 28, 1]),
keras.layers.MaxPooling2D(pool_size=2),
DefaultConv2D(filters=128),
DefaultConv2D(filters=128),
keras.layers.MaxPooling2D(pool_size=2),
DefaultConv2D(filters=256),
DefaultConv2D(filters=256),
keras.layers.MaxPooling2D(pool_size=2),
keras.layers.Flatten(),
keras.layers.Dense(units=128, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(units=64, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(units=10, activation='softmax'),
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))
score = model.evaluate(X_test, y_test)
X_new = X_test[:10] # pretend we have new images
y_pred = model.predict(X_new)
| 0.781205 | 0.986828 |
```
storageAccount="cookbookaadass2storage"
mountpoint = "/mnt/Gen2Source"
storageEndPoint ="abfss://rawdata@{}.dfs.core.windows.net/".format(storageAccount)
print ('Mount Point ='+mountpoint)
# ClientId, TenantId and Secret are for the Application (ADLSGen2App). You can use any SPN which has access to the storage
clientID =""
tenantID =""
clientSecret =""
oauth2Endpoint = "https://login.microsoftonline.com/{}/oauth2/token".format(tenantID)
configs = {"fs.azure.account.auth.type": "OAuth",
"fs.azure.account.oauth.provider.type": "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider",
"fs.azure.account.oauth2.client.id": clientID,
"fs.azure.account.oauth2.client.secret": clientSecret,
"fs.azure.account.oauth2.client.endpoint": oauth2Endpoint}
# dbutils.fs.mount(
# source = storageEndPoint,
# mount_point = mountpoint,
# extra_configs = configs)
try:
dbutils.fs.mount(
source = storageEndPoint,
mount_point = mountpoint,
extra_configs = configs)
except:
print("Already mounted...."+mountpoint)
display(dbutils.fs.ls("/mnt/Gen2Source/Customer/parquetFiles"))
db = "deltadb"
spark.sql(f"CREATE DATABASE IF NOT EXISTS {db}")
spark.sql(f"USE {db}")
spark.sql("SET spark.databricks.delta.formatCheck.enabled = false")
spark.sql("SET spark.databricks.delta.properties.defaults.autoOptimize.optimizeWrite = true")
import random
from datetime import datetime
from pyspark.sql.functions import *
from pyspark.sql.types import *
from delta.tables import *
from pyspark.sql.functions import from_unixtime
from pyspark.sql.functions import to_date
from pyspark.sql import Row
from pyspark.sql.functions import to_json, struct
from pyspark.sql import functions as F
import random
```
### Reading Customer parquet files from the mount point and writing to Delta tables.
```
#Reading parquet files and adding a new column to the dataframe and writing to delta table
cust_path = "/mnt/Gen2Source/Customer/parquetFiles"
df_cust = (spark.read.format("parquet").load(cust_path)
.withColumn("timestamp", current_timestamp()))
df_cust.write.format("delta").mode("overwrite").save("/mnt/Gen2Source/Customer/delta")
%sql
-- Creating Delta table
DROP TABLE IF EXISTS Customer;
CREATE TABLE Customer
USING delta
location "/mnt/Gen2Source/Customer/delta"
%sql
describe formatted Customer
%sql
select * from Customer limit 10;
# Reading new data which is in the folder parquetFiles_Daily in /mnt/Gen2Source/Customer
# This is a scenario where new data arrives on a daily basis with no updates to existing records. To handle updates we would have to use Merge, as shown in the first recipe (Delta Table Operations - Create, Read, Write) of this chapter.
daily_cust_path = "/mnt/Gen2Source/Customer/parquetFiles_Daily"
df_cust_daily = (spark.read.format("parquet").load(daily_cust_path)
.withColumn("timestamp", current_timestamp()))
df_cust_daily.write.format("delta").mode("append").save("/mnt/Gen2Source/Customer/delta")
%sql
-- Deleting from Delta table
DELETE FROM Customer WHERE C_CUSTKEY=82533
%sql
UPDATE Customer SET C_MKTSEGMENT="BUILDING" WHERE C_CUSTKEY=101275
```
### Writing Streaming Data into Delta Table and checking the delta log
```
#Creating the schema for the vehicle data json structure
jsonschema = StructType() \
.add("id", StringType()) \
.add("timestamp", TimestampType()) \
.add("rpm", IntegerType()) \
.add("speed", IntegerType()) \
.add("kms", IntegerType())
%fs mkdirs /mnt/Gen2Source/Vehicle_Delta/Chkpnt
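# Note: checkpoint_dir() below returns a random sub-folder, so each run starts a fresh stream rather than resuming from an earlier checkpoint.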
def checkpoint_dir():
return "/mnt/Gen2Source/Vehicle_Delta/Chkpnt/%s" % str(random.randint(0, 10000))
BOOTSTRAP_SERVERS = "kafkaenabledeventhubns.servicebus.windows.net:9093"
EH_SASL = "kafkashaded.org.apache.kafka.common.security.plain.PlainLoginModule required username=\"$ConnectionString\" password=\"Endpoint=sb://kafkaenabledeventhubns.servicebus.windows.net/;SharedAccessKeyName=sendreceivekafka;SharedAccessKey=zzzzzzzz\";"
GROUP_ID = "$Default"
# Function to read data from EventHub and write it out in Delta format
def append_kafkadata_stream(topic="eventhubsource1"):
kafkaDF = (spark.readStream \
.format("kafka") \
.option("subscribe", topic) \
.option("kafka.bootstrap.servers", BOOTSTRAP_SERVERS) \
.option("kafka.sasl.mechanism", "PLAIN") \
.option("kafka.security.protocol", "SASL_SSL") \
.option("kafka.sasl.jaas.config", EH_SASL) \
.option("kafka.request.timeout.ms", "60000") \
.option("kafka.session.timeout.ms", "60000") \
.option("kafka.group.id", GROUP_ID) \
.option("failOnDataLoss", "false") \
.option("startingOffsets", "latest") \
.load().withColumn("source", lit(topic)))
newkafkaDF=kafkaDF.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)","source").withColumn('vehiclejson', from_json(col('value'),schema=jsonschema))
kafkajsonDF=newkafkaDF.select("key","value","source", "vehiclejson.*")
query=kafkajsonDF.selectExpr(
"id" \
,"timestamp" \
,"rpm" \
,"speed" \
,"kms"
,"source") \
.writeStream.format("delta") \
.outputMode("append") \
.option("checkpointLocation",checkpoint_dir()) \
.start("/mnt/Gen2Source/Vehicle_Delta/")
return query
query_source1 = append_kafkadata_stream(topic='eventhubsource1')
%sql
-- Creating the table on delta location
CREATE DATABASE IF NOT EXISTS deltadb;
CREATE TABLE IF NOT EXISTS deltadb.VehicleDetails_Delta
USING DELTA
LOCATION "/mnt/Gen2Source/Vehicle_Delta/"
```
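To check the delta log mentioned in the heading above, a minimal sketch (assuming the same path and table name used in this recipe) is to list the `_delta_log` folder and review the table history:
```
# List the transaction log files written for the streaming Delta table.
display(dbutils.fs.ls("/mnt/Gen2Source/Vehicle_Delta/_delta_log"))
%sql
-- Review the commit history recorded in the delta log
DESCRIBE HISTORY deltadb.VehicleDetails_Delta
```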
|
github_jupyter
|
storageAccount="cookbookaadass2storage"
mountpoint = "/mnt/Gen2Source"
storageEndPoint ="abfss://rawdata@{}.dfs.core.windows.net/".format(storageAccount)
print ('Mount Point ='+mountpoint)
# ClientId, TenantId and Secret are for the Application (ADLSGen2App). You can use any SPN which has access to the storage
clientID =""
tenantID =""
clientSecret =""
oauth2Endpoint = "https://login.microsoftonline.com/{}/oauth2/token".format(tenantID)
configs = {"fs.azure.account.auth.type": "OAuth",
"fs.azure.account.oauth.provider.type": "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider",
"fs.azure.account.oauth2.client.id": clientID,
"fs.azure.account.oauth2.client.secret": clientSecret,
"fs.azure.account.oauth2.client.endpoint": oauth2Endpoint}
# dbutils.fs.mount(
# source = storageEndPoint,
# mount_point = mountpoint,
# extra_configs = configs)
try:
dbutils.fs.mount(
source = storageEndPoint,
mount_point = mountpoint,
extra_configs = configs)
except:
print("Already mounted...."+mountpoint)
display(dbutils.fs.ls("/mnt/Gen2Source/Customer/parquetFiles"))
db = "deltadb"
spark.sql(f"CREATE DATABASE IF NOT EXISTS {db}")
spark.sql(f"USE {db}")
spark.sql("SET spark.databricks.delta.formatCheck.enabled = false")
spark.sql("SET spark.databricks.delta.properties.defaults.autoOptimize.optimizeWrite = true")
import random
from datetime import datetime
from pyspark.sql.functions import *
from pyspark.sql.types import *
from delta.tables import *
from pyspark.sql.functions import from_unixtime
from pyspark.sql.functions import to_date
from pyspark.sql import Row
from pyspark.sql.functions import to_json, struct
from pyspark.sql import functions as F
import random
#Reading parquet files and adding a new column to the dataframe and writing to delta table
cust_path = "/mnt/Gen2Source/Customer/parquetFiles"
df_cust = (spark.read.format("parquet").load(cust_path)
.withColumn("timestamp", current_timestamp()))
df_cust.write.format("delta").mode("overwrite").save("/mnt/Gen2Source/Customer/delta")
%sql
-- Creating Delta table
DROP TABLE IF EXISTS Customer;
CREATE TABLE Customer
USING delta
location "/mnt/Gen2Source/Customer/delta"
%sql
describe formatted Customer
%sql
select * from Customer limit 10;
# Reading new data which is in the folder parquetFiles_Daily in /mnt/Gen2Source/Customer
# This is a scenario where new data arrives on a daily basis with no updates to existing records. To handle updates we would have to use Merge, as shown in the first recipe (Delta Table Operations - Create, Read, Write) of this chapter.
daily_cust_path = "/mnt/Gen2Source/Customer/parquetFiles_Daily"
df_cust_daily = (spark.read.format("parquet").load(daily_cust_path)
.withColumn("timestamp", current_timestamp()))
df_cust_daily.write.format("delta").mode("append").save("/mnt/Gen2Source/Customer/delta")
%sql
-- Deleting from Delta table
DELETE FROM Customer WHERE C_CUSTKEY=82533
%sql
UPDATE Customer SET C_MKTSEGMENT="BUILDING" WHERE C_CUSTKEY=101275
#Creating the schema for the vehicle data json structure
jsonschema = StructType() \
.add("id", StringType()) \
.add("timestamp", TimestampType()) \
.add("rpm", IntegerType()) \
.add("speed", IntegerType()) \
.add("kms", IntegerType())
%fs mkdirs /mnt/Gen2Source/Vehicle_Delta/Chkpnt
def checkpoint_dir():
return "/mnt/Gen2Source/Vehicle_Delta/Chkpnt/%s" % str(random.randint(0, 10000))
BOOTSTRAP_SERVERS = "kafkaenabledeventhubns.servicebus.windows.net:9093"
EH_SASL = "kafkashaded.org.apache.kafka.common.security.plain.PlainLoginModule required username=\"$ConnectionString\" password=\"Endpoint=sb://kafkaenabledeventhubns.servicebus.windows.net/;SharedAccessKeyName=sendreceivekafka;SharedAccessKey=zzzzzzzz\";"
GROUP_ID = "$Default"
# Function to read data from EventHub and write it out in Delta format
def append_kafkadata_stream(topic="eventhubsource1"):
kafkaDF = (spark.readStream \
.format("kafka") \
.option("subscribe", topic) \
.option("kafka.bootstrap.servers", BOOTSTRAP_SERVERS) \
.option("kafka.sasl.mechanism", "PLAIN") \
.option("kafka.security.protocol", "SASL_SSL") \
.option("kafka.sasl.jaas.config", EH_SASL) \
.option("kafka.request.timeout.ms", "60000") \
.option("kafka.session.timeout.ms", "60000") \
.option("kafka.group.id", GROUP_ID) \
.option("failOnDataLoss", "false") \
.option("startingOffsets", "latest") \
.load().withColumn("source", lit(topic)))
newkafkaDF=kafkaDF.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)","source").withColumn('vehiclejson', from_json(col('value'),schema=jsonschema))
kafkajsonDF=newkafkaDF.select("key","value","source", "vehiclejson.*")
query=kafkajsonDF.selectExpr(
"id" \
,"timestamp" \
,"rpm" \
,"speed" \
,"kms"
,"source") \
.writeStream.format("delta") \
.outputMode("append") \
.option("checkpointLocation",checkpoint_dir()) \
.start("/mnt/Gen2Source/Vehicle_Delta/")
return query
query_source1 = append_kafkadata_stream(topic='eventhubsource1')
%sql
-- Creating the table on delta location
CREATE DATABASE IF NOT EXISTS deltadb;
CREATE TABLE IF NOT EXISTS deltadb.VehicleDetails_Delta
USING DELTA
LOCATION "/mnt/Gen2Source/Vehicle_Delta/"
| 0.386879 | 0.32154 |
```
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import numpy as np
x_valid = np.load('/content/drive/MyDrive/VSAT_HW1/x_valid.npy')
x_train = np.load('/content/drive/MyDrive/VSAT_HW1/x_train.npy')
y_train=np.load('/content/drive/MyDrive/VSAT_HW1/y_train.npy')
y_valid=np.load('/content/drive/MyDrive/VSAT_HW1/y_valid.npy')
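# convert_CHW: apply ToTensor + Normalize to each image and return a numpy array in channel-first (C, H, W) order.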
def convert_CHW(data):
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
tmp=[]
for i,img in enumerate(data,0):
image=np.array(transform(img).tolist())
tmp.append(image)
tmp=np.array(tmp)
return tmp
#transform
x_train=convert_CHW(x_train)
x_valid=convert_CHW(x_valid)
import torch
from torch.utils.data import Dataset,DataLoader
class MyDataset(Dataset):
def __init__(self,data,label,transform):
self.data = torch.FloatTensor(data)
self.label = torch.LongTensor(label)
self.transform = transform
def __getitem__(self,index):
if self.transform:
self.data[index] = self.data[index]
return self.data[index],self.label[index]
def __len__(self):
return len(self.data)
# GPU
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
print('GPU state:', device)
#dataset
trainset=MyDataset(data=x_train,label=y_train,transform=None)
testset=MyDataset(data=x_valid,label=y_valid,transform=None)
#dataLoader
trainLoader = DataLoader(dataset=trainset,batch_size=8,shuffle=True,num_workers=2)
testLoader = DataLoader(dataset=testset,batch_size=1,num_workers=2)
# Model structure
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
net = Net().to(device)
print(net)
# Parameters
criterion = nn.CrossEntropyLoss()
lr = 0.001
epochs = 8
optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)
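# cal_accuracy: returns top-3 accuracy (a prediction counts as correct if the true label is among the 3 highest-scoring classes) and the average loss.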
def cal_accuracy(net,Loader):
correct = 0
total = 0
loss=0
running_loss,times=0,0
with torch.no_grad():
for data in Loader:
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
outputs = net(inputs)
loss = criterion(outputs, labels)
values, predicted = outputs.topk(3, dim=1, largest=True, sorted=True)
total += labels.size(0)
for i,data in enumerate(labels,0):
if data in predicted[i]:
correct+=1
running_loss+=loss.item()
times+=1
return correct/total*100,running_loss/(times)
# Train
train_loss_value=[]
valid_loss_value=[]
now_epoch=0
for epoch in range(epochs):
running_loss = 0.0
for times, data in enumerate(trainLoader, 0):
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
# Zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
now_valid_accuracy,now_valid_loss=cal_accuracy(net,testLoader)
tmp,now_train_loss=cal_accuracy(net,trainLoader)
print('[%d/%d, %d] train loss: %.3f validation loss: %.3f validation accuracy: %.1f' % (epoch+1, epochs, len(trainLoader), now_train_loss,now_valid_loss,now_valid_accuracy))
train_loss_value.append(running_loss/(times+1))
valid_loss_value.append(now_valid_loss)
print('Finished Training')
# validation accuracy
now_valid_accuracy,now_valid_loss=cal_accuracy(net,testLoader)
print('Accuracy of the network on the 10000 validation inputs: %.2f %%' % (now_valid_accuracy))
#loss curve
import matplotlib.pyplot as plt
plt.plot(np.array(train_loss_value), 'blue', label='train')
plt.plot(np.array(valid_loss_value), 'r', label='validation')
plt.legend()
plt.savefig('/content/drive/MyDrive/VSAT_HW1/loss.png')  # save the figure before displaying it
plt.show()
# Save model
model_name='/content/drive/MyDrive/VSAT_HW1/cnn_model.pth'
torch.save(net,model_name)
# Load model
net=torch.load('/content/drive/MyDrive/VSAT_HW1/cnn_model.pth')
now_valid_accuracy,now_valid_loss=cal_accuracy(net,testLoader)
print('Accuracy of the network on the 10000 validation inputs: %.2f %%' % (now_valid_accuracy))
print('Top-3 error rate of the network on the 10000 validation inputs: %.2f %%' % (100-now_valid_accuracy))
# test data
x_test=np.load('/content/drive/MyDrive/VSAT_HW1/x_test.npy')
x_test=convert_CHW(x_test)
testset=MyDataset(data=x_test,label=y_valid,transform=None)
testLoader = DataLoader(dataset=testset,batch_size=1,num_workers=2)
# output
ans=[]
with torch.no_grad():
for data in testLoader:
inputs, labels = data
inputs = inputs.to(device)
outputs = net(inputs)
values, predicted = outputs.topk(3, dim=1, largest=True, sorted=True)
for i,data in enumerate(predicted,0):
ans.append(data.tolist())
# write
txtpath='/content/drive/MyDrive/VSAT_HW1/0712534.txt'
f = open(txtpath, 'w')
for item in ans:
f.write(str(item)+'\n')
f.close()
# number of parameters
number_of_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print('The number of parameters: %d' %number_of_params)
```
|
github_jupyter
|
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import numpy as np
x_valid = np.load('/content/drive/MyDrive/VSAT_HW1/x_valid.npy')
x_train = np.load('/content/drive/MyDrive/VSAT_HW1/x_train.npy')
y_train=np.load('/content/drive/MyDrive/VSAT_HW1/y_train.npy')
y_valid=np.load('/content/drive/MyDrive/VSAT_HW1/y_valid.npy')
def convert_CHW(data):
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
tmp=[]
for i,img in enumerate(data,0):
image=np.array(transform(img).tolist())
tmp.append(image)
tmp=np.array(tmp)
return tmp
#transform
x_train=convert_CHW(x_train)
x_valid=convert_CHW(x_valid)
import torch
from torch.utils.data import Dataset,DataLoader
class MyDataset(Dataset):
def __init__(self,data,label,transform):
self.data = torch.FloatTensor(data)
self.label = torch.LongTensor(label)
self.transform = transform
def __getitem__(self,index):
if self.transform:
self.data[index] = self.data[index]
return self.data[index],self.label[index]
def __len__(self):
return len(self.data)
# GPU
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
print('GPU state:', device)
#dataset
trainset=MyDataset(data=x_train,label=y_train,transform=None)
testset=MyDataset(data=x_valid,label=y_valid,transform=None)
#dataLoader
trainLoader = DataLoader(dataset=trainset,batch_size=8,shuffle=True,num_workers=2)
testLoader = DataLoader(dataset=testset,batch_size=1,num_workers=2)
# Model structure
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
net = Net().to(device)
print(net)
# Parameters
criterion = nn.CrossEntropyLoss()
lr = 0.001
epochs = 8
optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)
def cal_accuracy(net,Loader):
correct = 0
total = 0
loss=0
running_loss,times=0,0
with torch.no_grad():
for data in Loader:
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
outputs = net(inputs)
loss = criterion(outputs, labels)
values, predicted = outputs.topk(3, dim=1, largest=True, sorted=True)
total += labels.size(0)
for i,data in enumerate(labels,0):
if data in predicted[i]:
correct+=1
running_loss+=loss.item()
times+=1
return correct/total*100,running_loss/(times)
# Train
train_loss_value=[]
valid_loss_value=[]
now_epoch=0
for epoch in range(epochs):
running_loss = 0.0
for times, data in enumerate(trainLoader, 0):
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
# Zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
now_valid_accuracy,now_valid_loss=cal_accuracy(net,testLoader)
tmp,now_train_loss=cal_accuracy(net,trainLoader)
print('[%d/%d, %d] train loss: %.3f validation loss: %.3f validation accuracy: %.1f' % (epoch+1, epochs, len(trainLoader), now_train_loss,now_valid_loss,now_valid_accuracy))
train_loss_value.append(running_loss/(times+1))
valid_loss_value.append(now_valid_loss)
print('Finished Training')
# validation accuracy
now_valid_accuracy,now_valid_loss=cal_accuracy(net,testLoader)
print('Accuracy of the network on the 10000 validation inputs: %.2f %%' % (now_valid_accuracy))
#loss curve
import matplotlib.pyplot as plt
plt.plot(np.array(train_loss_value), 'blue', label='train')
plt.plot(np.array(valid_loss_value), 'r', label='validation')
plt.legend()
plt.savefig('/content/drive/MyDrive/VSAT_HW1/loss.png')  # save the figure before displaying it
plt.show()
# Save model
model_name='/content/drive/MyDrive/VSAT_HW1/cnn_model.pth'
torch.save(net,model_name)
# Load model
net=torch.load('/content/drive/MyDrive/VSAT_HW1/cnn_model.pth')
now_valid_accuracy,now_valid_loss=cal_accuracy(net,testLoader)
print('Accuracy of the network on the 10000 validation inputs: %.2f %%' % (now_valid_accuracy))
print('Top-3 error rate of the network on the 10000 validation inputs: %.2f %%' % (100-now_valid_accuracy))
# test data
x_test=np.load('/content/drive/MyDrive/VSAT_HW1/x_test.npy')
x_test=convert_CHW(x_test)
testset=MyDataset(data=x_test,label=y_valid,transform=None)
testLoader = DataLoader(dataset=testset,batch_size=1,num_workers=2)
# output
ans=[]
with torch.no_grad():
for data in testLoader:
inputs, labels = data
inputs = inputs.to(device)
outputs = net(inputs)
values, predicted = outputs.topk(3, dim=1, largest=True, sorted=True)
for i,data in enumerate(predicted,0):
ans.append(data.tolist())
# write
txtpath='/content/drive/MyDrive/VSAT_HW1/0712534.txt'
f = open(txtpath, 'w')
for item in ans:
f.write(str(item)+'\n')
f.close()
# number of parameters
number_of_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print('The number of parameters: %d' %number_of_params)
| 0.713731 | 0.382026 |
# LeetCode #807. Max Increase to Keep City Skyline
## Question
https://leetcode.com/problems/max-increase-to-keep-city-skyline/
In a 2 dimensional array grid, each value grid[i][j] represents the height of a building located there. We are allowed to increase the height of any number of buildings, by any amount (the amounts can be different for different buildings). Height 0 is considered to be a building as well.
At the end, the "skyline" when viewed from all four directions of the grid, i.e. top, bottom, left, and right, must be the same as the skyline of the original grid. A city's skyline is the outer contour of the rectangles formed by all the buildings when viewed from a distance. See the following example.
What is the maximum total sum that the height of the buildings can be increased?
Example:
Input: grid = [[3,0,8,4],[2,4,5,7],[9,2,6,3],[0,3,1,0]]
Output: 35
Explanation:
The grid is:
[ [3, 0, 8, 4],
[2, 4, 5, 7],
[9, 2, 6, 3],
[0, 3, 1, 0] ]
The skyline viewed from top or bottom is: [9, 4, 8, 7]
The skyline viewed from left or right is: [8, 7, 9, 3]
The grid after increasing the height of buildings without affecting skylines is:
gridNew = [ [8, 4, 8, 7],
[7, 4, 7, 7],
[9, 4, 8, 7],
[3, 3, 3, 3] ]
Notes:
1 < grid.length = grid[0].length <= 50.
All heights grid[i][j] are in the range [0, 100].
All buildings in grid[i][j] occupy the entire grid cell: that is, they are a 1 x 1 x grid[i][j] rectangular prism.
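The key observation is that each cell can be raised to the minimum of its row maximum and column maximum without changing either skyline. A quick illustrative check of this idea on the sample grid (a sketch, separate from the submitted solutions below):
```
# Row and column maxima are exactly the two skylines of the sample grid.
grid = [[3, 0, 8, 4],
        [2, 4, 5, 7],
        [9, 2, 6, 3],
        [0, 3, 1, 0]]
row_max = [max(row) for row in grid]         # [8, 7, 9, 3] - skyline from left/right
col_max = [max(col) for col in zip(*grid)]   # [9, 4, 8, 7] - skyline from top/bottom
grid_new = [[min(row_max[i], col_max[j]) for j in range(4)] for i in range(4)]
print(grid_new)  # matches gridNew in the example above
```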
## My Solution
```
def maxIncreaseKeepingSkyline(grid) -> int:
import numpy as np
t_grid = np.transpose(grid)
res = 0
for i in range(len(grid)):
for j in range(len(grid[0])):
temp = min(max(grid[i]), max(t_grid[j]))
res += temp - grid[i][j]
grid[i][j] = temp
return res
# test code
grid = [[3, 0, 8, 4],
[2, 4, 5, 7],
[9, 2, 6, 3],
[0, 3, 1, 0]]
maxIncreaseKeepingSkyline(grid)
```
## My Result
__Runtime__ : 240 ms, faster than 5.01% of Python3 online submissions for Max Increase to Keep City Skyline.
__Memory Usage__ : 27.3 MB, less than 5.00% of Python3 online submissions for Max Increase to Keep City Skyline.
## @wemwalker's Solution
```
def maxIncreaseKeepingSkyline(grid):
rows = list(map(max, grid))
cols = list(map(max, zip(*grid)))
return sum(min(i, j) for i in rows for j in cols) - sum(map(sum, grid))
grid = [[3, 0, 8, 4],
[2, 4, 5, 7],
[9, 2, 6, 3],
[0, 3, 1, 0]]
maxIncreaseKeepingSkyline(grid)
```
## @wemwalker's Result
__Runtime__: 76 ms, faster than 98.39% of Python3 online submissions for Max Increase to Keep City Skyline.
__Memory Usage__ : 14 MB, less than 5.00% of Python3 online submissions for Max Increase to Keep City Skyline.
```
import pyspark
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
import os
spark = SparkSession.builder.master("spark://{sparkname}:7077".format(sparkname=os.environ['SPARK_CLUSTER'])).getOrCreate()
DENODO_DRIVER = "com.denodo.vdp.jdbc.Driver"
DENODO_HOST = "your_host"
DENODO_USER = "your_user"
DENODO_PASSWORD = ""
DENODO_PORT = "9999"
DENODO_DB = "your_db"
denodo_jdbc_str = ("jdbc:vdb://{host}:{port}/{database}?queryTimeout=0").format( \
host=DENODO_HOST, \
port=DENODO_PORT, \
database=DENODO_DB)
denodo_prop = \
{"user": DENODO_USER, \
"password": DENODO_PASSWORD, \
"driver": DENODO_DRIVER, \
"sslConnection": "false"}
denodo_join_query123 = """
SELECT DISTINCT rowno, current_date as load_date, case_number, Contention_date, vin, Case_id, fd.body
FROM
(
SELECT rownum() as rowno, c.case_number, fd.created_date AS Contention_date, c.vin, fd.parent_id AS Case_id, fd.body as Body
FROM crrs_feeditem fd JOIN crrs_case c ON fd.parent_id = c.case_id
WHERE c.case_number IS NOT NULL AND c.case_id IS NOT NULL AND c.vin IS NOT NULL AND fd.body IS NOT NULL
AND c.method NOT IN ('Outreach', 'Pro-Active O/B', 'Robo-Call')
AND c.subdivision NOT IN ('Alpha','Campaign','Marine Dealer Support','Marine Sales Support','MC Mediation','PCRM','PCRM - Torrance','PCRM Chino','PCRM-Chino','PCRM-Torrance')
AND c.point_of_origin NOT IN ('Outreach')
AND (fd.body NOT LIKE ('%Outbound%') AND fd.body NOT LIKE ('%No campaign data found%') AND fd.body NOT LIKE ('%viewed Campaign Info%') AND fd.body NOT LIKE ('%Recall%'))
AND c.DIVISION IN ('Honda', 'Acura')
AND fd.created_date between '2020/01/29' and '2020/01/31'
UNION
select rownum() as rowno, c.case_number, cf.source_created_date AS Contention_date, c.vin, cf.parent_id AS Case_id, cf.body AS Body --cast(body as varchar(10000)) AS case_msg
from crrs_casefeed cf JOIN crrs_case c ON cf.parent_id = c.case_id
WHERE c.case_number IS NOT NULL AND c.case_id IS NOT NULL AND c.vin IS NOT NULL AND cf.body IS NOT NULL AND isdeleted = 0
AND c.method NOT IN ('Outreach', 'Pro-Active O/B', 'Robo-Call')
AND c.subdivision NOT IN ('Alpha','Campaign','Marine Dealer Support','Marine Sales Support','MC Mediation','PCRM','PCRM - Torrance','PCRM Chino','PCRM-Chino','PCRM-Torrance')
AND c.point_of_origin NOT IN ('Outreach')
AND (cf.body NOT LIKE ('%Outbound%') AND cf.body NOT LIKE ('%No campaign data found%') AND cf.body NOT LIKE ('%viewed Campaign Info%') AND cf.body NOT LIKE ('%Recall%'))
AND c.DIVISION IN ('Honda', 'Acura')
AND formatdate('yyyy/MM/dd',cf.source_created_date) between '2020/01/29' and '2020/01/31'
UNION
select rownum() as rowno, c.case_number, em.message_timestamp AS Contention_date, c.vin, em.parent_id AS Case_id, cast(em.body_text as varchar(10000)) AS Body
from crrs_email_message em JOIN crrs_case c ON em.parent_id = c.case_id
WHERE c.case_number IS NOT NULL AND c.case_id IS NOT NULL AND c.vin IS NOT NULL AND em.body_text IS NOT NULL
AND c.method NOT IN ('Outreach', 'Pro-Active O/B', 'Robo-Call')
AND c.subdivision NOT IN ('Alpha','Campaign','Marine Dealer Support','Marine Sales Support','MC Mediation','PCRM','PCRM - Torrance','PCRM Chino','PCRM-Chino','PCRM-Torrance')
AND c.point_of_origin NOT IN ('Outreach')
AND (em.body_text NOT LIKE ('%Outbound%') AND em.body_text NOT LIKE ('%No campaign data found%') AND em.body_text NOT LIKE ('%viewed Campaign Info%') AND em.body_text NOT LIKE ('%Recall%'))
AND c.DIVISION IN ('Honda', 'Acura')
AND formatdate('yyyy/MM/dd',em.message_timestamp) between '2020/01/29' and '2020/01/31'
UNION
select rownum() as rowno, c.case_number, ch.start_timestamp AS Contention_date, c.vin, c.case_id as Case_id, cast(ch.body_text as varchar(10000)) AS Body
from crrs_livechat_transcript ch JOIN crrs_case c ON ch.case_id = c.case_id
WHERE c.case_number IS NOT NULL AND c.case_id IS NOT NULL AND c.vin IS NOT NULL AND ch.body_text IS NOT NULL
AND c.method NOT IN ('Outreach', 'Pro-Active O/B', 'Robo-Call')
AND c.subdivision NOT IN ('Alpha','Campaign','Marine Dealer Support','Marine Sales Support','MC Mediation','PCRM','PCRM - Torrance','PCRM Chino','PCRM-Chino','PCRM-Torrance')
AND c.point_of_origin NOT IN ('Outreach')
AND (ch.body_text NOT LIKE ('%Outbound%') AND ch.body_text NOT LIKE ('%No campaign data found%') AND ch.body_text NOT LIKE ('%viewed Campaign Info%') AND ch.body_text NOT LIKE ('%Recall%'))
AND c.DIVISION IN ('Honda', 'Acura')
AND formatdate('yyyy/MM/dd',ch.start_timestamp) between '2020/01/29' and '2020/01/31'
) t
WHERE case_number IS NOT NULL AND CASE_ID IS NOT NULL AND VIN IS NOT NULL AND BODY IS NOT NULL
ORDER BY case_number, Contention_date;
"""
denodo_join_query = """
SELECT DISTINCT fd.rowno, c.case_number AS Case_No, fd.Contention_date AS Contention_date, c.vin, fd.Case_id AS Case_id, fd.body as Body,
r.recordtype_name AS Case_RecordType_Nm,
c.updt_ts AS Case_Updt_Ts,
c.date_time_opened AS Case_Src_Cret_Ts,
c.date_time_closed AS Case_Closed_Ts,
c.last_reopen_date AS last_reopen_date,
c.vin AS Case_Vin_Id,
c.miles AS Case_Miles_Qty,
c.division_code AS Case_Division_Cd,
c.subdivision AS Case_Subdivision_Cd,
c.current_dealer_code AS Case_Current_Dealer_No,
CONCAT(c.current_dealer_code, c.division_code) AS Case_Mx_Dealer_No,
c.service_district_code AS Case_Service_District_Cd,
c.status AS Case_Status_Cd,
c.method AS Case_Method_Nm,
c.point_of_origin AS Case_Point_Of_Origin_Dc,
c.sales_zone AS Case_Sales_Zone_Nm,
c.service_zone AS Case_Service_Zone_Nm,
c.subject AS Case_Subject_Txt,
'' AS Case_Customer_Nm,
c.city AS Case_City_Nm,
c.state AS Case_State_Cd,
c.zipcode AS Case_Zip_Cd,
'' AS Case_HasApprovedCheck_Flg,
'' AS Case_HasKrExec_Flg,
'' AS Case_IsMediation_Flg,
'' Case_Phone_Carrier_Nm,
'' Case_Phone_Mfg_Nm,
'' Case_Phone_Model_Nm,
'' Case_Phone_Os_Nm,
'' Case_Phone_Carrier_Other_Nm,
'' Case_Other_Phone_Mfg_Nm,
'' Case_Other_Phone_Model_Nm,
'' Case_Other_Phone_Os_Nm,
'' Note_Updt_Ts,
'' PQ_Case_ID,
'' Probing_Question_Id,
'' PQ_RecordType_Nm,
'' PQ_Updt_Ts,
'' PQ_Incident_Ts,
'' PQ_Incident_City_Nm,
'' PQ_Incident_State_Cd,
'' PQ_Incident_Brief_Dc,
'' PQ_Customer_Request_Txt,
i.issue_id AS Issue_Id,
i.issue_name AS Issue_Nm,
i.updt_ts AS Issue_Updt_Ts,
i.source_create_date AS Issue_Src_Cret_Ts,
i.closed_date AS Issue_Closed_Ts,
i.issue_disposition AS Issue_Disposition_Nm,
i.type_1 AS Issue_Type1_Nm,
i.type_2 AS Issue_Type2_Nm,
i.status AS Issue_Status_Cd,
i.title AS Issue_Title_Dc,
i.resolution AS Issue_Resolution_Dc,
i.total_goodwill_amount AS Issue_Total_Goodwill_Amt,
i.affected_part AS Issue_Affected_Part_Cd,
i.symptom AS Issue_Symptom_Cd,
i.labor_code AS Issue_Labor_Cd_Nm,
i.tread_comp_code1 AS Issue_Tread_Comp1_Cd,
i.primary_part_no AS Issue_Primary_Part_No,
i.primary_part_description AS Issue_Primary_Part_Dc,
i.campaign_code AS Issue_Campaign_Cd,
i.campaign_description AS Issue_Campaign_Dc,
i.defect_code AS Issue_Defect_Cd,
v.retail_sales_date AS Rtl_Sls_Dt,
V.assembled_date AS Af_Off_Dt,
'' Eng_Off_Dt,
'' Trmsn_Off_Dt,
m.short_sales_model_code AS shrt_sls_mdl_cd,
m.model_year AS Model_Year,
m.model_name AS CMQ_Model,
m.model_name AS AH_Model,
m.destination_code AS Dest_Code,
m.factory_code AS Factory_Code,
v.product_division_code AS prod_div,
m.doors AS doors,
m.trim_type_code AS Trim_Type,
m.engine_series AS engine_series,
m.grade_short AS grade_short,
m.model_generation AS model_generation,
m.transmission AS Trans_Type,
m.transmission_series AS Trans_Serial_No,
m.four_wheel_drive AS four_wheel_drive_flag,
'' AS issue_check_req_status_cnt
FROM
(
SELECT DISTINCT rowno, case_number, Contention_date, vin, Case_id, body
FROM
(
SELECT rownum() as rowno,c.case_number, fd.created_date AS Contention_date, c.vin, fd.parent_id AS Case_id, fd.body as Body
FROM crrs_feeditem fd JOIN crrs_case c ON fd.parent_id = c.case_id
WHERE c.case_number IS NOT NULL AND c.case_id IS NOT NULL AND c.vin IS NOT NULL AND fd.body IS NOT NULL
AND c.method NOT IN ('Outreach', 'Pro-Active O/B', 'Robo-Call')
AND c.subdivision NOT IN ('Alpha','Campaign','Marine Dealer Support','Marine Sales Support','MC Mediation','PCRM','PCRM - Torrance','PCRM Chino','PCRM-Chino','PCRM-Torrance')
AND c.point_of_origin NOT IN ('Outreach')
AND (fd.body NOT LIKE ('%Outbound%') AND fd.body NOT LIKE ('%No campaign data found%') AND fd.body NOT LIKE ('%viewed Campaign Info%') AND fd.body NOT LIKE ('%Recall%'))
AND ((c.DIVISION = 'Honda' AND c.YEAR BETWEEN 2017 AND 2020) OR (c.DIVISION = 'Acura' AND c.YEAR BETWEEN 2014 AND 2020))
AND formatdate('yyyy/MM/dd',fd.created_date) between '2020/01/30' and '2020/01/31'
UNION
select rownum() as rowno,c.case_number, cf.source_created_date AS Contention_date, c.vin, cf.parent_id AS Case_id, cf.body AS Body --cast(body as varchar(10000)) AS case_msg
from crrs_casefeed cf JOIN crrs_case c ON cf.parent_id = c.case_id
WHERE c.case_number IS NOT NULL AND c.case_id IS NOT NULL AND c.vin IS NOT NULL AND cf.body IS NOT NULL AND isdeleted = 0
AND c.method NOT IN ('Outreach', 'Pro-Active O/B', 'Robo-Call')
AND c.subdivision NOT IN ('Alpha','Campaign','Marine Dealer Support','Marine Sales Support','MC Mediation','PCRM','PCRM - Torrance','PCRM Chino','PCRM-Chino','PCRM-Torrance')
AND c.point_of_origin NOT IN ('Outreach')
AND (cf.body NOT LIKE ('%Outbound%') AND cf.body NOT LIKE ('%No campaign data found%') AND cf.body NOT LIKE ('%viewed Campaign Info%') AND cf.body NOT LIKE ('%Recall%'))
AND ((c.DIVISION = 'Honda' AND c.YEAR BETWEEN 2017 AND 2020) OR (c.DIVISION = 'Acura' AND c.YEAR BETWEEN 2014 AND 2020))
AND formatdate('yyyy/MM/dd',cf.source_created_date) between '2020/01/30' and '2020/01/31'
UNION
select rownum() as rowno,c.case_number, em.message_timestamp AS Contention_date, c.vin, em.parent_id AS Case_id, cast(em.body_text as varchar(10000)) AS Body
from crrs_email_message em JOIN crrs_case c ON em.parent_id = c.case_id
WHERE c.case_number IS NOT NULL AND c.case_id IS NOT NULL AND c.vin IS NOT NULL AND em.body_text IS NOT NULL
AND c.method NOT IN ('Outreach', 'Pro-Active O/B', 'Robo-Call')
AND c.subdivision NOT IN ('Alpha','Campaign','Marine Dealer Support','Marine Sales Support','MC Mediation','PCRM','PCRM - Torrance','PCRM Chino','PCRM-Chino','PCRM-Torrance')
AND c.point_of_origin NOT IN ('Outreach')
AND (em.body_text NOT LIKE ('%Outbound%') AND em.body_text NOT LIKE ('%No campaign data found%') AND em.body_text NOT LIKE ('%viewed Campaign Info%') AND em.body_text NOT LIKE ('%Recall%'))
AND ((c.DIVISION = 'Honda' AND c.YEAR BETWEEN 2017 AND 2020) OR (c.DIVISION = 'Acura' AND c.YEAR BETWEEN 2014 AND 2020))
AND formatdate('yyyy/MM/dd',em.message_timestamp) between '2020/01/30' and '2020/01/31'
UNION
select rownum() as rowno,c.case_number, ch.start_timestamp AS Contention_date, c.vin, c.case_id as Case_id, cast(ch.body_text as varchar(10000)) AS Body
from crrs_livechat_transcript ch JOIN crrs_case c ON ch.case_id = c.case_id
WHERE c.case_number IS NOT NULL AND c.case_id IS NOT NULL AND c.vin IS NOT NULL AND ch.body_text IS NOT NULL
AND c.method NOT IN ('Outreach', 'Pro-Active O/B', 'Robo-Call')
AND c.subdivision NOT IN ('Alpha','Campaign','Marine Dealer Support','Marine Sales Support','MC Mediation','PCRM','PCRM - Torrance','PCRM Chino','PCRM-Chino','PCRM-Torrance')
AND c.point_of_origin NOT IN ('Outreach')
AND (ch.body_text NOT LIKE ('%Outbound%') AND ch.body_text NOT LIKE ('%No campaign data found%') AND ch.body_text NOT LIKE ('%viewed Campaign Info%') AND ch.body_text NOT LIKE ('%Recall%'))
AND ((c.DIVISION = 'Honda' AND c.YEAR BETWEEN 2017 AND 2020) OR (c.DIVISION = 'Acura' AND c.YEAR BETWEEN 2014 AND 2020))
AND formatdate('yyyy/MM/dd',ch.start_timestamp) between '2020/01/30' and '2020/01/31'
) t
WHERE case_number IS NOT NULL AND CASE_ID IS NOT NULL AND VIN IS NOT NULL AND BODY IS NOT NULL
-- ORDER BY case_number, Contention_date
) fd
JOIN crrs_case c ON fd.Case_id = c.case_id
LEFT JOIN vehicle v ON c.vin = v.vin
LEFT JOIN model_mto_feature m ON m.mto_model_code = v.mto_model_code AND m.mto_type_code = v.mto_type_code AND m.mto_option_code = v.mto_option_code
LEFT JOIN crrs_issue i ON c.case_id = i.case_id
LEFT JOIN crrs_recordtype r ON c.recordtype_id = r.recordtype_id
WHERE c.case_number IS NOT NULL AND c.case_id IS NOT NULL AND c.vin IS NOT NULL AND fd.body IS NOT NULL
AND c.method NOT IN ('Outreach', 'Pro-Active O/B', 'Robo-Call')
AND c.subdivision NOT IN ('Alpha','Campaign','Marine Dealer Support','Marine Sales Support','MC Mediation','PCRM','PCRM - Torrance','PCRM Chino','PCRM-Chino','PCRM-Torrance')
AND c.point_of_origin NOT IN ('Outreach')
AND (fd.body NOT LIKE ('%Outbound%') OR fd.body NOT LIKE ('%No campaign data found%') OR fd.body NOT LIKE ('%viewed Campaign Info%') OR fd.body NOT LIKE ('%Recall%'))
AND ((c.DIVISION = 'Honda' AND c.YEAR BETWEEN 2017 AND 2020) OR (c.DIVISION = 'Acura' AND c.YEAR BETWEEN 2014 AND 2020));
"""
def qpreds(n):
return ["mod(rowno, {np}) = {modulus}".format(np=n, modulus=k) for k in range(n)]
joindf = spark.read.jdbc(url=denodo_jdbc_str, \
table='({sql}) test'.format(sql=denodo_join_query), \
properties=denodo_prop, predicates=qpreds(50))
joindf.cache().count()
joindf.head()
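# Persist the joined result to PostgreSQL, then read it back below
# (again partitioned on rowno) for downstream processing and CSV export.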
urlPSQL = ("jdbc:postgresql://{host}:{port}/{db}").format( \
host='your_host', \
port='5432', \
db='your_db')
propertiesPSQL = { \
"driver": "org.postgresql.Driver",
"user": 'your_username',
"password": 'your_password'
}
joindf.write.jdbc(table='cmbs_source_from_Jan30to31_2_4_2020', mode='overwrite', \
url=urlPSQL, \
properties=propertiesPSQL)
def qpreds_psql(n):
return ["mod(rowno, {np}) = {modulus}".format(np=n, modulus=k) for k in range(n)]
cmbsdf = spark.read.jdbc(url=urlPSQL, \
table='({sql}) test'.format(sql="select * from cmbs_source_from_Jan30to31_2_4_2020"), \
properties=propertiesPSQL, predicates=qpreds_psql(25)).cache()
cmbsdf.count()
cmbsdf.columns
cmbsdf.count()
cmbsdf.toPandas().to_csv('cmbs_source_from_Jan30to31_2_4_2020.csv')
cmbsdf.toPandas().head()
```
```
import random
import os
import shutil
# Merge files from the source dirs to a unified data directory
data_dir = 'all_data'
filenames = data_dir + '/filename_list.txt'
if not os.path.exists(data_dir):
os.makedirs(data_dir)
os.makedirs(data_dir + '/images')
os.makedirs(data_dir + '/annotations')
filelist = []
source_dirs = ['bosch_mini', 'sim', 'rosbag']
# source_dirs = ['sim', 'rosbag']
for source_dir in source_dirs:
# Read source filename_list.txt
source_filenames = open(source_dir + '_data/filename_list.txt', 'r').read().split('\n')
source_filenames.remove('')
# copying image + annotation files
for f in source_filenames:
try:
# copy image
src = source_dir + '_data/images/' + f + '.jpg'
dst = 'all_data/images/' + source_dir + '_' + f + '.jpg'
shutil.copyfile(src, dst)
except:
print("Error: {} -- file not found".format(source_dir + '_data/images/' + f + '.jpg') )
continue
try:
src = source_dir + '_data/annotations/' + f + '.xml'
dst = 'all_data/annotations/' + source_dir + '_' + f + '.xml'
# copy annotation and also modify the filename reference in the xml
xml_content = open(src, 'r').read()
repl_src = '<filename>' + f + '.jpg</filename>'
repl_dst = '<filename>' + source_dir + '_' + f + '.jpg</filename>'
xml_content = xml_content.replace(repl_src, repl_dst)
fh = open(dst, "w")
fh.write(xml_content)
fh.close()
except:
print("Error: {} -- file not found".format(source_dir + '_data/annotations/' + f + '.xml') )
continue
filelist.append(source_dir + '_' + f)
# dumping unified filenames
with open(filenames, mode='wt', encoding='utf-8') as myfile:
myfile.write('\n'.join(filelist))
# usage: put this Jupyter notebook in the same directory as the train+valid dataset
# set val_percent and run.
# data_dir = 'rosbag_data' # directory for train + val data
# data_dir = 'bosch_mini_data' # directory for train + val data
# data_dir = 'sim_data' # directory for train + val data
data_dir = 'all_data' # directory for train + val data
filename = data_dir+'/filename_list.txt' # file names are in it
val_percent = 0.2 # percentage of validation set
random.seed(1789) # random seed
# read all the file names and randomize
filename_trainval = open(filename,'r').read().split('\n')
if '' in filename_trainval:
filename_trainval.remove('')
random.shuffle(filename_trainval) # randomize
n_samples = len(filename_trainval)
print("number of samples: ", n_samples)
# create list of file names for validation set
filename_val = random.sample(filename_trainval, int(val_percent*n_samples))
# create list of file names for training set
filename_train= [f for f in filename_trainval if f not in filename_val]
print("number of samples in training set:", len(filename_train))
print("number of samples in validation set:", len(filename_val))
# create a folder for training and validation sets,
# and put image and annotation files
for set_ in ['train', 'val']:
    # create a directory and subdirectories `images`, `annotations`
set_dir = data_dir+'_'+set_
if not os.path.exists(set_dir):
os.makedirs(set_dir)
annotation_dir = set_dir+'/annotations'
image_dir = set_dir+'/images'
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(annotation_dir):
os.makedirs(annotation_dir)
if set_ == 'train':
filename_set = filename_train
elif set_ == 'val':
filename_set = filename_val
# create text file listing file names
with open(set_dir+'/filename_list.txt', 'w') as fl:
for f in filename_set:
fl.write('%s\n' % f)
fl.close()
for f in filename_set:
# copy image files
try:
image_name = f+'.jpg'
shutil.copyfile(data_dir+'/images/'+image_name, image_dir+'/'+image_name)
except:
print( "[IMAGE] error: {} not found".format(data_dir+'/images/'+f+'.jpg'))
# copy annotation files
try:
annotation_name = f+'.xml'
shutil.copyfile(data_dir+'/annotations/'+annotation_name, annotation_dir+'/'+annotation_name)
except:
print( "[ANNOTATION] error: {} not found".format(data_dir+'/images/'+f+'.xml'))
```
<a href="https://colab.research.google.com/github/LoveMeWithoutAll/keraspp/blob/feature%2Fch03/DNN_classification_cifar.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import numpy as np
from keras import datasets
from keras.utils import np_utils
def Data_func():
(X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()
Y_train = np_utils.to_categorical(y_train)
Y_test = np_utils.to_categorical(y_test)
L, W, H, C = X_train.shape
X_train = X_train.reshape(-1, W * H * C)
X_test = X_test.reshape(-1, W * H * C)
X_train = X_train / 255.0
X_test = X_test / 255.0
return (X_train, Y_train), (X_test, Y_test)
from keras import layers, models
class DNN(models.Sequential):
def __init__(self, Nin, Nh_l, Pd_l, Nout):
super().__init__()
self.add(layers.Dense(Nh_l[0], activation='relu', input_shape=(Nin,), name='Hidden-1'))
self.add(layers.Dropout(Pd_l[0]))
self.add(layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'))
self.add(layers.Dropout(Pd_l[1]))
self.add(layers.Dense(Nout, activation='softmax'))
self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
import matplotlib.pyplot as plt
def plot_acc(history, title=None):
# summarize history for accuracy
if not isinstance(history, dict):
history = history.history
plt.plot(history['acc'])
plt.plot(history['val_acc'])
if title is not None:
plt.title(title)
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Training', 'Verification'], loc=0)
# plt.show()
def plot_loss(history, title=None):
# summarize history for loss
if not isinstance(history, dict):
history = history.history
plt.plot(history['loss'])
plt.plot(history['val_loss'])
if title is not None:
plt.title(title)
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Training', 'Verification'], loc=0)
# plt.show()
import matplotlib.pyplot as plt
def plot_acc(history, title=None):
# summarize history for accuracy
if not isinstance(history, dict):
history = history.history
plt.plot(history['acc'])
plt.plot(history['val_acc'])
if title is not None:
plt.title(title)
    plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Training data', 'Validation data'], loc=0)
# plt.show()
def plot_loss(history, title=None):
# summarize history for loss
if not isinstance(history, dict):
history = history.history
plt.plot(history['loss'])
plt.plot(history['val_loss'])
if title is not None:
plt.title(title)
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Training data', 'Validation data'], loc=0)
# plt.show()
def main(Pd_l=[0.0, 0.0]):
Nh_l = [100, 50]
number_of_class = 10
Nout = number_of_class
(X_train, Y_train), (X_test, Y_test) = Data_func()
model = DNN(X_train.shape[1], Nh_l, Pd_l=Pd_l, Nout=Nout)
history = model.fit(X_train, Y_train, epochs=100, batch_size=100, validation_split=0.2)
performance_test = model.evaluate(X_test, Y_test, batch_size=100)
print('Test Loss and Accuracy ->', performance_test)
plot_acc(history)
plt.show()
plot_loss(history=history)
plt.show()
if __name__ == '__main__':
main(Pd_l=[0.0, 0.0])
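# Second run: repeat the experiment with dropout (5% after Hidden-1, 50% after Hidden-2)
# to compare against the no-dropout baseline above.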
if __name__ == '__main__':
main(Pd_l=[0.05, 0.5])
```
## Downloading specific files.
```
!pip install boto3
import boto3
import h5py
import pandas as pd
from botocore.handlers import disable_signing
resource = boto3.resource('s3')
resource.meta.client.meta.events.register('choose-signer.s3.*', disable_signing)
bucket=resource.Bucket('sevir')
objs=bucket.objects.filter(Prefix='')
for o in objs:
    if o.key == 'data/vil/2019/SEVIR_VIL_STORMEVENTS_2019_0101_0630.h5':
        print(o.key)
satellite = pd.read_csv("/content/drive/MyDrive/CATALOG.csv")
files = list(satellite[satellite.event_id == 781628].file_name)
event_subset = satellite.loc[satellite['event_id'].isin([781628])]
event_subset = event_subset.loc[~event_subset['img_type'].isin(['vis'])]
event_subset
event_subset.to_csv('/content/event_subset.csv')
!ls
print(files)
for file in files:
key = 'data/' + file
print(key)
filename = file.split('/')
bucket.download_file(key,filename[2])
!ls
!mv SEVIR_IR069_STORMEVENTS_2018_0701_1231.h5 /content/sample_data/ir069/2018
!mv SEVIR_IR107_STORMEVENTS_2018_0701_1231.h5 /content/sample_data/ir107/2018
!mv SEVIR_LGHT_ALLEVENTS_2018_0801_0901.h5 /content/sample_data/lght/2018
!mv SEVIR_VIL_STORMEVENTS_2018_0701_1231.h5 /content/sample_data/vil/2018
!mv SEVIR_VIS_STORMEVENTS_2018_0801_0831.h5 /content/sample_data/vis/2018
!ls
```
## Generating Test Data
```
!python /content/make_synrad_dataset.py --sevir_data /content/sample_data/ --sevir_catalog /content/drive/MyDrive/Cat.csv --output_location /content/drive/MyDrive/Output_synrad
```
## Downloading pre-trained models.
```
import pandas as pd
import urllib.request
import os
os.environ["HDF5_USE_FILE_LOCKING"]='FALSE'
import sys
import h5py
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def main():
model_info = pd.read_csv('/content/drive/MyDrive/neurips-2020-sevir-master/models/model_urls.csv')
for i,r in model_info.iterrows():
print(f'Downloading {r.model}...')
download_file(r.url,f'{r.application}/{r.model}')
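# Note: download_file below just shells out to wget, which saves each file under
# the URL's basename in the current directory; the filename argument is not used.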
def download_file(url,filename):
print(f'wget {url}')
os.system(f'wget {url}')
if __name__=='__main__':
main()
```
## Load sample test data
```
# Load weights from best model on val set
mse_weights_file = '/content/mse_weights.h5?dl=0'
mse_model = tf.keras.models.load_model(mse_weights_file,compile=False,custom_objects={"tf": tf})
mse_vgg_weights_file = '/content/mse_vgg_weights.h5?dl=0'
mse_vgg_model = tf.keras.models.load_model(mse_vgg_weights_file,compile=False,custom_objects={"tf": tf})
gan_weights_file = '/content/gan_mae_weights.h5?dl=0'
gan_model = tf.keras.models.load_model(gan_weights_file,compile=False,custom_objects={"tf": tf})
synrad_testing = '/content/drive/MyDrive/Output_synrad/synrad_testing.h5'
import sys
sys.path.append('neurips-2020-sevir/src/')
from synrad_reader import read_data
x_test,y_test = read_data(synrad_testing,end=1000)
```
## Visualize results on some test samples
```
def run_synrad(model,x_test,batch_size=32):
return model.predict([x_test[k] for k in ['ir069','ir107','lght']],batch_size=batch_size)
y_pred_mse = run_synrad(mse_model,x_test)
y_pred_mse_vgg = run_synrad(mse_vgg_model,x_test)
y_pred_gan = run_synrad(gan_model,x_test)
# Plot using default cmap
from display import get_cmap
def visualize_result(y_test,y_preds,idx,ax):
cmap_dict = lambda s: {'cmap':get_cmap(s,encoded=True)[0], 'norm':get_cmap(s,encoded=True)[1],
'vmin':get_cmap(s,encoded=True)[2], 'vmax':get_cmap(s,encoded=True)[3]}
ax[0].imshow(x_test['ir069'][idx,:,:,0],**cmap_dict('ir069'))
ax[1].imshow(x_test['ir107'][idx,:,:,0],**cmap_dict('ir107'))
ax[2].imshow(x_test['lght'][idx,:,:,0],cmap='hot',vmin=0,vmax=10)
ax[3].imshow(y_test['vil'][idx,:,:,0],**cmap_dict('vil'))
for k in range(len(y_preds)):
if isinstance(y_preds[k],(list,)):
yp=y_preds[k][0]
else:
yp=y_preds[k]
ax[4+k].imshow(yp[idx,:,:,0],**cmap_dict('vil'))
for i in range(len(ax)):
ax[i].xaxis.set_ticks([])
ax[i].yaxis.set_ticks([])
test_idx = [123,456,789]
N=len(test_idx)
fig,ax = plt.subplots(N,7,figsize=(12,4))
for k,i in enumerate(test_idx):
visualize_result(y_test,[y_pred_mse,y_pred_mse_vgg,y_pred_gan], i, ax[k] )
ax[0][0].set_title('Input ir069')
ax[0][1].set_title('Input ir107')
ax[0][2].set_title('Input lght')
ax[0][3].set_title('Truth')
ax[0][4].set_title('Output\nMSE Loss')
ax[0][5].set_title('Output\nMSE+VGG Loss')
ax[0][6].set_title('Output\nGAN+MAE Loss')
plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95, hspace=0.05,
wspace=0.35)
```
# Logistic Regression
Notebook version: 2.0 (Nov 21, 2017)
2.1 (Oct 19, 2018)
Author: Jesús Cid Sueiro ([email protected])
Jerónimo Arenas García ([email protected])
Changes: v.1.0 - First version
v.1.1 - Typo correction. Prepared for slide presentation
         v.2.0 - Prepared for Python 3.0 (backwards compatible with 2.7)
Assumptions for regression model modified
v.2.1 - Minor changes regarding notation and assumptions
```
from __future__ import print_function
# To visualize plots in the notebook
%matplotlib inline
# Imported libraries
import csv
import random
import matplotlib
import matplotlib.pyplot as plt
import pylab
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import PolynomialFeatures
from sklearn import linear_model
```
## 1. Introduction
### 1.1. Binary classification and decision theory. The MAP criterion
The goal of a classification problem is to assign a *class* or *category* to every *instance* or *observation* of a data collection. Here, we will assume that every instance ${\bf x}$ is an $N$-dimensional vector in $\mathbb{R}^N$, and that the class $y$ of sample ${\bf x}$ is an element of a binary set ${\mathcal Y} = \{0, 1\}$. The goal of a classifier is to predict the true value of $y$ after observing ${\bf x}$.
We will denote as $\hat{y}$ the classifier output or *decision*. If $y=\hat{y}$, the decision is a *hit*, otherwise $y\neq \hat{y}$ and the decision is an *error*.
Decision theory provides a solution to the classification problem in situations where the relation between instance ${\bf x}$ and its class $y$ is given by a known probabilistic model: assume that every tuple $({\bf x}, y)$ is an outcome of a random vector $({\bf X}, Y)$ with joint distribution $p_{{\bf X},Y}({\bf x}, y)$. A natural criterion for classification is to select the predictor $\hat{Y}=f({\bf x})$ in such a way that the probability of error, $P\{\hat{Y} \neq Y\}$, is minimum. Noting that
$$
P\{\hat{Y} \neq Y\} = \int P\{\hat{Y} \neq Y | {\bf x}\} p_{\bf X}({\bf x}) d{\bf x}
$$
the optimal decision is obtained if, for every sample ${\bf x}$, we make the decision minimizing the conditional error probability:
\begin{align}
\hat{y}^* &= \arg\min_{\hat{y}} P\{\hat{y} \neq Y |{\bf x}\} \\
&= \arg\max_{\hat{y}} P\{\hat{y} = Y |{\bf x}\} \\
\end{align}
Thus, the optimal decision rule can be expressed as
$$
P_{Y|{\bf X}}(1|{\bf x}) \quad\mathop{\gtrless}^{\hat{y}=1}_{\hat{y}=0}\quad P_{Y|{\bf X}}(0|{\bf x})
$$
or, equivalently
$$
P_{Y|{\bf X}}(1|{\bf x}) \quad\mathop{\gtrless}^{\hat{y}=1}_{\hat{y}=0}\quad \frac{1}{2}
$$
The classifier implementing this decision rule is usually named MAP (*Maximum A Posteriori*). As we have seen, the MAP classifier minimizes the error probability for binary classification, but the result can also be generalized to multiclass classification problems.
### 1.2. Parametric classification.
Classical decision theory is grounded on the assumption that the probabilistic model relating the observed sample ${\bf X}$ and the true hypothesis $Y$ is known. Unfortunately, this is unrealistic in many applications, where the only available information to construct the classifier is a dataset $\mathcal D = \{{\bf x}^{(k)}, y^{(k)}\}_{k=0}^{K-1}$ of instances and their respective class labels.
A more realistic formulation of the classification problem is the following: given a dataset $\mathcal D = \{({\bf x}^{(k)}, y^{(k)}) \in {\mathbb{R}}^N \times {\mathcal Y}, \, k=0,\ldots,{K-1}\}$ of independent and identically distributed (i.i.d.) samples from an ***unknown*** distribution $p_{{\bf X},Y}({\bf x}, y)$, predict the class $y$ of a new sample ${\bf x}$ with the minimum probability of error.
Since the probabilistic model generating the data is unknown, the MAP decision rule cannot be applied. However, many classification algorithms use the dataset to obtain an estimate of the posterior class probabilities, and apply it to implement an approximation to the MAP decision maker.
Parametric classifiers based on this idea assume, additionally, that the posterior class probability satisfies some parametric formula:
$$
P_{Y|X}(1|{\bf x},{\bf w}) = f_{\bf w}({\bf x})
$$
where ${\bf w}$ is a vector of parameters. Given the expression of the MAP decision maker, classification consists in comparing the value of $f_{\bf w}({\bf x})$ with the threshold $\frac{1}{2}$, and each parameter vector is associated with a different decision maker.
In practice, the dataset ${\mathcal D}$ is used to select a particular parameter vector $\hat{\bf w}$ according to a certain criterion. Accordingly, the decision rule becomes
$$
f_{\hat{\bf w}}({\bf x}) \quad\mathop{\gtrless}^{\hat{y}=1}_{\hat{y}=0}\quad \frac{1}{2}
$$
In this lesson, we explore one of the most popular model-based parametric classification methods: **logistic regression**.
<img src="./figs/parametric_decision.png" width=400>
## 2. Logistic regression.
### 2.1. The logistic function
The logistic regression model assumes that the binary class label $Y \in \{0,1\}$ of an observation ${\bf X}\in \mathbb{R}^N$ satisfies the following expressions:
$$P_{Y|{\bf X}}(1|{\bf x}, {\bf w}) = g({\bf w}^\intercal{\bf x})$$
$$P_{Y|{\bf X}}(0|{\bf x}, {\bf w}) = 1-g({\bf w}^\intercal{\bf x})$$
where ${\bf w}$ is a parameter vector and $g(·)$ is the *logistic* function, which is defined by
$$g(t) = \frac{1}{1+\exp(-t)}$$
It is straightforward to see that the logistic function has the following properties:
- **P1**: Probabilistic output: $\quad 0 \le g(t) \le 1$
- **P2**: Symmetry: $\quad g(-t) = 1-g(t)$
- **P3**: Monotonicity: $\quad g'(t) = g(t)·[1-g(t)] \ge 0$
In the following, we define the logistic function in Python and use it to plot a graphical representation.
**Exercise 1**: Verify properties P2 and P3.
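For reference, both properties follow directly from the definition of $g$:

\begin{align}
g(-t) &= \frac{1}{1+\exp(t)} = \frac{\exp(-t)}{1+\exp(-t)} = 1 - \frac{1}{1+\exp(-t)} = 1-g(t) \\
g'(t) &= \frac{\exp(-t)}{\left(1+\exp(-t)\right)^2} = \frac{1}{1+\exp(-t)}\cdot\frac{\exp(-t)}{1+\exp(-t)} = g(t)\,[1-g(t)] \ge 0
\end{align}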
**Exercise 2**: Implement a function to compute the logistic function, and use it to plot such function in the inverval $[-6,6]$.
```
# Define the logistic function
def logistic(t):
#<SOL>
return 1.0 / (1 + np.exp(-t))
#</SOL>
# Plot the logistic function
t = np.arange(-6, 6, 0.1)
z = logistic(t)
plt.plot(t, z)
plt.xlabel('$t$', fontsize=14)
plt.ylabel('$g(t)$', fontsize=14)
plt.title('The logistic function')
plt.grid()
```
### 2.2. Classifiers based on the logistic model.
The MAP classifier under a logistic model will have the form
$$P_{Y|{\bf X}}(1|{\bf x}, {\bf w}) = g({\bf w}^\intercal{\bf x}) \quad\mathop{\gtrless}^{\hat{y}=1}_{\hat{y}=0} \quad \frac{1}{2} $$
Therefore
$$
2 \quad\mathop{\gtrless}^{\hat{y}=1}_{\hat{y}=0} \quad
1 + \exp(-{\bf w}^\intercal{\bf x}) $$
which is equivalent to
$${\bf w}^\intercal{\bf x}
\quad\mathop{\gtrless}^{\hat{y}=1}_{\hat{y}=0}\quad
0 $$
Therefore, the classifiers based on the logistic model are given by linear decision boundaries passing through the origin, ${\bf x} = {\bf 0}$.
```
# Weight vector:
w = [4, 8] # Try different weights
# Create a rectangular grid.
x_min = -1
x_max = 1
dx = x_max - x_min
h = float(dx) / 200
xgrid = np.arange(x_min, x_max, h)
xx0, xx1 = np.meshgrid(xgrid, xgrid)
# Compute the logistic map for the given weights
Z = logistic(w[0]*xx0 + w[1]*xx1)
# Plot the logistic map
fig = plt.figure()
ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) is not supported in recent Matplotlib versions
ax.plot_surface(xx0, xx1, Z, cmap=plt.cm.copper)
ax.contour(xx0, xx1, Z, levels=[0.5], colors='b', linewidths=(3,))
plt.xlabel('$x_0$')
plt.ylabel('$x_1$')
ax.set_zlabel('P(1|x,w)')
plt.show()
```
The next code fragment plots the output of the same classifier in the $x_0$-$x_1$ plane, encoding the value of the logistic function in the color map.
```
CS = plt.contourf(xx0, xx1, Z)
CS2 = plt.contour(CS, levels=[0.5],
colors='m', linewidths=(3,))
plt.xlabel('$x_0$')
plt.ylabel('$x_1$')
plt.colorbar(CS, ticks=[0, 0.5, 1])
plt.show()
```
### 2.3. Nonlinear classifiers.
The logistic model can be extended to construct non-linear classifiers by using non-linear data transformations. A general form for a nonlinear logistic regression model is
$$P_{Y|{\bf X}}(1|{\bf x}, {\bf w}) = g[{\bf w}^\intercal{\bf z}({\bf x})] $$
where ${\bf z}({\bf x})$ is an arbitrary nonlinear transformation of the original variables. The decision boundary in that case is given by the equation
$$
{\bf w}^\intercal{\bf z} = 0
$$
**Exercise 2**: Modify the code above to generate a 3D surface plot of the polynomial logistic regression model given by
$$
P_{Y|{\bf X}}(1|{\bf x}, {\bf w}) = g(1 + 10 x_0 + 10 x_1 - 20 x_0^2 + 5 x_0 x_1 + x_1^2)
$$
```
# Weight vector:
w = [1, 10, 10, -20, 5, 1] # Try different weights
# Create a rectangular grid.
x_min = -1
x_max = 1
dx = x_max - x_min
h = float(dx) / 200
xgrid = np.arange(x_min, x_max, h)
xx0, xx1 = np.meshgrid(xgrid, xgrid)
# Compute the logistic map for the given weights
# Z = <FILL IN>
Z = logistic(w[0] + w[1]*xx0 + w[2]*xx1 + w[3]*np.multiply(xx0,xx0) + w[4]*np.multiply(xx0,xx1) + w[5]*np.multiply(xx1,xx1))
# Plot the logistic map
fig = plt.figure()
ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) is not supported in recent Matplotlib versions
ax.plot_surface(xx0, xx1, Z, cmap=plt.cm.copper)
plt.xlabel('$x_0$')
plt.ylabel('$x_1$')
ax.set_zlabel('P(1|x,w)')
plt.show()
CS = plt.contourf(xx0, xx1, Z)
CS2 = plt.contour(CS, levels=[0.5],
colors='m', linewidths=(3,))
plt.xlabel('$x_0$')
plt.ylabel('$x_1$')
plt.colorbar(CS, ticks=[0, 0.5, 1])
plt.show()
```
## 3. Inference
Remember that the idea of parametric classification is to use the training data set $\mathcal D = \{({\bf x}^{(k)}, y^{(k)}) \in {\mathbb{R}}^N \times \{0,1\}, k=0,\ldots,{K-1}\}$ to set the parameter vector ${\bf w}$ according to a certain criterion. Then, the estimate $\hat{\bf w}$ can be used to compute the label prediction for any new observation as
$$\hat{y} = \arg\max_y P_{Y|{\bf X}}(y|{\bf x},\hat{\bf w}).$$
<img src="figs/parametric_decision.png" width=400>
We still need to choose the criterion that the selection of the parameter vector will optimize. In this notebook, we will discuss two different approaches to the estimation of ${\bf w}$:
* Maximum Likelihood (ML): $\hat{\bf w}_{\text{ML}} = \arg\max_{\bf w} P_{{\mathcal D}|{\bf W}}({\mathcal D}|{\bf w})$
* Maximum *A Posteriori* (MAP): $\hat{\bf w}_{\text{MAP}} = \arg\max_{\bf w} p_{{\bf W}|{\mathcal D}}({\bf w}|{\mathcal D})$
For the mathematical derivation of the logistic regression algorithm, the following representation of the logistic model will be useful: noting that
$$P_{Y|{\bf X}}(0|{\bf x}, {\bf w}) = 1-g[{\bf w}^\intercal{\bf z}({\bf x})]
= g[-{\bf w}^\intercal{\bf z}({\bf x})]$$
we can write
$$P_{Y|{\bf X}}(y|{\bf x}, {\bf w}) = g[\overline{y}{\bf w}^\intercal{\bf z}({\bf x})]$$
where $\overline{y} = 2y-1$ is a *symmetrized label* ($\overline{y}\in\{-1, 1\}$).
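The following quick check illustrates this identity numerically (a sketch; it assumes the `logistic` function defined above in this notebook, and the value of $t$ is arbitrary):
```
# Check that g(y_bar * t) recovers P(y|x,w) for both label values (illustrative).
t = 1.7                         # an arbitrary value of w^T z(x)
for y in [0, 1]:
    y_bar = 2*y - 1             # symmetrized label in {-1, +1}
    p_y = logistic(t) if y == 1 else 1 - logistic(t)
    print(y, p_y, logistic(y_bar * t))   # the last two columns should coincide
```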
### 3.1. Model assumptions
In the following, we will make the following assumptions:
- **A1**. (Logistic Regression): We assume a logistic model for the *a posteriori* probability of ${Y=1}$ given ${\bf X}$, i.e.,
$$P_{Y|{\bf X}}(1|{\bf x}, {\bf w}) = g[{\bf w}^\intercal{\bf z}({\bf x})].$$
- **A2**. All samples in ${\mathcal D}$ have been generated by the same distribution, $p_{{\bf X}, Y}({\bf x}, y)$.
- **A3**. Input variables $\bf x$ do not depend on $\bf w$. This implies that
$$p({\bf x}|{\bf w}) = p({\bf x})$$
- **A4**. Targets $y^{(0)}, \cdots, y^{(K-1)}$ are statistically independent given $\bf w$ and the inputs ${\bf x}^{(0)}, \cdots, {\bf x}^{(K-1)}$, that is:
$$p(y^{(0)}, \cdots, y^{(K-1)} | {\bf x}^{(0)}, \cdots, {\bf x}^{(K-1)}, {\bf w}) = \prod_{k=0}^{K-1} p(y^{(k)} | {\bf x}^{(k)}, {\bf w})$$
### 3.2. ML estimation.
The ML estimate is defined as
$$\hat{\bf w}_{\text{ML}} = \arg\max_{\bf w} P_{{\mathcal D}|{\bf W}}({\mathcal D}|{\bf w})$$
Using assumptions A2 and A3 above, we have that
\begin{align}
P_{{\mathcal D}|{\bf W}}({\mathcal D}|{\bf w}) & = p(y^{(0)}, \cdots, y^{(K-1)},{\bf x}^{(0)}, \cdots, {\bf x}^{(K-1)}| {\bf w}) \\
& = P(y^{(0)}, \cdots, y^{(K-1)}|{\bf x}^{(0)}, \cdots, {\bf x}^{(K-1)}, {\bf w}) \; p({\bf x}^{(0)}, \cdots, {\bf x}^{(K-1)}| {\bf w}) \\
& = P(y^{(0)}, \cdots, y^{(K-1)}|{\bf x}^{(0)}, \cdots, {\bf x}^{(K-1)}, {\bf w}) \; p({\bf x}^{(0)}, \cdots, {\bf x}^{(K-1)})\end{align}
Finally, using assumption A4, we can formulate the ML estimation of $\bf w$ as the resolution of the following optimization problem
\begin{align}
\hat {\bf w}_\text{ML} & = \arg \max_{\bf w} p(y^{(0)}, \cdots, y^{(K-1)}|{\bf x}^{(0)}, \cdots, {\bf x}^{(K-1)}, {\bf w}) \\
& = \arg \max_{\bf w} \prod_{k=0}^{K-1} P(y^{(k)}|{\bf x}^{(k)}, {\bf w}) \\
& = \arg \max_{\bf w} \sum_{k=0}^{K-1} \log P(y^{(k)}|{\bf x}^{(k)}, {\bf w}) \\
& = \arg \min_{\bf w} \sum_{k=0}^{K-1} - \log P(y^{(k)}|{\bf x}^{(k)}, {\bf w})
\end{align}
where the arguments of the maximization or minimization problems of the last three lines are usually referred to as the **likelihood**, **log-likelihood** $\left[L(\bf w)\right]$, and **negative log-likelihood** $\left[\text{NLL}(\bf w)\right]$, respectively.
Now, using A1 (the logistic model)
\begin{align}
\text{NLL}({\bf w})
&= - \sum_{k=0}^{K-1}\log\left[g\left(\overline{y}^{(k)}{\bf w}^\intercal {\bf z}^{(k)}\right)\right] \\
&= \sum_{k=0}^{K-1}\log\left[1+\exp\left(-\overline{y}^{(k)}{\bf w}^\intercal {\bf z}^{(k)}\right)\right]
\end{align}
where ${\bf z}^{(k)}={\bf z}({\bf x}^{(k)})$.
It can be shown that $\text{NLL}({\bf w})$ is a convex and differentiable function of ${\bf w}$. Therefore, its minimum is a point with zero gradient.
\begin{align}
\nabla_{\bf w} \text{NLL}(\hat{\bf w}_{\text{ML}})
&= - \sum_{k=0}^{K-1}
\frac{\exp\left(-\overline{y}^{(k)}\hat{\bf w}_{\text{ML}}^\intercal {\bf z}^{(k)}\right) \overline{y}^{(k)} {\bf z}^{(k)}}
{1+\exp\left(-\overline{y}^{(k)}\hat{\bf w}_{\text{ML}}^\intercal {\bf z}^{(k)}
\right)} = \\
&= - \sum_{k=0}^{K-1} \left[y^{(k)}-g(\hat{\bf w}_{\text{ML}}^T {\bf z}^{(k)})\right] {\bf z}^{(k)} = 0
\end{align}
Unfortunately, $\hat{\bf w}_{\text{ML}}$ cannot be isolated from the above equation in closed form, so some iterative optimization algorithm must be used to search for the minimum.
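As a sanity check of the gradient expression above, the following sketch compares the analytic gradient of the NLL with a finite-difference approximation on a small synthetic problem (the data and variable names are purely illustrative; it reuses the `logistic` function defined above):
```
# Numerical check of the NLL gradient (synthetic data, illustrative only).
import numpy as np

rng = np.random.RandomState(0)
Z = rng.randn(20, 3)                     # 20 samples z^(k) as rows, 3 features
y = rng.randint(0, 2, size=20)           # binary labels
w = rng.randn(3)

def nll(w):
    y_bar = 2*y - 1
    return np.sum(np.log(1 + np.exp(-y_bar * Z.dot(w))))

grad_analytic = -Z.T.dot(y - logistic(Z.dot(w)))    # -sum_k (y_k - g(w^T z_k)) z_k
eps = 1e-6
grad_numeric = np.array([(nll(w + eps*e) - nll(w - eps*e)) / (2*eps)
                         for e in np.eye(3)])
print(np.allclose(grad_analytic, grad_numeric, atol=1e-5))
```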
### 3.3. Gradient descent.
A simple iterative optimization algorithm is <a href = https://en.wikipedia.org/wiki/Gradient_descent> gradient descent</a>.
\begin{align}
{\bf w}_{n+1} = {\bf w}_n - \rho_n \nabla_{\bf w} \text{NLL}({\bf w}_n)
\end{align}
where $\rho_n >0$ is the *learning step*.
Applying the gradient descent rule to logistic regression, we get the following algorithm:
\begin{align}
{\bf w}_{n+1} &= {\bf w}_n
+ \rho_n \sum_{k=0}^{K-1} \left[y^{(k)}-g({\bf w}_n^\intercal {\bf z}^{(k)})\right] {\bf z}^{(k)}
\end{align}
Defining vectors
\begin{align}
{\bf y} &= [y^{(0)},\ldots,y^{(K-1)}]^\intercal \\
\hat{\bf p}_n &= [g({\bf w}_n^\intercal {\bf z}^{(0)}), \ldots, g({\bf w}_n^\intercal {\bf z}^{(K-1)})]^\intercal
\end{align}
and matrix
\begin{align}
{\bf Z} = \left[{\bf z}^{(0)},\ldots,{\bf z}^{(K-1)}\right]^\intercal
\end{align}
we can write
\begin{align}
{\bf w}_{n+1} &= {\bf w}_n
+ \rho_n {\bf Z}^\intercal \left({\bf y}-\hat{\bf p}_n\right)
\end{align}
In the following, we will explore the behavior of the gradient descent method using the Iris Dataset.
#### 3.3.1. Example: Iris Dataset.
As an illustration, consider the <a href = http://archive.ics.uci.edu/ml/datasets/Iris> Iris dataset </a>, taken from the <a href=http://archive.ics.uci.edu/ml/> UCI Machine Learning repository</a>. This data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant (*setosa*, *versicolor* or *virginica*). Each instance contains 4 measurements of given flowers: sepal length, sepal width, petal length and petal width, all in centimeters.
We will try to fit the logistic regression model to discriminate between two classes using only two attributes.
First, we load the dataset and split it into training and test subsets.
```
# Adapted from a notebook by Jason Brownlee
def loadDataset(filename, split):
xTrain = []
cTrain = []
xTest = []
cTest = []
with open(filename, 'r') as csvfile:
lines = csv.reader(csvfile)
dataset = list(lines)
for i in range(len(dataset)-1):
for y in range(4):
dataset[i][y] = float(dataset[i][y])
item = dataset[i]
if random.random() < split:
xTrain.append(item[0:4])
cTrain.append(item[4])
else:
xTest.append(item[0:4])
cTest.append(item[4])
return xTrain, cTrain, xTest, cTest
xTrain_all, cTrain_all, xTest_all, cTest_all = loadDataset('iris.data', 0.66)
nTrain_all = len(xTrain_all)
nTest_all = len(xTest_all)
print('Train:', nTrain_all)
print('Test:', nTest_all)
```
Now, we select two classes and two attributes.
```
# Select attributes
i = 0 # Try 0,1,2,3
j = 1 # Try 0,1,2,3 with j!=i
# Select two classes
c0 = 'Iris-versicolor'
c1 = 'Iris-virginica'
# Select two coordinates
ind = [i, j]
# Take training set
X_tr = np.array([[xTrain_all[n][i] for i in ind] for n in range(nTrain_all)
if cTrain_all[n]==c0 or cTrain_all[n]==c1])
C_tr = [cTrain_all[n] for n in range(nTrain_all)
if cTrain_all[n]==c0 or cTrain_all[n]==c1]
Y_tr = np.array([int(c==c1) for c in C_tr])
n_tr = len(X_tr)
# Take test set
X_tst = np.array([[xTest_all[n][i] for i in ind] for n in range(nTest_all)
if cTest_all[n]==c0 or cTest_all[n]==c1])
C_tst = [cTest_all[n] for n in range(nTest_all)
if cTest_all[n]==c0 or cTest_all[n]==c1]
Y_tst = np.array([int(c==c1) for c in C_tst])
n_tst = len(X_tst)
```
#### 3.3.2. Data normalization
Normalization of data is a common pre-processing step in many machine learning algorithms. Its goal is to get a dataset where all input coordinates have a similar scale. Learning algorithms usually show fewer instabilities and convergence problems when data are normalized.
We will define a normalization function that returns a training data matrix with zero sample mean and unit sample variance.
```
def normalize(X, mx=None, sx=None):
# Compute means and standard deviations
if mx is None:
mx = np.mean(X, axis=0)
if sx is None:
sx = np.std(X, axis=0)
# Normalize
X0 = (X-mx)/sx
return X0, mx, sx
```
Now, we can normalize training and test data. Observe in the code that the same transformation must be applied to training and test data. This is why the test data are normalized using the means and standard deviations computed from the training set.
```
# Normalize data
Xn_tr, mx, sx = normalize(X_tr)
Xn_tst, mx, sx = normalize(X_tst, mx, sx)
```
The following code generates a scatter plot of the normalized training data.
```
# Separate components of x into different arrays (just for the plots)
x0c0 = [Xn_tr[n][0] for n in range(n_tr) if Y_tr[n]==0]
x1c0 = [Xn_tr[n][1] for n in range(n_tr) if Y_tr[n]==0]
x0c1 = [Xn_tr[n][0] for n in range(n_tr) if Y_tr[n]==1]
x1c1 = [Xn_tr[n][1] for n in range(n_tr) if Y_tr[n]==1]
# Scatterplot.
labels = {'Iris-setosa': 'Setosa',
'Iris-versicolor': 'Versicolor',
'Iris-virginica': 'Virginica'}
plt.plot(x0c0, x1c0,'r.', label=labels[c0])
plt.plot(x0c1, x1c1,'g+', label=labels[c1])
plt.xlabel('$x_' + str(ind[0]) + '$')
plt.ylabel('$x_' + str(ind[1]) + '$')
plt.legend(loc='best')
plt.axis('equal')
plt.show()
```
In order to apply the gradient descent rule, we need to define two methods:
- A `fit` method, that receives the training data and returns the model weights and the value of the negative log-likelihood during all iterations.
- A `predict` method, that receives the model weights and a set of inputs, and returns the posterior class probabilities for those inputs, as well as the corresponding class predictions.
```
def logregFit(Z_tr, Y_tr, rho, n_it):
# Data dimension
n_dim = Z_tr.shape[1]
# Initialize variables
nll_tr = np.zeros(n_it)
pe_tr = np.zeros(n_it)
Y_tr2 = 2*Y_tr - 1 # Transform labels into binary symmetric.
w = np.random.randn(n_dim,1)
# Running the gradient descent algorithm
for n in range(n_it):
# Compute posterior probabilities for weight w
p1_tr = logistic(np.dot(Z_tr, w))
# Compute negative log-likelihood
# (note that this is not required for the weight update, only for nll tracking)
nll_tr[n] = np.sum(np.log(1 + np.exp(-np.dot(Y_tr2*Z_tr, w))))
# Update weights
w += rho*np.dot(Z_tr.T, Y_tr - p1_tr)
return w, nll_tr
def logregPredict(Z, w):
# Compute posterior probability of class 1 for weights w.
p = logistic(np.dot(Z, w)).flatten()
# Class
D = [int(round(pn)) for pn in p]
return p, D
```
We can test the behavior of the gradient descent method by fitting a logistic regression model with ${\bf z}({\bf x}) = (1, {\bf x}^\intercal)^\intercal$.
```
# Parameters of the algorithms
rho = float(1)/50 # Learning step
n_it = 200 # Number of iterations
# Compute Z's
Z_tr = np.c_[np.ones(n_tr), Xn_tr]
Z_tst = np.c_[np.ones(n_tst), Xn_tst]
n_dim = Z_tr.shape[1]
# Convert target arrays to column vectors
Y_tr2 = Y_tr[np.newaxis].T
Y_tst2 = Y_tst[np.newaxis].T
# Running the gradient descent algorithm
w, nll_tr = logregFit(Z_tr, Y_tr2, rho, n_it)
# Classify training and test data
p_tr, D_tr = logregPredict(Z_tr, w)
p_tst, D_tst = logregPredict(Z_tst, w)
# Compute error rates
E_tr = D_tr!=Y_tr
E_tst = D_tst!=Y_tst
# Error rates
pe_tr = float(sum(E_tr)) / n_tr
pe_tst = float(sum(E_tst)) / n_tst
# NLL plot.
plt.plot(range(n_it), nll_tr,'b.:', label='Train')
plt.xlabel('Iteration')
plt.ylabel('Negative Log-Likelihood')
plt.legend()
print('The optimal weights are:')
print(w)
print('The final error rates are:')
print('- Training:', pe_tr)
print('- Test:', pe_tst)
print('The NLL after training is', nll_tr[len(nll_tr)-1])
```
#### 3.3.3. Free parameters
Under certain conditions, the gradient descent method can be shown to converge asymptotically (i.e. as the number of iterations goes to infinity) to the ML estimate of the logistic model. However, in practice, the final estimate of the weights ${\bf w}$ depends on several factors:
- Number of iterations
- Initialization
- Learning step
**Exercise**: Visualize the variability of gradient descent caused by initializations. To do so, fix the number of iterations to 200 and the learning step, and execute the gradient descent 100 times, storing the training error rate of each execution. Plot the histogram of the error rate values.
Note that you can do this exercise with a loop over the 100 executions, including the code in the previous code cell inside the loop, with some proper modifications. To plot a histogram of the values in array `p` with `n` bins, you can use `plt.hist(p, n)`. A possible skeleton is sketched below.
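One possible skeleton for this exercise (it reuses `logregFit`, `logregPredict`, `Z_tr`, `Y_tr2` and `Y_tr` defined above; the number of bins is arbitrary):
```
# Skeleton: variability of the training error rate across random initializations.
n_runs = 100
pe_runs = []
for run in range(n_runs):
    # logregFit draws a new random initial weight vector on every call
    w_run, _ = logregFit(Z_tr, Y_tr2, rho=1.0/50, n_it=200)
    _, D_run = logregPredict(Z_tr, w_run)
    pe_runs.append(float(np.mean(D_run != Y_tr)))
plt.hist(pe_runs, 20)
plt.xlabel('Training error rate')
plt.ylabel('Number of runs')
plt.show()
```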
##### 3.3.3.1. Learning step
The learning step, $\rho$, is a free parameter of the algorithm. Its choice is critical for convergence. If $\rho$ is too large, the algorithm diverges; if it is too small, convergence becomes very slow and many more iterations are required.
**Exercise 3**: Observe the evolution of the negative log-likelihood with the number of iterations for different values of $\rho$. It is easy to check that, for large enough $\rho$, the gradient descent method does not converge. Can you estimate (through manual observation) an approximate value of $\rho$ that marks the boundary between convergence and divergence?
**Exercise 4**: In this exercise we explore the influence of the learning step more systematically. Use the code in the previous exercises to compute, for every value of $\rho$, the average error rate over 100 executions. Plot the average error rate vs. $\rho$.
Note that you should explore the values of $\rho$ in a logarithmic scale. For instance, you can take $\rho = 1, 1/10, 1/100, 1/1000, \ldots$
In practice, the selection of $\rho$ may be a matter of trial and error. There is also some theoretical evidence that the learning step should decrease over time towards zero, and that the sequence $\rho_n$ should satisfy two conditions:
- C1: $\sum_{n=0}^{\infty} \rho_n^2 < \infty$ (the steps decrease fast enough)
- C2: $\sum_{n=0}^{\infty} \rho_n = \infty$ (but not too fast)
For instance, we can take $\rho_n= 1/n$. Another common choice is $\rho_n = \alpha/(1+\beta n)$ where $\alpha$ and $\beta$ are also free parameters that can be selected by trial and error with some heuristic method.
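As an illustration, a decreasing schedule $\rho_n = \alpha/(1+\beta n)$ could be plugged into the fitting loop as sketched below (the function name and the values of $\alpha$ and $\beta$ are illustrative, not a recommendation; it reuses the `logistic` function defined above):
```
# Sketch: gradient descent with a decaying learning rate rho_n = alpha / (1 + beta*n).
def logregFitDecay(Z_tr, Y_tr, alpha, beta, n_it):
    n_dim = Z_tr.shape[1]
    nll_tr = np.zeros(n_it)
    Y_tr2 = 2*Y_tr - 1                          # symmetrized labels for the NLL
    w = np.random.randn(n_dim, 1)
    for n in range(n_it):
        rho_n = alpha / (1 + beta * n)          # decreasing step size
        p1_tr = logistic(np.dot(Z_tr, w))
        nll_tr[n] = np.sum(np.log(1 + np.exp(-np.dot(Y_tr2*Z_tr, w))))
        w += rho_n * np.dot(Z_tr.T, Y_tr - p1_tr)
    return w, nll_tr

# Example call (illustrative values of alpha and beta):
# w, nll_tr = logregFitDecay(Z_tr, Y_tr2, alpha=0.05, beta=0.01, n_it=200)
```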
#### 3.3.4. Visualizing the posterior map.
We can also visualize the posterior probability map estimated by the logistic regression model for the estimated weights.
```
# Create a rectangular grid.
x_min, x_max = Xn_tr[:, 0].min(), Xn_tr[:, 0].max()
y_min, y_max = Xn_tr[:, 1].min(), Xn_tr[:, 1].max()
dx = x_max - x_min
dy = y_max - y_min
h = dy /400
xx, yy = np.meshgrid(np.arange(x_min - 0.1 * dx, x_max + 0.1 * dx, h),
np.arange(y_min - 0.1 * dy, y_max + 0.1 * dy, h))
X_grid = np.array([xx.ravel(), yy.ravel()]).T
# Compute Z's
Z_grid = np.c_[np.ones(X_grid.shape[0]), X_grid]
# Compute the classifier output for all samples in the grid.
pp, dd = logregPredict(Z_grid, w)
# Paint output maps
pylab.rcParams['figure.figsize'] = 6, 6 # Set figure size
# Put the result into a color plot
plt.plot(x0c0, x1c0,'r.', label=labels[c0])
plt.plot(x0c1, x1c1,'g+', label=labels[c1])
plt.xlabel('$x_' + str(ind[0]) + '$')
plt.ylabel('$x_' + str(ind[1]) + '$')
plt.legend(loc='best')
plt.axis('equal')
pp = pp.reshape(xx.shape)
CS = plt.contourf(xx, yy, pp, cmap=plt.cm.copper)
plt.contour(xx, yy, pp, levels=[0.5],
colors='b', linewidths=(3,))
plt.colorbar(CS, ticks=[0, 0.5, 1])
plt.show()
```
#### 3.3.5. Polynomial Logistic Regression
The error rates of the logistic regression model can potentially be reduced by using polynomial transformations.
To compute the polynomial transformation up to a given degree, we can use the `PolynomialFeatures` method in `sklearn.preprocessing`.
```
# Parameters of the algorithms
rho = float(1)/50 # Learning step
n_it = 500 # Number of iterations
g = 5 # Degree of polynomial
# Compute Z_tr
poly = PolynomialFeatures(degree=g)
Z_tr = poly.fit_transform(Xn_tr)
# Normalize columns (this is useful to make algorithms more stable).
Zn, mz, sz = normalize(Z_tr[:,1:])
Z_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1)
# Compute Z_tst
Z_tst = poly.fit_transform(Xn_tst)
Zn, mz, sz = normalize(Z_tst[:,1:], mz, sz)
Z_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1)
# Convert target arrays to column vectors
Y_tr2 = Y_tr[np.newaxis].T
Y_tst2 = Y_tst[np.newaxis].T
# Running the gradient descent algorithm
w, nll_tr = logregFit(Z_tr, Y_tr2, rho, n_it)
# Classify training and test data
p_tr, D_tr = logregPredict(Z_tr, w)
p_tst, D_tst = logregPredict(Z_tst, w)
# Compute error rates
E_tr = D_tr!=Y_tr
E_tst = D_tst!=Y_tst
# Error rates
pe_tr = float(sum(E_tr)) / n_tr
pe_tst = float(sum(E_tst)) / n_tst
# NLL plot.
plt.plot(range(n_it), nll_tr,'b.:', label='Train')
plt.xlabel('Iteration')
plt.ylabel('Negative Log-Likelihood')
plt.legend()
print('The optimal weights are:')
print(w)
print('The final error rates are:')
print('- Training:', pe_tr)
print('- Test:', pe_tst)
print('The NLL after training is', nll_tr[len(nll_tr)-1])
```
Visualizing the posterior map, we can see that the polynomial transformation produces nonlinear decision boundaries.
```
# Compute Z_grid
Z_grid = poly.fit_transform(X_grid)
n_grid = Z_grid.shape[0]
Zn, mz, sz = normalize(Z_grid[:,1:], mz, sz)
Z_grid = np.concatenate((np.ones((n_grid,1)), Zn), axis=1)
# Compute the classifier output for all samples in the grid.
pp, dd = logregPredict(Z_grid, w)
pp = pp.reshape(xx.shape)
# Paint output maps
pylab.rcParams['figure.figsize'] = 6, 6 # Set figure size
plt.plot(x0c0, x1c0,'r.', label=labels[c0])
plt.plot(x0c1, x1c1,'g+', label=labels[c1])
plt.xlabel('$x_' + str(ind[0]) + '$')
plt.ylabel('$x_' + str(ind[1]) + '$')
plt.axis('equal')
plt.legend(loc='best')
CS = plt.contourf(xx, yy, pp, cmap=plt.cm.copper)
plt.contour(xx, yy, pp, levels=[0.5],
colors='b', linewidths=(3,))
plt.colorbar(CS, ticks=[0, 0.5, 1])
plt.show()
```
## 4. Regularization and MAP estimation.
An alternative to the ML estimation of the weights in logistic regression is Maximum A Posteriori estimation. Modelling the logistic regression weights as a random variable with prior distribution $p_{\bf W}({\bf w})$, the MAP estimate is defined as
$$
\hat{\bf w}_{\text{MAP}} = \arg\max_{\bf w} p({\bf w}|{\mathcal D})
$$
The posterior density $p({\bf w}|{\mathcal D})$ is related to the likelihood function and the prior density of the weights, $p_{\bf W}({\bf w})$ through the Bayes rule
$$
p({\bf w}|{\mathcal D}) =
\frac{P\left({\mathcal D}|{\bf w}\right) \; p_{\bf W}({\bf w})}
{p\left({\mathcal D}\right)}
$$
In general, the denominator in this expression cannot be computed analytically. However, it is not required for MAP estimation because it does not depend on ${\bf w}$. Therefore, the MAP solution is given by
\begin{align}
\hat{\bf w}_{\text{MAP}} & = \arg\max_{\bf w} P\left({\mathcal D}|{\bf w}\right) \; p_{\bf W}({\bf w}) \\
& = \arg\max_{\bf w} \left\{ L({\mathbf w}) + \log p_{\bf W}({\bf w})\right\} \\
& = \arg\min_{\bf w} \left\{ \text{NLL}({\mathbf w}) - \log p_{\bf W}({\bf w})\right\}
\end{align}
In the light of this expression, we can conclude that the MAP solution is affected by two terms:
- The likelihood, which takes large values for parameter vectors $\bf w$ that fit well the training data
- The prior distribution of weights $p_{\bf W}({\bf w})$, which expresses our *a priori* preference for some solutions. Usually, we resort to prior distributions that take large values when $\|{\bf w}\|$ is small (associated with smooth classification boundaries).
We can see that the MAP criterion adds a penalty term to the ML objective that penalizes parameter vectors for which the prior distribution of weights takes small values.
### 4.1 MAP estimation with Gaussian prior
If we assume that ${\bf W}$ is a zero-mean Gaussian random variable with covariance matrix $v{\bf I}$,
$$
p_{\bf W}({\bf w}) = \frac{1}{(2\pi v)^{N/2}} \exp\left(-\frac{1}{2v}\|{\bf w}\|^2\right)
$$
the MAP estimate becomes
\begin{align}
\hat{\bf w}_{\text{MAP}}
&= \arg\min_{\bf w} \left\{\text{NLL}({\bf w}) + \frac{1}{C}\|{\bf w}\|^2
\right\}
\end{align}
where $C = 2v$. Noting that
$$\nabla_{\bf w}\left\{\text{NLL}({\bf w}) + \frac{1}{C}\|{\bf w}\|^2\right\}
= - {\bf Z}^\intercal \left({\bf y}-\hat{\bf p}_n\right) + \frac{2}{C}{\bf w},
$$
we obtain the following gradient descent rule for MAP estimation
\begin{align}
{\bf w}_{n+1} &= \left(1-\frac{2\rho_n}{C}\right){\bf w}_n
+ \rho_n {\bf Z}^\intercal \left({\bf y}-\hat{\bf p}_n\right)
\end{align}
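A minimal sketch of this regularized update, obtained by modifying the `logregFit` function defined above (the only change is the weight-decay factor $(1-2\rho/C)$; the tracked NLL does not include the penalty term, and the function name is illustrative):
```
# Sketch: gradient descent for MAP estimation with a Gaussian prior (L2 penalty).
def logregFitMAP(Z_tr, Y_tr, rho, n_it, C=1000):
    n_dim = Z_tr.shape[1]
    nll_tr = np.zeros(n_it)
    Y_tr2 = 2*Y_tr - 1
    w = np.random.randn(n_dim, 1)
    for n in range(n_it):
        p1_tr = logistic(np.dot(Z_tr, w))
        nll_tr[n] = np.sum(np.log(1 + np.exp(-np.dot(Y_tr2*Z_tr, w))))
        # Weight decay from the Gaussian prior, plus the usual gradient step
        w = (1 - 2*rho/C)*w + rho*np.dot(Z_tr.T, Y_tr - p1_tr)
    return w, nll_tr
```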
### 4.2 MAP estimation with Laplacian prior
If we assume that ${\bf W}$ follows a multivariate zero-mean Laplacian distribution given by
$$
p_{\bf W}({\bf w}) = \frac{1}{(2 C)^{N}} \exp\left(-\frac{1}{C}\|{\bf w}\|_1\right)
$$
(where $\|{\bf w}\|_1=|w_1|+\ldots+|w_N|$ is the $L_1$ norm of ${\bf w}$), the MAP estimate is
\begin{align}
\hat{\bf w}_{\text{MAP}}
&= \arg\min_{\bf w} \left\{\text{NLL}({\bf w}) + \frac{1}{C}\|{\bf w}\|_1
\right\}
\end{align}
The additional term introduced by the prior in the optimization algorithm is usually named the *regularization term*. It is usually very effective at preventing overfitting when the dimension of the weight vector is high. Parameter $C$ is named the *inverse regularization strength*.
**Exercise 5**: Derive the gradient descent rules for MAP estimation of the logistic regression weights with Laplacian prior.
## 5. Other optimization algorithms
### 5.1. Stochastic Gradient descent.
Stochastic gradient descent (SGD) is based on the idea of using a single sample at each iteration of the learning algorithm. The SGD rule for ML logistic regression is
\begin{align}
{\bf w}_{n+1} &= {\bf w}_n
+ \rho_n {\bf z}^{(n)} \left(y^{(n)}-\hat{p}^{(n)}_n\right)
\end{align}
Once all samples in the training set have been used, the algorithm can continue by iterating over the training set several times (each full pass is usually called an *epoch*).
The computational cost of each iteration of SGD is much smaller than that of gradient descent, though it usually needs more iterations to converge.
**Exercise 6**: Modify logregFit to implement an algorithm that applies the SGD rule.
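One possible sketch for this exercise, reusing the `logistic` function defined above (one update per sample, several passes over the training set; treat it as a starting point rather than a reference solution):
```
# Sketch: stochastic gradient descent, one sample per weight update.
def logregFitSGD(Z_tr, Y_tr, rho, n_epochs):
    n_samples, n_dim = Z_tr.shape
    w = np.random.randn(n_dim, 1)
    for epoch in range(n_epochs):
        for k in np.random.permutation(n_samples):   # visit samples in random order
            z_k = Z_tr[k:k+1, :]                     # keep the 2-D shape (1, n_dim)
            p_k = logistic(np.dot(z_k, w))
            w += rho * z_k.T * (Y_tr[k] - p_k)
    return w
```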
### 5.2. Newton's method
Assume that the function to be minimized, $C({\bf w})$, can be approximated by its second order Taylor series expansion around ${\bf w}_0$
$$
C({\bf w}) \approx C({\bf w}_0)
+ \nabla_{\bf w}^\intercal C({\bf w}_0)({\bf w}-{\bf w}_0)
+ \frac{1}{2}({\bf w}-{\bf w}_0)^\intercal{\bf H}({\bf w}_0)({\bf w}-{\bf w}_0)
$$
where ${\bf H}({\bf w}_0)$ is the <a href=https://en.wikipedia.org/wiki/Hessian_matrix> *Hessian* matrix</a> of $C$ at ${\bf w}_0$. Taking the gradient of $C({\bf w})$, and setting the result to ${\bf 0}$, the minimum of $C$ around ${\bf w}_0$ can be approximated as
$$
{\bf w}^* = {\bf w}_0 - {\bf H}({\bf w}_0)^{-1} \nabla_{\bf w} C({\bf w}_0)
$$
Since the second order polynomial is only an approximation to $C$, ${\bf w}^*$ is only an approximation to the optimal weight vector, but we can expect ${\bf w}^*$ to be closer to the minimizer of $C$ than ${\bf w}_0$. Thus, we can repeat the process, computing a second order approximation around ${\bf w}^*$ and a new approximation to the minimizer.
<a href=https://en.wikipedia.org/wiki/Newton%27s_method_in_optimization> Newton's method</a> is based on this idea. At each optimization step, the function to be minimized is approximated by its second order Taylor series expansion around the current estimate. As a result, the learning rule becomes
$$\hat{\bf w}_{n+1} = \hat{\bf w}_{n} - \rho_n {\bf H}(\hat{\bf w}_n)^{-1} \nabla_{{\bf w}}C(\hat{\bf w}_n)
$$
For instance, for the MAP estimate with Gaussian prior, the *Hessian* matrix becomes
$$
{\bf H}({\bf w})
= \frac{2}{C}{\bf I} + \sum_{k=0}^{K-1} g({\bf w}^\intercal {\bf z}^{(k)}) \left(1-g({\bf w}^\intercal {\bf z}^{(k)})\right){\bf z}^{(k)} ({\bf z}^{(k)})^\intercal
$$
Defining diagonal matrix
$$
{\mathbf S}({\bf w}) = \text{diag}\left(g({\bf w}^\intercal {\bf z}^{(k)}) \left(1-g({\bf w}^\intercal {\bf z}^{(k)})\right)\right)
$$
the Hessian matrix can be written in more compact form as
$$
{\bf H}({\bf w})
= \frac{2}{C}{\bf I} + {\bf Z}^\intercal {\bf S}({\bf w}) {\bf Z}
$$
Therefore, the Newton's algorithm for logistic regression becomes
\begin{align}
\hat{\bf w}_{n+1} = \hat{\bf w}_{n} +
\rho_n
\left(\frac{2}{C}{\bf I} + {\bf Z}^\intercal {\bf S}(\hat{\bf w}_{n})
{\bf Z}
\right)^{-1}
{\bf Z}^\intercal \left({\bf y}-\hat{\bf p}_n\right)
\end{align}
Some variants of the Newton method are implemented in the <a href="http://scikit-learn.org/stable/"> Scikit-learn </a> package.
```
def logregFit2(Z_tr, Y_tr, rho, n_it, C=1e4):
# Compute Z's
r = 2.0/C
n_dim = Z_tr.shape[1]
# Initialize variables
nll_tr = np.zeros(n_it)
pe_tr = np.zeros(n_it)
w = np.random.randn(n_dim,1)
# Running the gradient descent algorithm
for n in range(n_it):
p_tr = logistic(np.dot(Z_tr, w))
sk = np.multiply(p_tr, 1-p_tr)
S = np.diag(np.ravel(sk.T))
# Compute negative log-likelihood
nll_tr[n] = - np.dot(Y_tr.T, np.log(p_tr)) - np.dot((1-Y_tr).T, np.log(1-p_tr))
# Update weights
invH = np.linalg.inv(r*np.identity(n_dim) + np.dot(Z_tr.T, np.dot(S, Z_tr)))
w += rho*np.dot(invH, np.dot(Z_tr.T, Y_tr - p_tr))
return w, nll_tr
# Parameters of the algorithms
rho = float(1)/50 # Learning step
n_it = 500 # Number of iterations
C = 1000
g = 4
# Compute Z_tr
poly = PolynomialFeatures(degree=g)
Z_tr = poly.fit_transform(X_tr)
# Normalize columns (this is useful to make algorithms more stable).
Zn, mz, sz = normalize(Z_tr[:,1:])
Z_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1)
# Compute Z_tst
Z_tst = poly.fit_transform(X_tst)
Zn, mz, sz = normalize(Z_tst[:,1:], mz, sz)
Z_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1)
# Convert target arrays to column vectors
Y_tr2 = Y_tr[np.newaxis].T
Y_tst2 = Y_tst[np.newaxis].T
# Running the gradient descent algorithm
w, nll_tr = logregFit2(Z_tr, Y_tr2, rho, n_it, C)
# Classify training and test data
p_tr, D_tr = logregPredict(Z_tr, w)
p_tst, D_tst = logregPredict(Z_tst, w)
# Compute error rates
E_tr = D_tr!=Y_tr
E_tst = D_tst!=Y_tst
# Error rates
pe_tr = float(sum(E_tr)) / n_tr
pe_tst = float(sum(E_tst)) / n_tst
# NLL plot.
plt.plot(range(n_it), nll_tr,'b.:', label='Train')
plt.xlabel('Iteration')
plt.ylabel('Negative Log-Likelihood')
plt.legend()
print('The final error rates are:')
print('- Training:', str(pe_tr))
print('- Test:', str(pe_tst))
print('The NLL after training is:', str(nll_tr[len(nll_tr)-1]))
```
## 6. Logistic regression in Scikit Learn.
The <a href="http://scikit-learn.org/stable/"> scikit-learn </a> package includes an efficient implementation of <a href="http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression"> logistic regression</a>. To use it, we must first create a classifier object, specifying the parameters of the logistic regression algorithm.
```
# Create a logistic regression object.
LogReg = linear_model.LogisticRegression(C=1.0)
# Compute Z_tr
poly = PolynomialFeatures(degree=g)
Z_tr = poly.fit_transform(Xn_tr)
# Normalize columns (this is useful to make algorithms more stable).
Zn, mz, sz = normalize(Z_tr[:,1:])
Z_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1)
# Compute Z_tst
Z_tst = poly.fit_transform(Xn_tst)
Zn, mz, sz = normalize(Z_tst[:,1:], mz, sz)
Z_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1)
# Fit model to data.
LogReg.fit(Z_tr, Y_tr)
# Classify training and test data
D_tr = LogReg.predict(Z_tr)
D_tst = LogReg.predict(Z_tst)
# Compute error rates
E_tr = D_tr!=Y_tr
E_tst = D_tst!=Y_tst
# Error rates
pe_tr = float(sum(E_tr)) / n_tr
pe_tst = float(sum(E_tst)) / n_tst
print('The final error rates are:')
print('- Training:', str(pe_tr))
print('- Test:', str(pe_tst))
# Compute Z_grid
Z_grid = poly.fit_transform(X_grid)
n_grid = Z_grid.shape[0]
Zn, mz, sz = normalize(Z_grid[:,1:], mz, sz)
Z_grid = np.concatenate((np.ones((n_grid,1)), Zn), axis=1)
# Compute the classifier output for all samples in the grid.
dd = LogReg.predict(Z_grid)
pp = LogReg.predict_proba(Z_grid)[:,1]
pp = pp.reshape(xx.shape)
# Paint output maps
pylab.rcParams['figure.figsize'] = 6, 6 # Set figure size
plt.plot(x0c0, x1c0,'r.', label=labels[c0])
plt.plot(x0c1, x1c1,'g+', label=labels[c1])
plt.xlabel('$x_' + str(ind[0]) + '$')
plt.ylabel('$x_' + str(ind[1]) + '$')
plt.axis('equal')
plt.contourf(xx, yy, pp, cmap=plt.cm.copper)
plt.legend(loc='best')
plt.contour(xx, yy, pp, levels=[0.5],
colors='b', linewidths=(3,))
plt.colorbar(CS, ticks=[0, 0.5, 1])
plt.show()
```
<a href="https://colab.research.google.com/github/j-hossain/Intro-to-Deep-Learning/blob/main/Day%202/LineAssumptionV2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import clear_output
import time
```
### Building a dataset
```
Sx = np.array([0, 1, 2.5, 3, 4, 5], dtype=np.float32)
Sy = np.array([0.6, 0, 2, 2.2, 4.7, 5], dtype=np.float32)
# Plotting in graph
plt.scatter(Sx, Sy)
# Graph axis names and grids
plt.grid(True)
plt.xlabel('Sx')
plt.ylabel('Sy')
```
#### How can we get a line that goes through all the points given in the above graph?
We cannot achieve this with a single straight line. What if we combine multiple straight lines?
Let's assume a straight line
$$y = wx + c$$
We can build a more flexible curve by combining multiple straight lines using the following equations:
$$
l_0 = \sigma (w_0x + c_0) \\
l_1 = \sigma (w_1x + c_1) \\
l_2 = w_2l_0 + w_3l_1 + c_2 \\
$$
Here,
$$
\sigma(x) = \frac{1}{1+e^{-x}}
$$
Now, $l_2$ is a nonlinear curve built from multiple straight lines.
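To see what these equations produce, the sketch below plots $l_0$, $l_1$ and $l_2$ for some hand-picked weights (the underscored names and the weight values are arbitrary, chosen only for illustration):
```
# Illustrative plot: two sigmoids and their weighted combination (arbitrary weights).
def sigma(x):
    return 1 / (1 + np.exp(-x))

x = np.arange(0, 5.2, 0.1)
w0_, c0_, w1_, c1_ = 2.0, -2.0, 3.0, -12.0   # hand-picked, for illustration only
w2_, w3_, c2_ = 3.0, 3.0, 0.0
l0 = sigma(w0_*x + c0_)
l1 = sigma(w1_*x + c1_)
l2 = w2_*l0 + w3_*l1 + c2_
plt.plot(x, l0, label='l0')
plt.plot(x, l1, label='l1')
plt.plot(x, l2, label='l2 = w2*l0 + w3*l1 + c2')
plt.legend()
plt.grid(True)
plt.show()
```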
```
w0 = tf.Variable(1.2, dtype=tf.float32)
w1 = tf.Variable(2., dtype=tf.float32)
w2 = tf.Variable(-0.5, dtype=tf.float32)
w3 = tf.Variable(1.2, dtype=tf.float32)
w4 = tf.Variable(1.2, dtype=tf.float32)
w5 = tf.Variable(1.2, dtype=tf.float32)
w6 = tf.Variable(1.2, dtype=tf.float32)
w7 = tf.Variable(1.2, dtype=tf.float32)
w8 = tf.Variable(1.2, dtype=tf.float32)
w9 = tf.Variable(1.2, dtype=tf.float32)
c0 = tf.Variable(1.2, dtype=tf.float32)
c1 = tf.Variable(2, dtype=tf.float32)
c2 = tf.Variable(0.8, dtype=tf.float32)
c3 = tf.Variable(0.8, dtype=tf.float32)
c4 = tf.Variable(0.8, dtype=tf.float32)
c5 = tf.Variable(0.8, dtype=tf.float32)
c6 = tf.Variable(0.8, dtype=tf.float32)
def line_fn(x):
l0 = tf.nn.sigmoid(w0*x + c0)
l1 = tf.nn.sigmoid(w1*x + c1)
l2 = tf.nn.sigmoid(w2*l0 + w3*l1 + c2)
l3 = tf.nn.sigmoid(w4*x + c3)
l4 = tf.nn.sigmoid(w5*x + c4)
l5 = tf.nn.sigmoid(w6*l3 + w7*l4 + c5)
l6 = w8*l2 + w9*l5 + c6
return l6
# A function which would plot the line
def plot_line():
clear_output(wait=True)
p = np.arange(0, 5.2, 0.1)
plt.plot(p, line_fn(p).numpy())
# Plotting in graph
plt.scatter(Sx, Sy)
# Graph axis names and grids
plt.grid(True)
plt.xlabel('Sx')
plt.ylabel('Sy')
plt.show()
plot_line()
```
## Gradient descent algorithm:
$$m_{t} = m_{t-1} - lr \; \frac{\partial \;\; loss(l(x), y)}{\partial m} $$
$$loss(l(x), y) = (l(x) - y)^2$$
#### Here,
* $t$ = Time step
* $x$ = Input
* $y$ = Output
* $m$ = Updatable variable
* $loss(\cdot, \cdot)$ = Loss function
* $lr$ = Learning rate
* $l(\cdot)$ = Line function
```
# learning rate
lr = 1
total_steps = 30000
for step in range(total_steps):
#print(f"Step {step+1:2}:")
#print("-"*30)
with tf.GradientTape() as tape:
# Stating what variables need to be partially differentiated and calibrated
tape.watch([w0,w1,w2,w3,w4,w5,w6,w7,w8,w9,c0,c1,c2,c3,c4,c5,c6])
# Passing the points to the line function
pred_y = line_fn(Sx)
# Calculating the difference/loss of the output (pred_y) of the function
# w.r.t. the known output (Sy)
loss = (pred_y - Sy) * (pred_y - Sy)
# Calculating the gradients w.r.t. the partially diff. parameters
# and the generated output loss
grads = tape.gradient(loss, [w0,w1,w2,w3,w4,w5,w6,w7,w8,w9,c0,c1,c2,c3,c4,c5,c6])
# For some values, the gradient values can be very big, we call it
# exploding gradients. To tackle such problem we are clipping the values to 1
grads = tf.clip_by_norm(grads, 1)
# Updating the gradients
w0 = w0 - lr * grads[0]
w1 = w1 - lr * grads[1]
w2 = w2 - lr * grads[2]
w3 = w3 - lr * grads[3]
w4 = w4 - lr * grads[4]
w5 = w5 - lr * grads[5]
w6 = w6 - lr * grads[6]
w7 = w7 - lr * grads[7]
w8 = w8 - lr * grads[8]
w9 = w9 - lr * grads[9]
c0 = c0 - lr * grads[10]
c1 = c1 - lr * grads[11]
c2 = c2 - lr * grads[12]
c3 = c3 - lr * grads[13]
c4 = c4 - lr * grads[14]
c5 = c5 - lr * grads[15]
c6 = c6 - lr * grads[16]
if step%1000 == 0:
plot_line()
print(f"Step {step+1:2}:")
print(f"Loss: {sum(loss)}")
print(f"Lr: {lr:.4f}")
#time.sleep(0.25)
lr -= lr * 0.1
```
## Let's check the final result
```
plot_line()
```
Play with the following link, trying different numbers of hidden layers and activations:
https://playground.tensorflow.org/
Also, read this blog while playing:
https://cloud.google.com/blog/products/ai-machine-learning/understanding-neural-networks-with-tensorflow-playground
# Guide for Authors
```
print('Welcome to "The Fuzzing Book"!')
```
This notebook compiles the most important conventions for all chapters (notebooks) of "The Fuzzing Book".
## Blooper Video
```
from bookutils import YouTubeVideo
YouTubeVideo('-HFN32Aeisk')
```
## Organization of this Book
### Chapters as Notebooks
Each chapter comes in its own _Jupyter notebook_. A single notebook (= a chapter) should cover the material (text and code, possibly slides) for a 90-minute lecture.
A chapter notebook should be named `Topic.ipynb`, where `Topic` is the topic. `Topic` must be usable as a Python module and should characterize the main contribution. If the main contribution of your chapter is a class `FooFuzzer`, for instance, then your topic (and notebook name) should be `FooFuzzer`, such that users can state
```python
from FooFuzzer import FooFuzzer
```
Since class and module names should start with uppercase letters, all non-notebook files and folders start with lowercase letters. This may make it easier to differentiate them. The special notebook `index.ipynb` gets converted into the home pages `index.html` (on fuzzingbook.org) and `README.md` (on GitHub).
Notebooks are stored in the `notebooks` folder.
### DebuggingBook and FuzzingBook
This project shares some infrastructure (and even chapters) with "The Fuzzing Book". Everything in `shared/` is maintained in "The Debugging Book" and only copied over to "The Fuzzing Book". If you want to edit or change any of the files in `shared/`, do so in "The Debugging Book".
### Output Formats
The notebooks by themselves can be used by instructors and students to toy around with. They can edit code (and text) as they like and even run them as a slide show.
The notebook can be _exported_ to multiple (non-interactive) formats:
* HTML – for placing this material online.
* PDF – for printing
* Python – for coding
* Slides – for presenting
The included Makefile can generate all of these automatically (and a few more).
At this point, we mostly focus on HTML and Python, as we want to get these out quickly; but you should also occasionally ensure that your notebooks can (still) be exported into PDF. Other formats (Word, Markdown) are experimental.
## Sites
All sources for the book end up on the [Github project page](https://github.com/uds-se/fuzzingbook). This holds the sources (notebooks), utilities (Makefiles), as well as an issue tracker.
The derived material for the book ends up in the `docs/` folder, from where it is eventually pushed to the [fuzzingbook website](http://www.fuzzingbook.org/). This site allows reading the chapters online, can launch Jupyter notebooks using the binder service, and provides access to code and slide formats. Use `make publish` to create and update the site.
### The Book PDF
The book PDF is compiled automatically from the individual notebooks. Each notebook becomes a chapter; references are compiled in the final chapter. Use `make book` to create the book.
## Creating and Building
### Tools you will need
To work on the notebook files, you need the following:
1. Jupyter notebook. The easiest way to install this is via the [Anaconda distribution](https://www.anaconda.com/download/).
2. Once you have the Jupyter notebook installed, you can start editing and coding right away by starting `jupyter notebook` (or `jupyter lab`) in the topmost project folder.
3. If (like me) you don't like the Jupyter Notebook interface, I recommend [Jupyter Lab](https://jupyterlab.readthedocs.io/en/stable/), the designated successor to Jupyter Notebook. Invoke it as `jupyter lab`. It comes with a much more modern interface, but misses autocompletion and a couple of extensions. I am running it [as a Desktop application](http://christopherroach.com/articles/jupyterlab-desktop-app/) which gets rid of all the browser toolbars.
4. To create the entire book (with citations, references, and all), you also need the [ipybublish](https://github.com/chrisjsewell/ipypublish) package. This allows you to create the HTML files, merge multiple chapters into a single PDF or HTML file, create slides, and more. The Makefile provides the essential tools for creation.
### Version Control
We use git in a single strand of revisions. Feel free to branch for features, but eventually merge back into the main "master" branch. Sync early; sync often. Only push if everything ("make all") builds and passes.
The Github repo thus will typically reflect work in progress. If you reach a stable milestone, you can push things on the fuzzingbook.org web site, using `make publish`.
#### nbdime
The [nbdime](https://github.com/jupyter/nbdime) package gives you tools such as `nbdiff` (and even better, `nbdiff-web`) to compare notebooks against each other; this ensures that cell _contents_ are compared rather than the binary format.
`nbdime config-git --enable` integrates nbdime with git such that `git diff` runs the above tools; merging should also be notebook-specific.
#### nbstripout
Notebooks in version control _should not contain output cells,_ as these tend to change a lot. (Hey, we're talking random output generation here!) To have output cells automatically stripped during commit, install the [nbstripout](https://github.com/kynan/nbstripout) package and use
```
nbstripout --install
```
to set it up as a git filter. The `notebooks/` folder comes with a `.gitattributes` file already set up for `nbstripout`, so you should be all set.
Note that _published_ notebooks (in short, anything under the `docs/` tree) _should_ have their output cells included, such that users can download and edit notebooks with pre-rendered output. This folder contains a `.gitattributes` file that should explicitly disable `nbstripout`, but it can't hurt to check.
As an example, the following cell
1. _should_ have its output included in the [HTML version of this guide](https://www.fuzzingbook.org/beta/html/Guide_for_Authors.html);
2. _should not_ have its output included in [the git repo](https://github.com/uds-se/fuzzingbook/blob/master/notebooks/Guide_for_Authors.ipynb) (`notebooks/`);
3. _should_ have its output included in [downloadable and editable notebooks](https://github.com/uds-se/fuzzingbook/blob/master/docs/beta/notebooks/Guide_for_Authors.ipynb) (`docs/notebooks/` and `docs/beta/notebooks/`).
```
import random
random.random()
```
### Inkscape and GraphViz
Creating derived files uses [Inkscape](https://inkscape.org/en/) and [Graphviz](https://www.graphviz.org/) – through its [Python wrapper](https://pypi.org/project/graphviz/) – to process SVG images. These tools are not automatically installed, but are available on pip, _brew_ and _apt-get_ for all major distributions.
### LaTeX Fonts
By default, creating PDF uses XeLaTeX with a couple of special fonts, which you can find in the `fonts/` folder; install these fonts system-wide to make them accessible to XeLaTeX.
You can also run `make LATEX=pdflatex` to use `pdflatex` and standard LaTeX fonts instead.
### Creating Derived Formats (HTML, PDF, code, ...)
The [Makefile](../Makefile) provides rules for all targets. Type `make help` for instructions.
The Makefile should work with GNU make and a standard Jupyter Notebook installation. To create the multi-chapter book and BibTeX citation support, you need to install the [iPyPublish](https://github.com/chrisjsewell/ipypublish) package (which includes the `nbpublish` command).
### Creating a New Chapter
To create a new chapter for the book,
1. Set up a new `.ipynb` notebook file as copy of [Template.ipynb](Template.ipynb).
2. Include it in the `CHAPTERS` list in the `Makefile`.
3. Add it to the git repository.
## Teaching a Topic
Each chapter should be devoted to a central concept and a small set of lessons to be learned. I recommend the following structure:
* Introduce the problem ("We want to parse inputs")
* Illustrate it with some code examples ("Here's some input I'd like to parse")
* Develop a first (possibly quick and dirty) solution ("A PEG parser is short and often does the job")
* Show that it works and how it works ("Here's a neat derivation tree. Look how we can use this to mutate and combine expressions!")
* Develop a second, more elaborated solution, which should then become the main contribution. ("Here's a general LR(1) parser that does not require a special grammar format. (You can skip it if you're not interested)")
* Offload non-essential extensions to later sections or to exercises. ("Implement a universal parser, using the Dragon Book")
The key idea is that readers should be able to grasp the essentials of the problem and the solution at the beginning of the chapter, and get further into details as they progress through it. Make it easy for readers to be drawn in, providing insights of value quickly. If they are interested in understanding how things work, they will get deeper into the topic. If they just want to use the technique (because they may be more interested in later chapters), having them read only the first few examples should be fine, too.
Whatever you introduce should be motivated first, and illustrated after. Motivate the code you'll be writing, and use plenty of examples to show what the code just introduced is doing. Remember that readers should have fun interacting with your code and your examples. Show and tell again and again and again.
### Special Sections
#### Quizzes
You can have _quizzes_ as part of the notebook. These are created using the `quiz()` function. Its arguments are
* The question
* A list of options
* The correct answer(s) - either
* the single number of the one single correct answer (starting with 1)
* a list of numbers of correct answers (multiple choices)
To make the answer less obvious, you can specify it as a string containing an arithmetic expression evaluating to the desired number(s). The expression will remain in the code (and possibly be shown as hint in the quiz).
```
from bookutils import quiz
# A single-choice quiz
quiz("The color of the sky is", ['blue', 'red', 'black'],
'5 - 4')
# A multiple-choice quiz
quiz("What is this book?", ['Novel', 'Friendly', 'Useful'],
['5 - 4', '1 + 1', '27 / 9'])
```
Cells that contain only the `quiz()` call will not be rendered (but the quiz will).
#### Synopsis
Each chapter should have a section named "Synopsis" at the very end:
```markdown
## Synopsis
This is the text of the synopsis.
```
This section is evaluated at the very end of the notebook. It should summarize the most important functionality (classes, methods, etc.) together with examples. In the derived HTML and PDF files, it is rendered at the beginning, such that it can serve as a quick reference.
#### Excursions
There may be longer stretches of text (and code!) that are too special, too boring, or too repetitive to read. You can mark such stretches as "Excursions" by enclosing them in Markdown cells that state:
```markdown
#### Excursion: TITLE
```
and
```markdown
#### End of Excursion
```
Stretches between these two markers get special treatment when rendering:
* In the resulting HTML output, these blocks are set up such that they are shown on demand only.
* In printed (PDF) versions, they will be replaced by a pointer to the online version.
* In the resulting slides, they will be omitted right away.
Here is an example of an excursion:
#### Excursion: Fine points on Excursion Cells
Note that the `Excursion` and `End of Excursion` cells must be separate cells; they cannot be merged with others.
#### End of Excursion
### Ignored Code
If a code cell starts with
```python
# ignore
```
then the code will not show up in rendered input. Its _output_ will, however.
This is useful for cells that create drawings, for instance - the focus should be on the result, not the code.
This also applies to cells that start with a call to `display()` or `quiz()`.
### Ignored Cells
You can have _any_ cell not show up at all (including its output) in any rendered version by adding the following metadata to the cell:
```json
{
    "ipub": {
        "ignore": true
    }
}
```
*This* text, for instance, does not show up in the rendered version.
### Documentation Assertions
If a code cell starts with
```python
# docassert
```
then the code will not show up in rendered input (like `# ignore`), but also not in exported code.
This is useful for inserting _assertions_ that encode assumptions made in the (following) documentation. Having this assertion fail means that the documentation no longer applies.
Since the documentation is not part of exported code, and since code may behave differently in standalone Python, these assertions are not exported.
## Coding
### Set up
The first code block in each notebook should be
```
import bookutils
```
This sets up stuff such that notebooks can import each other's code (see below). This import statement is removed in the exported Python code, as the .py files would import each other directly.
Importing `bookutils` also sets a fixed _seed_ for random number generation. This way, whenever you execute a notebook from scratch (restarting the kernel), you get the exact same results; these results will also end up in the derived HTML and PDF files. (If you run a notebook or a cell for the second time, you will get more random results.)
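Here is a minimal, purely illustrative sketch of why a fixed seed gives reproducible runs; the seed value and code are hypothetical, not the actual `bookutils` implementation:

```python
import random

random.seed(2021)       # hypothetical fixed seed
print(random.random())  # prints the same value after every fresh kernel start
```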
### Coding Style and Consistency
Here's a few rules regarding coding style.
#### Use Python 3
We use Python 3 (specifically, Python 3.9.7) for all code. As of 2021, there is no need anymore to include compatibility hacks for earlier Python versions.
#### Follow Python Coding Conventions
We use _standard Python coding conventions_ according to [PEP 8](https://www.python.org/dev/peps/pep-0008/).
Your code must pass the `pycodestyle` style checks which you get by invoking `make style`. A very easy way to meet this goal is to invoke `make reformat`, which reformats all code accordingly. The `code prettify` notebook extension also allows you to automatically make your code (mostly) adhere to PEP 8.
#### One Cell per Definition
Use one cell for each definition or example. During importing, this makes it easier to decide which cells to import (see below).
#### Identifiers
In the book, this is how we denote `variables`, `functions()` and `methods()`, `Classes`, `Notebooks`, `variables_and_constants`, `EXPORTED_CONSTANTS`, `files`, `folders/`, and `<grammar-elements>`.
#### Quotes
If you have the choice between quoting styles, prefer
* double quotes (`"strings"`) around strings that are used for interpolation or that are natural language messages, and
* single quotes (`'characters'`) for single characters and formal language symbols that an end user would not see (see the sketch below).
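As a small, hypothetical illustration of this convention:

```python
# Natural-language message shown to readers: double quotes
print("Welcome to this chapter!")  # hypothetical message

# Single character / formal symbol an end user would not see: single quotes
DIGIT_ZERO = '0'
```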
#### Read More
Beyond simple syntactical things, here's a [very nice guide](https://docs.python-guide.org/writing/style/) to get you started writing "pythonic" code.
### Importing Code from Notebooks
To import the code of individual notebooks, you can import directly from .ipynb notebook files.
```
from Fuzzer import fuzzer
fuzzer(100, ord('0'), 10)
```
**Important**: When importing a notebook, the module loader will **only** load cells that start with
* a function definition (`def`)
* a class definition (`class`)
* a variable definition if all uppercase (`ABC = 123`)
* `import` and `from` statements
All other cells are _ignored_ to avoid recomputation of notebooks and clutter of `print()` output.
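As a hypothetical illustration, consider a notebook with the following two cells; only the first one would be loaded when the notebook is imported as a module:

```python
# Cell 1: imported, because it starts with a definition
def demo_fuzzer():
    return "42"

# Cell 2: ignored during import, because it is neither a definition nor an import
print(demo_fuzzer())
```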
Exported Python code will import from the respective .py file instead. The exported Python code is set up such that only the above items will be imported.
If importing a module prints out something (or has other side effects), that is an error. Use `make check-imports` to check whether your modules import without output.
Import modules only as you need them, such that you can motivate them well in the text.
### Imports and Dependencies
Try to depend on as few other notebooks as possible. This will not only ease construction and reconstruction of the code, but also reduce requirements for readers, giving them more flexibility in navigating through the book.
When you import a notebook, this will show up as a dependency in the [Sitemap](00_Table_of_Contents.ipynb). If the imported module is not critical for understanding, and thus should not appear as a dependency in the sitemap, mark the import as "minor dependency" as follows:
```
from Reducer import DeltaDebuggingReducer # minor dependency
```
### Design and Architecture
Stick to simple functions and data types. We want our readers to focus on functionality, not Python. You are encouraged to write in a "pythonic" style, making use of elegant Python features such as list comprehensions, sets, and more; however, if you do so, be sure to explain the code such that readers familiar with, say, C or Java can still understand things.
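For instance, a "pythonic" one-liner can be paired with its explicit equivalent so that readers from other languages can follow along (a made-up example):

```python
# Pythonic: a list comprehension ...
even_squares = [x * x for x in range(10) if x % 2 == 0]

# ... and the equivalent explicit loop for readers coming from C or Java:
even_squares = []
for x in range(10):
    if x % 2 == 0:
        even_squares.append(x * x)
```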
### Incomplete Examples
When introducing examples for students to complete, use the ellipsis `...` to indicate where students should add code, as in here:
```
def student_example():
x = some_value()
# Now, do something with x
...
```
The ellipsis is legal code in Python 3. (Actually, it is an `Ellipsis` object.)
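You can verify this in a cell of your own:

```python
print(... is Ellipsis)  # True: the ellipsis literal is the built-in Ellipsis object
```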
### Introducing Classes
Defining _classes_ can be a bit tricky, since all of a class must fit into a single cell. This defeats the incremental style preferred for notebooks. By defining a class _as a subclass of itself_, though, you can avoid this problem.
Here's an example. We introduce a class `Foo`:
```
class Foo:
def __init__(self):
pass
def bar(self):
pass
```
Now we could discuss what `__init__()` and `bar()` do, or give an example of how to use them:
```
f = Foo()
f.bar()
```
We now can introduce a new `Foo` method by subclassing from `Foo` into a class which is _also_ called `Foo`:
```
class Foo(Foo):
def baz(self):
pass
```
This is the same as if we had subclassed `Foo` into `Foo_1` with `Foo` then becoming an alias for `Foo_1`. The original `Foo` class is overshadowed by the new one:
```
new_f = Foo()
new_f.baz()
```
Note, though, that _existing_ objects keep their original class:
```
from ExpectError import ExpectError
with ExpectError(AttributeError):
f.baz() # type: ignore
```
## Helpers
There are a couple of notebooks with helpful functions, including [Timer](Timer.ipynb) and [ExpectError and ExpectTimeout](ExpectError.ipynb). Also check out the [Coverage](Coverage.ipynb) class.
### Quality Assurance
In your code, make use of plenty of assertions that allow to catch errors quickly. These assertions also help your readers understand the code.
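For example (a made-up function, not from the book), a single assertion can both check and document an assumption:

```python
def probability(p: float) -> float:
    assert 0.0 <= p <= 1.0, f"probability must be between 0 and 1, got {p}"
    return p
```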
### Issue Tracker
The [Github project page](https://github.com/uds-se/fuzzingbook) allows you to enter and track issues.
## Writing Text
Text blocks use Markdown syntax. [Here is a handy guide](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet).
### Sections
Any chapter notebook must begin with `# TITLE`; sections and subsections then follow as `## SECTION` and `### SUBSECTION`, respectively.
Sections should start with their own block, to facilitate cross-referencing.
### Highlighting
Use
* _emphasis_ (`_emphasis_`) for highlighting,
* *emphasis* (`*emphasis*`) for highlighting terms that will go into the index,
* `backticks` for code and other verbatim elements.
### Hyphens and Dashes
Use "–" for em-dashes, "-" for hyphens, and "$-$" for minus.
### Quotes
Use standard typewriter quotes (`"quoted string"`) for quoted text. The PDF version will automatically convert these to "smart" (e.g. left and right) quotes.
### Lists and Enumerations
You can use bulleted lists:
* Item A
* Item B
and enumerations:
1. item 1
1. item 2
For description lists, use a combination of bulleted lists and highlights:
* **PDF** is great for reading offline
* **HTML** is great for reading online
### Math
LaTeX math formatting works, too.
`$x = \sum_{n = 1}^{\infty}\frac{1}{n}$` gets you
$x = \sum_{n = 1}^{\infty}\frac{1}{n}$.
### Inline Code
Python code normally goes into its own cells, but you can also have it in the text:
```python
s = "Python syntax highlighting"
print(s)
```
### Images
To insert images, use Markdown image syntax with an optional width attribute, e.g. `{width=100%}`; this inserts a picture from the `PICS` folder at full page width.
All pictures go to `PICS/`, both in source as well as derived formats; both are stored in git, too. (Not all of us have all tools to recreate diagrams, etc.)
### Footnotes
Markdown supports footnotes, as in [^footnote]. These are rendered as footnotes in HTML and PDF, _but not within Jupyter_; hence, readers may find them confusing. So far, the book makes no use of footnotes, and uses parenthesized text instead.
[^footnote]: Test, [Link](https://www.fuzzingbook.org).
### Floating Elements and References
\todo[inline]{I haven't gotten this to work yet -- AZ}
To produce floating elements in LaTeX and PDF, edit the metadata of the cell which contains it. (In the Jupyter Notebook Toolbar go to View -> Cell Toolbar -> Edit Metadata and a button will appear above each cell.) This allows you to control placement and create labels.
#### Floating Figures
Edit metadata as follows:
```json
{
"ipub": {
"figure": {
"caption": "Figure caption.",
"label": "fig:flabel",
"placement": "H",
"height":0.4,
"widefigure": false,
}
}
}
```
- all tags are optional
- height/width correspond to the fraction of the page height/width, only one should be used (aspect ratio will be maintained automatically)
- `placement` is optional and corresponds to using a placement argument for the figure (e.g. \begin{figure}[H]). See [Positioning_images_and_tables](https://www.sharelatex.com/learn/Positioning_images_and_tables).
- `widefigure` is optional and corresponds to expanding the figure to the page width (i.e. \begin{figure*}); placement arguments will then be ignored.
#### Floating Tables
For **tables** (e.g. those output by `pandas`), enter in cell metadata:
```json
{
"ipub": {
"table": {
"caption": "Table caption.",
"label": "tbl:tlabel",
"placement": "H",
"alternate": "gray!20"
}
}
}
```
- `caption` and `label` are optional
- `placement` is optional and corresponds to using a placement argument for the table (e.g. \begin{table}[H]). See [Positioning_images_and_tables](https://www.sharelatex.com/learn/Positioning_images_and_tables).
- `alternate` is optional and corresponds to using alternating colors for the table rows (e.g. \rowcolors{2}{gray!25}{white}). See [https://tex.stackexchange.com/a/5365/107738](https://tex.stackexchange.com/a/5365/107738).
- if tables exceed the text width, in latex, they will be shrunk to fit
#### Floating Equations
For **equations** (e.g. those output by `sympy`), enter in cell metadata:
```json
{
"ipub": {
"equation": {
"environment": "equation",
"label": "eqn:elabel"
}
}
}
```
- environment is optional and can be 'none' or any of those available in [amsmath](https://www.sharelatex.com/learn/Aligning_equations_with_amsmath); 'equation', 'align','multline','gather', or their \* variants. Additionally, 'breqn' or 'breqn\*' will select the experimental [breqn](https://ctan.org/pkg/breqn) environment to *smart* wrap long equations.
- label is optional and will only be used if the equation is in an environment
#### References
To reference a floating object, use `\cref`, e.g. \cref{eq:texdemo}
### Cross-Referencing
#### Section References
* To refer to sections in the same notebook, use the header name as anchor, e.g.
`[Code](#Code)` gives you [Code](#Code). For multi-word titles, replace spaces by hyphens (`-`), as in [Using Notebooks as Modules](#Using-Notebooks-as-Modules).
* To refer to cells (e.g. equations or figures), you can define a label as cell metadata. See [Floating Elements and References](#Floating-Elements-and-References) for details.
* To refer to other notebooks, use a Markdown cross-reference to the notebook file, e.g. [the "Fuzzing" chapter](Fuzzer.ipynb). A special script will be run to take care of these links. Reference chapters by name, not by number.
### Citations
To cite papers, cite in LaTeX style. The text
```
print(r"\cite{Purdom1972}")
```
is expanded to \cite{Purdom1972}, which in HTML and PDF should be a nice reference.
The keys refer to BibTeX entries in [fuzzingbook.bib](fuzzingbook.bib).
* LaTeX/PDF output will have a "References" section appended.
* HTML output will link to the URL field from the BibTeX entry. Be sure it points to the DOI.
### Todo's
* To mark todo's, use `\todo{Thing to be done}.` \todo{Expand this}
### Tables
Tables with fixed contents can be produced using Markdown syntax:
| Tables | Are | Cool |
| ------ | ---:| ----:|
| Zebra | 2 | 30 |
| Gnu | 20 | 400 |
If you want to produce tables from Python data, the `PrettyTable` package (included in the book) allows you to [produce tables with LaTeX-style formatting.](http://blog.juliusschulz.de/blog/ultimate-ipython-notebook)
```
from bookutils import PrettyTable as pt
import numpy as np
data = np.array([[1, 2, 30], [2, 3, 400]])
pt.PrettyTable(data, [r"$\frac{a}{b}$", r"$b$",
r"$c$"], print_latex_longtable=False)
```
### Plots and Data
It is possible to include plots in notebooks. Here is an example of plotting a function:
```
%matplotlib inline
import matplotlib.pyplot as plt
x = np.linspace(0, 3 * np.pi, 500)
plt.plot(x, np.sin(x ** 2))
plt.title('A simple chirp');
```
And here's an example of plotting data:
```
%matplotlib inline
import matplotlib.pyplot as plt
data = [25, 36, 57]
plt.plot(data)
plt.title('Increase in data');
```
Plots are available in all derived versions (HTML, PDF, etc.). Plots with `plotly` are even nicer (and interactive, even in HTML). However, at this point, we cannot export them to PDF, so `matplotlib` it is.
## Slides
You can set up the notebooks such that they also can be presented as slides. In the browser, select View -> Cell Toolbar -> Slideshow. You can then select a slide type for each cell:
* `New slide` starts a new slide with the cell (typically, every `## SECTION` in the chapter)
* `Sub-slide` starts a new sub-slide which you navigate "down" to (anything in the section)
* `Fragment` is a cell that gets revealed after a click (on the same slide)
* `Skip` is skipped during the slide show (e.g. `import` statements; navigation guides)
* `Notes` goes into presenter notes
To create slides, do `make slides`; to view them, change into the `slides/` folder and open the created HTML files. (The `reveal.js` package has to be in the same folder as the slide to be presented.)
The ability to use slide shows is a compelling argument for teachers and instructors in our audience.
(Hint: In a slide presentation, type `s` to see presenter notes.)
## Writing Tools
When you're editing in the browser, you may find these extensions helpful:
### Jupyter Notebook
[Jupyter Notebook Extensions](https://github.com/ipython-contrib/jupyter_contrib_nbextensions) is a collection of productivity-enhancing tools (including spellcheckers).
I found these extensions to be particularly useful:
* Spell Checker (while you're editing)
* Table of contents (for quick navigation)
* Code prettify (to produce "nice" syntax)
* Codefolding
* Live Markdown Preview (while you're editing)
### Jupyter Lab
Extensions for _Jupyter Lab_ are much less varied and less supported, but things get better. I am running
* [Spell Checker](https://github.com/ijmbarr/jupyterlab_spellchecker)
* [Table of Contents](https://github.com/jupyterlab/jupyterlab-toc)
* [JupyterLab-LSP](https://towardsdatascience.com/jupyterlab-2-0-edd4155ab897) providing code completion, signatures, style checkers, and more.
## Interaction
It is possible to include interactive elements in a notebook, as in the following example:
```python
try:
from ipywidgets import interact, interactive, fixed, interact_manual
x = interact(fuzzer, char_start=(32, 128), char_range=(0, 96))
except ImportError:
pass
```
Note that such elements will be present in the notebook versions only, but not in the HTML and PDF versions, so use them sparingly (if at all). To avoid errors during production of derived files, protect against `ImportError` exceptions as in the above example.
## Read More
Here is some documentation on the tools we use:
1. [Markdown Cheatsheet](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet) - general introduction to Markdown
1. [iPyPublish](https://github.com/chrisjsewell/ipypublish) - rich set of tools to create documents with citations and references
## Alternative Tool Sets
We don't currently use these, but they are worth learning:
1. [Making Publication-Ready Python Notebooks](http://blog.juliusschulz.de/blog/ultimate-ipython-notebook) - Another tool set on how to produce book chapters from notebooks
1. [Writing academic papers in plain text with Markdown and Jupyter notebook](https://sylvaindeville.net/2015/07/17/writing-academic-papers-in-plain-text-with-markdown-and-jupyter-notebook/) - Alternate ways on how to generate citations
1. [A Jupyter LaTeX template](https://gist.github.com/goerz/d5019bedacf5956bcf03ca8683dc5217#file-revtex-tplx) - How to define a LaTeX template
1. [Boost Your Jupyter Notebook Productivity](https://towardsdatascience.com/jupyter-notebook-hints-1f26b08429ad) - a collection of hints for debugging and profiling Jupyter notebooks
```
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense,Input,LSTM
from tensorflow.keras.preprocessing import timeseries_dataset_from_array
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
smp = pd.read_csv('../data/smp.csv')
smp['date'] = pd.to_datetime(smp['date'])
smp['day_of_week'] = smp['date'].dt.dayofweek
smp.head()
smp.shape
smp['day_of_week'] = smp['day_of_week'].astype('category')
smp = pd.get_dummies(smp, columns = ['day_of_week'], prefix='W', drop_first=True) # one hot encoding
smp.head()
plt.rcParams['figure.facecolor'] = 'white'
plt.figure(figsize=(20,8))
plt.plot(smp['date'], smp['smp_max'], label='smp_max')
plt.plot(smp['date'], smp['smp_mean'], label='smp_mean')
plt.plot(smp['date'], smp['smp_min'], label='smp_min')
plt.title('SMP')
plt.legend()
# Settings
train_split_idx = 729  # row index of 2020-01-01
window_size = 10  # use the past 10 days of the time series as model input
future = 3  # predict the target 3 days ahead
# Features
X_train = smp.iloc[:train_split_idx - window_size - future, 0:]
# Targets
y_train = smp.iloc[window_size + future :train_split_idx, [3]] # 'smp_mean' column
print(X_train.shape, y_train.shape)
# X_test
test_start = train_split_idx - window_size - future # first row of the test data
test_end = smp.shape[0] - window_size - future
X_test = smp.iloc[test_start:test_end, 0:]
# y_test
# label_start = + future # position of the first target value in the test data
y_test = smp.iloc[train_split_idx:, [3]] # select the 'smp_mean' column
print(X_test.shape, y_test.shape)
# Feature Scaling
X_train_scaled = X_train.loc[:, 'smp_max':]
X_test_scaled = X_test.loc[:, 'smp_max':]
scaler=MinMaxScaler()
scaler.fit(X_train_scaled.values)
X_train_scaled.loc[:, :] = scaler.transform(X_train_scaled.values)
X_test_scaled.loc[:, :] = scaler.transform(X_test_scaled.values)
# Convert the time series into mini-batches of sliding windows
train_data = timeseries_dataset_from_array(
X_train_scaled, y_train, sequence_length=window_size, batch_size=16)
test_data = timeseries_dataset_from_array(
X_test_scaled, y_test, sequence_length=window_size, batch_size=16)
print(train_data)
print(test_data)
for batch in test_data.take(1):
inputs, targets = batch
print("Input:", inputs.numpy().shape)
print("Target:", targets.numpy().shape)
inputs[0]
targets[0]
model = Sequential()
model.add(Input(shape=[10, 9]))
model.add(LSTM(units=32, return_sequences=False))
model.add(Dense(units=1, activation='linear'))
model.compile(optimizer='adam', loss='mse', metrics=['mae'])
model.summary()
# Train the model
history = model.fit(train_data, epochs=500,
validation_data=test_data)
# Plot the training and validation loss curves
def plot_loss_curve(history, total_epoch=10, start=1):
plt.figure(figsize=(5, 5))
plt.plot(range(start, total_epoch + 1),
history.history['loss'][start-1:total_epoch],
label='Train')
plt.plot(range(start, total_epoch + 1),
history.history['val_loss'][start-1:total_epoch],
label='Validation')
plt.xlabel('Epochs')
plt.ylabel('mse')
plt.legend()
plt.show()
plot_loss_curve(history=history,
total_epoch=len(history.history['loss']), start=1)
y_pred = model.predict(test_data)
y_pred.shape
plt.figure(figsize=(20, 10))
plt.plot(range(len(y_pred)), y_test[:-(window_size-1)].values, marker='o', label='y_test')
plt.plot(range(len(y_pred)), y_pred, marker='x', label='y_pred')
plt.legend()
plt.show()
```
# Introduction
This file covers a basic Python coding review if you need it. It is the same as the part 1 from the expert file in MSDS600.
Clicking on a cell and pressing shift+enter or ctrl+enter will run the cell (or the menu bar can be used). Whatever is last in the cell gets printed out. New cells can be created with the hotkeys a and b (after pressing escape), or the menu bar can be used.
The way Python is written is called its syntax. This is like grammatical rules for writing. Python's syntax is simple on purpose.
# Reading list
This is the reading list for week 1 of MSDS600 (available through O'Reilly through the [library](https://libguides.regis.edu/computer_informationsciences)):
[1] [Python Data Science Essentials - Third Edition by Alberto Boschetti and Luca Massaron.](https://learning.oreilly.com/library/view/python-data-science/9781789537864/)
Sections:
- First Steps (“First Steps” through “Alternatives to Jupyter”)
- Strengthen Your Python Foundations (“Strengthen your Python Foundations” through “Don’t be shy, take a real challenge”)
[2] [Python for Data Science For Dummies, 2nd Edition by John Paul Mueller and Luca Massaron.](https://learning.oreilly.com/library/view/python-for-data/9781119547624/)
Chapters 1 and 2.
See the end of the presentation for more resources on learning/brushing up on Python.
# Python topics
Much of this is covered in the Python Data Science Essentials book and many other places. We will cover:
- variable types (ints, floats, strings, booleans, bytes)
- data structures (lists, tuples, sets, dictionaries, NumPy arrays, Pandas DataFrames)
- operators
- functions
- objects, classes, methods, and attributes
- loops and comprehensions
- conditional statements
- packages and modules
- keywords and built-in functions
## Comments
We can write notes about our code with comments. We can make single-line comments and multi-line:
```
# a single-line comment
"""
a multi-
line
comment
"""
```
The multi-line comments are actually multi-line strings, so if they are the last thing in the notebook cell, they get printed out. We want to minimize the number of comments in our code (except for function documentation) and instead write our code so that it's clear what it's doing without comments.
## Variables and data types
In Python, there are a few key data types:
- integers (`int`)
- floats (`float`)
- strings (`str`)
- booleans (`bool`)
- bytes (`bytes`)
There are other data types as well, such as `complex` for complex numbers.
When we create a variable, the type is determined automatically. For example, we can store an integer like so:
```
an_integer = 1
an_integer
```
We are naming the variable with best practices here - `snake_case` (lowercase with underscores separating words) and a descriptive name.
We can check the type of a variable with the built-in `type()` function:
```
type(an_integer)
```
We can convert variable types with "casting". This converts an integer (`int`) to a string (`str`):
```
str(an_integer)
```
Floats have decimal places:
```
a_float = 1.0
type(a_float)
```
The usual math operators work (addition, subtraction, etc.). Mixing floats and ints usually results in a float. The `//` operator is integer division (it rounds down), the `%` operator is the modulo (remainder from division). Exponentiation is `**`. More complex math functions can be found in the `numpy` package or the built-in `math` module.
```
5 * 2
5 % 2
5 // 2
5 / 2
import numpy as np
np.log(10)
import math
math.sqrt(9)
```
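The exponentiation operator mentioned above works like this:

```
2 ** 10  # 2 to the power of 10, which is 1024
```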
Strings are sequences of characters:
```
a_string = 'test string here'
a_string
```
Booleans are `True` or `False`:
```
a_bool = True
type(a_bool)
```
Bytes objects are sometimes encountered when loading data:
```
a_bytes = b'bytes object'
type(a_bytes)
```
Convert the boolean object to an integer using casting and see what happens:
Convert the number 5.8 to an integer and see what happens (casting a float to an integer drops the decimal part, truncating toward zero):
## Data structures
### Lists
The main data structure in Python is the list:
```
a_list = [1, 2, 3, 4]
type(a_list)
```
List indexing works like `[start:stop:step]`. The default is `[0:None:1]`, which steps through the list from beginning to end one element at a time.
Python is "0-indexed", meaning the first element has index 0. R, by contrast, is 1-indexed. The `start` index is inclusive, the `stop` index is exclusive. Here is how to get the first element:
```
a_list[0]
```
We can also 'slice' a list to get elements (0 through 1 here):
```
a_list[:2]
```
We can get elements from the end with negative index numbers. -1 is the last element, so this gets elements -3 up to and not including -1.
```
a_list[-3:-1]
```
The step argument controls the stride (how far to move at each step). We can reverse a list with `[::-1]`:
```
# get every other element
a_list[::2]
a_list[::-1]
```
Lists have many built-in functions (a.k.a. methods): https://docs.python.org/3/tutorial/datastructures.html
append is a common one:
```
# adds 4 to the end of the list
a_list.append(4)
a_list
```
Lists can hold most anything, including other lists:
```
another_list = [[1, 2, 3], [5, 5, 5]]
another_list
```
We can change elements of lists, also called mutability:
```
a_list[0] = 10
```
Lists can also be concatenated (joined) with the + operator:
```
a_list + another_list
```
Use the `extend()` method of lists to add `another_list` on to the end of `a_list` (gives the same results as the + operator):
### Tuples
A tuple is like a list, but is "immutable", meaning you cannot change it:
```
a_tuple = tuple([1, 2, 3])
a_tuple
```
Try changing the value of the first element of `a_tuple` and see what happens (you will get an error):
### Sets
Sets are the mathematical type of set - a collection of unique values. They have several built-in functions and operators available: https://docs.python.org/3/library/stdtypes.html#set
```
a_set = set(a_list)
a_set
another_set = {1, 2, 3, 3}
another_set
```
The operator `in` checks if something is in something else, and works well with sets. It returns a boolean:
```
4 in a_set
```
Use the union operator (the `|` or "pipe" symbol, usually under your backspace key) to join `a_set` and `another_set`:
### Dictionaries
Dictionaries have keys and values, and look similar to sets. There are many functions and related data types for dictionaries (e.g. OrderedDict): https://docs.python.org/3/library/stdtypes.html#dict
```
a_dict = {'key1': 'value_1', 'key_2': 12}
a_dict
a_dict['another_key'] = 'hey'
a_dict
'key1' in a_dict
a_dict.keys()
a_dict.values()
a_dict.items()
```
Add another item to `a_dict` with the key `key3` and value `hooray!`:
### NumPy arrays
NumPy is a Python package for numerical analysis. It has math operators and other classes/objects. A common one is the `array` object, which is like a list but can be multi-dimensional:
```
import numpy as np
an_array = np.array([[1, 2, 3], [5, 5, 5]])
# indexing is [rows, columns]
an_array[:, 1] # get all rows and the second column
```
We're first importing the library with an alias of `np`, which is the conventional way to import this. Then we create an array and index it to get the second column and all rows.
Get the last row and all columns of `an_array`:
### Pandas DataFrames and Series
A common data handling package in Python is pandas, which uses NumPy to hold data. We can easily make a DataFrame (similar to a spreadsheet) from a dictionary:
```
import pandas as pd
df = pd.DataFrame(data={'people': [5, 2, 3], 'revenue': [10, 1, 12]})
df
```
If we get a single column from the DataFrame, it's a pandas Series. The data is stored as NumPy arrays.
```
df['people']
type(df['people'])
type(df)
```
Pandas is a crucial part of the data science Python technology stack, so you should spend time learning it. There are DataCamp, DataQuest, Kaggle, and other courses covering pandas. There is also a book used in MSDE620, *Pandas for Everyone*, which is quite good, as well as several other pandas books available through O'Reilly through the library.
Get the revenue column from the DataFrame:
## Functions
Functions are crucial in programming; we can write our own functions to avoid repeating ourselves and others have written packages with functions in them to make it easier to do common things. We can use some built-in functions like so:
```
# get the length of something
len(a_list)
print(a_list)
```
Functions have a name, then parentheses, then take some arguments. The arguments can be positional, named, and so on. For example, the documentation for the `sorted()` function looks like this:
`sorted(iterable, /, *, key=None, reverse=False)`
The first argument, iterable, is a positional-only argument. The `/` designates that anything before it is positional-only (e.g. we cannot provide a name like `sorted(iterable=a_list)`, but should do `sorted(a_list)`). The `*` means anything after it is keyword-only and cannot be positional. Keyword arguments have the name, then the value: `sorted(a_list, reverse=True)`.
```
sorted(a_list, reverse=True)
```
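Conversely, trying to pass the positional-only argument by name fails; here is a quick check:

```
try:
    sorted(iterable=a_list)  # 'iterable' is positional-only, so this fails
except TypeError as err:
    print(err)
```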
We can see the documentation for a function (or other object) by putting a question mark next to it, or using the `help()` function. For example, for `len`:
```
?len
help(len)
```
Notice we don't use the parentheses for the function when pulling up the documentation.
We can also find documentation online; for example, the len documentation is here: https://docs.python.org/3/library/functions.html
We make our own functions with the `def` keyword, the function name, then the arguments in parentheses, and a colon at the end. Default values for arguments can be set with an equals sign, like `b=12`. The function body is indented (most people use the tab key, which is converted to 4 spaces). When the indentation ends, so does the function. We can use the `return` keyword to return values from the function. Our custom functions are used just like the built-in or pre-built functions. We can provide arguments by name or position here:
```
def test_function(a, b=12):
return a + b
result = test_function(50)
result
test_function(a=50)
```
Use `test_function` to add 1 and 1 with named arguments:
## Objects and classes
Everything in Python is an object. These can have attributes. Attributes can be functions (methods) or values. For example, our pandas DataFrame and NumPy arrays have an attribute `shape` which is a tuple telling us the number of rows and columns:
```
df.shape
```
DataFrames have lots of attributes and methods. One method is `sort_values`:
```
df.sort_values('people', ascending=False)
```
Most of the complex objects in Python that have attributes are classes. There are ways to make our own classes, which is like making our own functions, but more advanced. It's beyond our scope here, but there are several books and online courses and tutorials that cover classes in Python, such as *Modern Python Cookbook - Second Edition* by Steven Lott available through the library, and the official documentation: https://docs.python.org/3/tutorial/classes.html
In Jupyter Notebooks and IPython, you can type a variable name, then the period, then press 'tab' to see what attributes are available. Try it with df:
## Scoping
One important topic is scoping - variables within functions or classes are not available outside of the functions or classes unless we declare them as globals. However, using global variables is bad practice and should be avoided. Here is an example of scoping: we cannot access the variable inside the function outside of it. We get a `NameError` because the variable does not exist outside of the function.
```
def scoping_example():
new_var = 123
return new_var
scoping_example()
new_var
```
Modify the function above so it prints out the `type` of `new_var` within the function. Also modify the code above so it doesn't result in an error.
## Loops
Along with lists, loops are another key part of Python. We can use `for` and `while` loops. `for` goes through an iterable like a list, `while` keeps going till we break it. We use the word `for`, then a variable name, then `in`, then an iterable like a list, then a colon. On the next lines, we indent them. When the indentation stops, so does the loop.
```
for i in [1, 2, 3]:
print(i)
for i in [1, 2, 3]:
print(i)
break
```
Loops have some keywords: `break` and `continue`. `break` stops the loop and exits it, `continue` goes to the next iteration.
While loops keep running until we use `break` or the condition is broken. We use the word `while`, then give a boolean, then a colon. Anything indented on the next lines is in the `while` loop.
```
i = 0
while i < 10:
print(i)
i += 1
```
The `+=` operator is shorthand for incrementing: `i += 1` is the same as `i = i + 1`.
We often use the `range` and `len` functions with loops. Here we loop through a list and get the index of each value in the list, then print each value:
```
for i in range(len(a_list)):
print(a_list[i])
```
We can also use the `zip` function to join two iterables together:
```
for i, j in zip(range(10), range(10, 20)):
print(i, j)
```
Try making a `for` loop to loop through a `range` from 5 to 10 and print out the numbers 5 to 10. It might help to look up the documentation for `range`.
## Conditionals
Along with loops, we can use conditions to branch our code. This is usually `if-elif-else` statements. We use comparisons or booleans to test and choose which branch to go down:
```
i = 10
if i == 10:
print('i is 10')
elif i > 10:
print('i is big')
else:
print('i is small')
```
We can use only the `if` by itself, or `if` with `else`, or include as many `elif`s as we want.
The comparison operators available are:
- `<` (less than)
- `>` (greater than)
- `==` (equal to)
- `>=` (greater than or equal to)
- `<=` (less than or equal to)
- `!=` (not equal)
- `is [not]` (the same object, or not; this checks identity rather than equality, as the example after this list shows)
- `[not] in` (in, or not in)
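Note that `==` compares values while `is` compares object identity:

```
a = [1, 2, 3]
b = [1, 2, 3]
print(a == b)  # True: same contents
print(a is b)  # False: two distinct list objects
```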
Try making an if statement to check if the length of `a_list` is greater than 10, and print out something telling us about the length of the list:
## Packages and modules
A module is a Python file, like module.py. A package is a collection of modules, like pandas and numpy. We import them like so:
```
import math
```
If we want to install new packages, we can use `pip install pandas` or `conda install -c conda-forge pandas`. We can search github.com, pypi.org (pip), and anaconda.org (conda) for packages. We can run these commands from a terminal, or from within a jupyter notebook like `!pip install pandas`.
We can also import a module from a package:
```
from numpy import warnings
warnings
```
We can also import specific variables or functions from modules or packages:
```
from math import ceil
```
We can change the name of an import with aliases:
```
from math import ceil as c
```
As we saw, we can alias package names like so:
```
import numpy as np
```
Don't do a wildcard (star) import like this; it makes it hard to know where functions or variables came from (making the code harder to read and understand):
```
# don't do this!
from numpy import *
```
Import the function `allclose` from `numpy` and alias it as `ac`:
There is an easter egg in Python. Try running `import this`.
## Keywords and built-in functions
In Python there are several keywords and built-in functions. We shouldn't name variables, functions, or classes the same thing as these keywords. We already saw some of these, and you may notice they turn green in Jupyter Notebook or IPython. We are not using all of these properly here, but you can see they are turning green. The `None` object is a special one - if a function returns nothing and we store it in a variable, the variable will be `None`.
```
None
pass
continue
break
for
range
in
```
Here is a keyword list, and list of built-in functions in Python:
- https://www.programiz.com/python-programming/keyword-list
- https://docs.python.org/3/library/functions.html
## Getting help
When we come across an error, there are a few ways to start off to get help:
- ? or help() to check the documentation
- documentation through an internet search engine
- internet search engine with the error
For example, if we try to access a variable that doesn't exist, we get the error:
```
not_a_var
```
The top part of the error is the 'traceback' - it steps through the code used and all the modules/python files and functions. At the end of the error, there is something like `NameError` or another CamelCase error, a colon, then the error. Here, it is `NameError: name 'not_a_var' is not defined`. We can copy-paste this into a search engine which will help. Often it will take us to stackoverflow, which is a very helpful site for figuring out what's going on. We also may find answers on GitHub issues for packages.
## Coding Style
The book *Clean Code in Python 2nd Edition* by Mariano Anaya has several principles on best practices for coding. Remember that most likely you will be the next person to read your code, so you want to make it easy to understand later. This is especially important if you are working on a bigger project you will be working on for months or years. Some best practices are:
- naming variables, functions, and classes clearly so they are easy to understand
- variables and functions should be `snake_case`, classes should be `CamelCase` (see the short sketch after this list)
- following PEP8 standards (https://www.python.org/dev/peps/pep-0008/, you can use the `autopep8` package to clean up your code if needed)
- using version control like Git with GitHub or GitLab
- breaking up redundant pieces of code into functions (DRY, do not repeat yourself)
- writing documentation for your functions
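Here is a short sketch of the naming conventions above (all names are made up):

```
MAX_RETRIES = 3             # constant: UPPER_CASE

def load_customer_data():   # function: snake_case
    pass

class CustomerRecord:       # class: CamelCase
    pass
```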
# The end!
# Question Answering on a Knowledge Graph
[](https://colab.research.google.com/github/deepset-ai/haystack/blob/master/tutorials/Tutorial10_Knowledge_Graph.ipynb)
Haystack allows storing and querying knowledge graphs with the help of pre-trained models that translate text queries to SPARQL queries.
This tutorial demonstrates how to load an existing knowledge graph into haystack, load a pre-trained retriever, and execute text queries on the knowledge graph.
The training of models that translate text queries into SPARQL queries is currently not supported.
```
# Install the latest release of Haystack in your own environment
#! pip install farm-haystack
# Install the latest master of Haystack
!pip install --upgrade pip
!pip install git+https://github.com/deepset-ai/haystack.git#egg=farm-haystack[colab,graphdb]
# Here are some imports that we'll need
import subprocess
import time
from pathlib import Path
from haystack.nodes import Text2SparqlRetriever
from haystack.document_stores import GraphDBKnowledgeGraph
from haystack.utils import fetch_archive_from_http
```
## Downloading Knowledge Graph and Model
```
# Let's first fetch some triples that we want to store in our knowledge graph
# Here: exemplary triples from the wizarding world
graph_dir = "data/tutorial10"
s3_url = "https://fandom-qa.s3-eu-west-1.amazonaws.com/triples_and_config.zip"
fetch_archive_from_http(url=s3_url, output_dir=graph_dir)
# Fetch a pre-trained BART model that translates text queries to SPARQL queries
model_dir = "../saved_models/tutorial10_knowledge_graph/"
s3_url = "https://fandom-qa.s3-eu-west-1.amazonaws.com/saved_models/hp_v3.4.zip"
fetch_archive_from_http(url=s3_url, output_dir=model_dir)
```
## Launching a GraphDB instance
```
# Unfortunately, there seems to be no good way to run GraphDB in colab environments
# In your local environment, you could start a GraphDB server with docker
# Feel free to check GraphDB's website for the free version https://www.ontotext.com/products/graphdb/graphdb-free/
print("Starting GraphDB ...")
status = subprocess.run(
[
"docker run -d -p 7200:7200 --name graphdb-instance-tutorial docker-registry.ontotext.com/graphdb-free:9.4.1-adoptopenjdk11"
],
shell=True,
)
if status.returncode:
raise Exception(
"Failed to launch GraphDB. Maybe it is already running or you already have a container with that name that you could start?"
)
time.sleep(5)
```
## Creating a new GraphDB repository (also known as index in haystack's document stores)
```
# Initialize a knowledge graph connected to GraphDB and use "tutorial_10_index" as the name of the index
kg = GraphDBKnowledgeGraph(index="tutorial_10_index")
# Delete the index as it might have been already created in previous runs
kg.delete_index()
# Create the index based on a configuration file
kg.create_index(config_path=Path(graph_dir + "repo-config.ttl"))
# Import triples of subject, predicate, and object statements from a ttl file
kg.import_from_ttl_file(index="tutorial_10_index", path=Path(graph_dir + "triples.ttl"))
print(f"The last triple stored in the knowledge graph is: {kg.get_all_triples()[-1]}")
print(f"There are {len(kg.get_all_triples())} triples stored in the knowledge graph.")
# Define prefixes for names of resources so that we can use shorter resource names in queries
prefixes = """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX hp: <https://deepset.ai/harry_potter/>
"""
kg.prefixes = prefixes
# Load a pre-trained model that translates text queries to SPARQL queries
kgqa_retriever = Text2SparqlRetriever(knowledge_graph=kg, model_name_or_path=model_dir + "hp_v3.4")
```
## Query Execution
We can now ask questions that will be answered by our knowledge graph!
One limitation though: our pre-trained model can only answer questions about resources it has seen during training.
Otherwise, it cannot translate the name of the resource to the identifier used in the knowledge graph.
E.g. "Harry" -> "hp:Harry_potter"
```
query = "In which house is Harry Potter?"
print(f'Translating the text query "{query}" to a SPARQL query and executing it on the knowledge graph...')
result = kgqa_retriever.retrieve(query=query)
print(result)
# Correct SPARQL query: select ?a { hp:Harry_potter hp:house ?a . }
# Correct answer: Gryffindor
print("Executing a SPARQL query with prefixed names of resources...")
result = kgqa_retriever._query_kg(
sparql_query="select distinct ?sbj where { ?sbj hp:job hp:Keeper_of_keys_and_grounds . }"
)
print(result)
# Paraphrased question: Who is the keeper of keys and grounds?
# Correct answer: Rubeus Hagrid
print("Executing a SPARQL query with full names of resources...")
result = kgqa_retriever._query_kg(
sparql_query="select distinct ?obj where { <https://deepset.ai/harry_potter/Hermione_granger> <https://deepset.ai/harry_potter/patronus> ?obj . }"
)
print(result)
# Paraphrased question: What is the patronus of Hermione?
# Correct answer: Otter
```
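To try a few more questions in one go, a minimal helper like the following can loop over text queries and print the retrieved results (the questions are made-up examples and, as noted above, must only involve resources the model saw during training):
```
# Run several text queries through the retriever and print the results
questions = [
    "In which house is Harry Potter?",
    "What is the patronus of Hermione?",
]
for q in questions:
    print(q, "->", kgqa_retriever.retrieve(query=q))
```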
## About us
This [Haystack](https://github.com/deepset-ai/haystack/) notebook was made with love by [deepset](https://deepset.ai/) in Berlin, Germany
We bring NLP to the industry via open source!
Our focus: Industry specific language models & large scale QA systems.
Some of our other work:
- [German BERT](https://deepset.ai/german-bert)
- [GermanQuAD and GermanDPR](https://deepset.ai/germanquad)
- [FARM](https://github.com/deepset-ai/FARM)
Get in touch:
[Twitter](https://twitter.com/deepset_ai) | [LinkedIn](https://www.linkedin.com/company/deepset-ai/) | [Slack](https://haystack.deepset.ai/community/join) | [GitHub Discussions](https://github.com/deepset-ai/haystack/discussions) | [Website](https://deepset.ai)
By the way: [we're hiring!](https://www.deepset.ai/jobs)
|
github_jupyter
|
# Install the latest release of Haystack in your own environment
#! pip install farm-haystack
# Install the latest master of Haystack
!pip install --upgrade pip
!pip install git+https://github.com/deepset-ai/haystack.git#egg=farm-haystack[colab,graphdb]
# Here are some imports that we'll need
import subprocess
import time
from pathlib import Path
from haystack.nodes import Text2SparqlRetriever
from haystack.document_stores import GraphDBKnowledgeGraph
from haystack.utils import fetch_archive_from_http
# Let's first fetch some triples that we want to store in our knowledge graph
# Here: exemplary triples from the wizarding world
graph_dir = "data/tutorial10/"
s3_url = "https://fandom-qa.s3-eu-west-1.amazonaws.com/triples_and_config.zip"
fetch_archive_from_http(url=s3_url, output_dir=graph_dir)
# Fetch a pre-trained BART model that translates text queries to SPARQL queries
model_dir = "../saved_models/tutorial10_knowledge_graph/"
s3_url = "https://fandom-qa.s3-eu-west-1.amazonaws.com/saved_models/hp_v3.4.zip"
fetch_archive_from_http(url=s3_url, output_dir=model_dir)
# Unfortunately, there seems to be no good way to run GraphDB in colab environments
# In your local environment, you could start a GraphDB server with docker
# Feel free to check GraphDB's website for the free version https://www.ontotext.com/products/graphdb/graphdb-free/
print("Starting GraphDB ...")
status = subprocess.run(
[
"docker run -d -p 7200:7200 --name graphdb-instance-tutorial docker-registry.ontotext.com/graphdb-free:9.4.1-adoptopenjdk11"
],
shell=True,
)
if status.returncode:
raise Exception(
"Failed to launch GraphDB. Maybe it is already running or you already have a container with that name that you could start?"
)
time.sleep(5)
# Initialize a knowledge graph connected to GraphDB and use "tutorial_10_index" as the name of the index
kg = GraphDBKnowledgeGraph(index="tutorial_10_index")
# Delete the index as it might have been already created in previous runs
kg.delete_index()
# Create the index based on a configuration file
kg.create_index(config_path=Path(graph_dir + "repo-config.ttl"))
# Import triples of subject, predicate, and object statements from a ttl file
kg.import_from_ttl_file(index="tutorial_10_index", path=Path(graph_dir + "triples.ttl"))
print(f"The last triple stored in the knowledge graph is: {kg.get_all_triples()[-1]}")
print(f"There are {len(kg.get_all_triples())} triples stored in the knowledge graph.")
# Define prefixes for names of resources so that we can use shorter resource names in queries
prefixes = """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX hp: <https://deepset.ai/harry_potter/>
"""
kg.prefixes = prefixes
# Load a pre-trained model that translates text queries to SPARQL queries
kgqa_retriever = Text2SparqlRetriever(knowledge_graph=kg, model_name_or_path=model_dir + "hp_v3.4")
query = "In which house is Harry Potter?"
print(f'Translating the text query "{query}" to a SPARQL query and executing it on the knowledge graph...')
result = kgqa_retriever.retrieve(query=query)
print(result)
# Correct SPARQL query: select ?a { hp:Harry_potter hp:house ?a . }
# Correct answer: Gryffindor
print("Executing a SPARQL query with prefixed names of resources...")
result = kgqa_retriever._query_kg(
sparql_query="select distinct ?sbj where { ?sbj hp:job hp:Keeper_of_keys_and_grounds . }"
)
print(result)
# Paraphrased question: Who is the keeper of keys and grounds?
# Correct answer: Rubeus Hagrid
print("Executing a SPARQL query with full names of resources...")
result = kgqa_retriever._query_kg(
sparql_query="select distinct ?obj where { <https://deepset.ai/harry_potter/Hermione_granger> <https://deepset.ai/harry_potter/patronus> ?obj . }"
)
print(result)
# Paraphrased question: What is the patronus of Hermione?
# Correct answer: Otter
| 0.66454 | 0.922203 |
## Convert Open Forcefield System to AMBER and GROMACS input files
The Open Forcefield Toolkit can create parametrized `System` objects that can be natively simulated with OpenMM. This example shows how you can convert a `System` into AMBER prmtop/inpcrd and GROMACS top/gro input files through the ParmEd library.
### Create an OpenMM System
We start by loading a PDB file containing one copy of ethanol and cyclohexane. Our goal is to create an OFF `Topology` object describing this system that we can parametrize with the SMIRNOFF-format "Parsley" force field.
The two `Molecule` objects created from the SMILES strings can contain information such as partial charges and stereochemistry that is not included in an OpenMM topology. In this example, partial charges are not explicitly given, and `ForceField` will assign AM1/BCC charges as specified by the "Parsley" force field.
```
from simtk.openmm.app import PDBFile
from openff.toolkit.topology import Molecule, Topology
from openff.toolkit.typing.engines.smirnoff import ForceField
ethanol = Molecule.from_smiles("CCO")
cyclohexane = Molecule.from_smiles("C1CCCCC1")
# Obtain the OpenMM Topology object from the PDB file.
pdbfile = PDBFile('1_cyclohexane_1_ethanol.pdb')
omm_topology = pdbfile.topology
# Create the Open Forcefield Topology.
off_topology = Topology.from_openmm(omm_topology, unique_molecules=[ethanol, cyclohexane])
```
Now we parametrize the OFF `Topology` to create an OpenMM `System`. Since ParmEd will run with the `constraints=HBonds` keyword later, we use the _unconstrained_ version of the Parsley force field here.
```
# Load the "Parsley" force field.
forcefield = ForceField('openff_unconstrained-1.0.0.offxml')
omm_system = forcefield.create_openmm_system(off_topology)
```
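As noted earlier, `ForceField` computes AM1-BCC charges on the fly when none are provided. If you already have partial charges (or want to control how they are generated), a possible sketch is shown below; `assign_partial_charges` and the `charge_from_molecules` keyword are assumptions about the OpenFF Toolkit API and may differ between toolkit versions:
```
# Optional sketch: supply pre-computed charges instead of letting the force
# field compute them during parametrization (requires a charge backend such
# as AmberTools or the OpenEye toolkits).
ethanol.assign_partial_charges(partial_charge_method="am1bcc")
omm_system_precharged = forcefield.create_openmm_system(
    off_topology, charge_from_molecules=[ethanol]
)
```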
### Convert OpenMM System to AMBER and GROMACS files
First, we convert the OpenMM `System` into a ParmEd `Structure`. We'll use the atom positions in the PDB to create the coordinate files.
<div class="alert alert-block alert-warning">
<b>Warning:</b> ParmEd's Structure model is inspired by AMBER, and some information in an OpenMM System is not directly translatable into a Structure. In particular, as of today (4/2/2019), the long-range interaction treatment method (e.g., PME, CutoffPeriodic) and its parameters (e.g., cutoff and cutoff switching distance, PME error tolerance) are known to be lost during the conversion.
</div>
```
import parmed
# Convert OpenMM System to a ParmEd structure.
parmed_structure = parmed.openmm.load_topology(omm_topology, omm_system, pdbfile.positions)
```
We can then use ParmEd to convert an OpenMM `System` to prmtop/inpcrd or top/gro files that can be read by AMBER and GROMACS respectively. ParmEd is capable of converting parametrized files to other formats as well. For further information, see ParmEd's documentation: https://parmed.github.io/ParmEd/html/readwrite.html .
```
# Export AMBER files.
parmed_structure.save('system.prmtop', overwrite=True)
parmed_structure.save('system.inpcrd', overwrite=True)
# Export GROMACS files.
parmed_structure.save('system.top', overwrite=True)
parmed_structure.save('system.gro', overwrite=True)
```
### Validate the conversion
ParmEd is generally a reliable and robust library, but we can easily check that everything went as expected during the conversion by loading the exported files into an OpenMM `System` and comparing it with the original. Note that you'll have to specify the correct nonbonded method and cutoff settings for the energy comparison to make sense since they are not included in the AMBER prmtop (or GROMACS top/gro) files.
```
from simtk import openmm
for force in omm_system.getForces():
if isinstance(force, openmm.NonbondedForce):
break
print(force.getCutoffDistance())
print(force.getUseSwitchingFunction())
print(force.getNonbondedMethod() == openmm.NonbondedForce.PME)
from simtk import unit
from simtk.openmm.app import PME, HBonds
from openff.toolkit.tests.utils import compare_system_parameters, compare_system_energies
# Load the prmtop/inpcrd files into a ParmEd Structure.
amber_structure = parmed.load_file('system.prmtop', 'system.inpcrd')
# Convert the Structure to an OpenMM System. Note that by
# default ParmEd will add a CMMotionRemover force to the
# System, and won't constrain the hydrogen bonds.
amber_system = amber_structure.createSystem(nonbondedMethod=PME,
nonbondedCutoff=9.0*unit.angstrom,
switchDistance=0.0*unit.angstrom,
constraints=HBonds,
removeCMMotion=False)
# Compare the parameters of the original and converted Systems.
# This raises FailedParameterComparisonError if the comparison fails.
compare_system_parameters(omm_system, amber_system)
# Compare the energies by force.
# This raises FailedEnergyComparisonError if the comparison fails.
amber_energies, omm_energies = compare_system_energies(
amber_system, omm_system, amber_structure.positions, amber_structure.box_vectors,
rtol=1e-3)
# Pretty-print the energies by component.
from pprint import pprint
print('System loaded from AMBER files:')
print('-------------------------------')
pprint(amber_energies)
print('\nOriginal OpenMM System:')
print('-----------------------')
pprint(omm_energies)
```
|
github_jupyter
|
from simtk.openmm.app import PDBFile
from openff.toolkit.topology import Molecule, Topology
from openff.toolkit.typing.engines.smirnoff import ForceField
ethanol = Molecule.from_smiles("CCO")
cyclohexane = Molecule.from_smiles("C1CCCCC1")
# Obtain the OpenMM Topology object from the PDB file.
pdbfile = PDBFile('1_cyclohexane_1_ethanol.pdb')
omm_topology = pdbfile.topology
# Create the Open Forcefield Topology.
off_topology = Topology.from_openmm(omm_topology, unique_molecules=[ethanol, cyclohexane])
# Load the "Parsley" force field.
forcefield = ForceField('openff_unconstrained-1.0.0.offxml')
omm_system = forcefield.create_openmm_system(off_topology)
import parmed
# Convert OpenMM System to a ParmEd structure.
parmed_structure = parmed.openmm.load_topology(omm_topology, omm_system, pdbfile.positions)
# Export AMBER files.
parmed_structure.save('system.prmtop', overwrite=True)
parmed_structure.save('system.inpcrd', overwrite=True)
# Export GROMACS files.
parmed_structure.save('system.top', overwrite=True)
parmed_structure.save('system.gro', overwrite=True)
from simtk import openmm
for force in omm_system.getForces():
if isinstance(force, openmm.NonbondedForce):
break
print(force.getCutoffDistance())
print(force.getUseSwitchingFunction())
print(force.getNonbondedMethod() == openmm.NonbondedForce.PME)
from simtk import unit
from simtk.openmm.app import PME, HBonds
from openff.toolkit.tests.utils import compare_system_parameters, compare_system_energies
# Load the prmtop/inpcrd files into a ParmEd Structure.
amber_structure = parmed.load_file('system.prmtop', 'system.inpcrd')
# Convert the Structure to an OpenMM System. Note that by
# default ParmEd will add a CMMotionRemover force to the
# System, and won't constrain the hydrogen bonds.
amber_system = amber_structure.createSystem(nonbondedMethod=PME,
nonbondedCutoff=9.0*unit.angstrom,
switchDistance=0.0*unit.angstrom,
constraints=HBonds,
removeCMMotion=False)
# Compare the parameters of the original and converted Systems.
# This raises FailedParameterComparisonError if the comparison fails.
compare_system_parameters(omm_system, amber_system)
# Compare the energies by force.
# This raises FailedEnergyComparisonError if the comparison fails.
amber_energies, omm_energies = compare_system_energies(
amber_system, omm_system, amber_structure.positions, amber_structure.box_vectors,
rtol=1e-3)
# Pretty-print the energies by component.
from pprint import pprint
print('System loaded from AMBER files:')
print('-------------------------------')
pprint(amber_energies)
print('\nOriginal OpenMM System:')
print('-----------------------')
pprint(omm_energies)
| 0.695752 | 0.909947 |
This notebook is adapted from a lesson from the 2019 [KIPAC/StatisticalMethods course](https://github.com/KIPAC/StatisticalMethods), (c) 2019 Adam Mantz and Phil Marshall, licensed under the GPLv2.
# Evaluating Models
Goals:
* Be able to design and carry out tests of model adequacy (goodness of fit), and comparisons between models
* Understand and be prepared to use the Bayesian Evidence
## Preamble
You can't do inference without making assumptions.
$\longrightarrow$ We must _test_ the hypotheses defined by our models.
Three related but distinct questions come under the heading of **model evaluation**.
1. Does a model describe (fit) the data well?
2. Does a model make accurate predictions about new data?
3. How probable are our competing models in light of the data?
Often (2) and (3) are directly related to **model comparison** or **selection**.
Throughout this (and always!), **"model" means a complete generative model**.
That is, a "model" includes the specification of a prior.
**A familiar example:** imagine we have a data set like this
<img src="graphics/modelcheck-data.png" width=50%>
Specifically,
* we have precisely known $x$ values
* we have precisely known, Gaussian errors on $y$
* we're fitting a linear model, $\bar{y}(x)=b+mx$
Visual comparison of models drawn from the posterior with the data:
<table>
<tr>
<td><img src="graphics/modelcheck-linear-posterior.png" width=90%></td>
<td></td>
<td><img src="graphics/modelcheck-linear.png" width=90%></td>
</tr>
</table>
### Brainstorm
How might we decide whether our model adequately explains how the data were generated?
In this case, the likelihood is $\propto e^{-\chi^2/2}$.
So is the posterior, given uniform priors on $m$ and $b$.
## Classical Hypothesis Testing
Assuming this model (line plus Gaussian errors) is correct, the distribution over data sets of $\hat{\chi}^2$ must follow a $\chi^2_\nu$ distribution, where
* $\hat{\chi}^2$ is the best-fit $\chi^2$ over parameters for a given data set
* the number of degrees of freedom $\nu=N_\mathrm{data}-N_\mathrm{params}$
Hence, the classical $\chi^2$ test looks at whether $\hat{\chi}^2$ is consistent with this distribution. If not, it's unlikely that our data came from the assumed model.
In this case, the value of $\hat{\chi}^2\approx104$ doesn't look good in light of the expectation.
<img src="graphics/modelcheck-chisq.png" width=50%>
The probability $P(\chi^2\geq\hat{\chi}^2|\nu)$ ($\sim10^{-10}$ in this case) is called the **$p$-value** or **significance**.
* If the "null hypothesis" (our assumed model, with fitted parameters $[\hat{m},\hat{b}]$) is true, we expect a fraction $p$ of hypothetical new datasets to have $\chi^2$ values greater than $\hat{\chi}^2$.
The $p$-value is _not_ the probability of the model $(m,b)$ being true. Like the sampling distribution from which it is derived, it characterizes the probability of getting the data given the assumed model and its estimated parameters.
The result of a classical hypothesis test is of the following form:
_"We reject the null hypothesis at the $p$ significance level"_
(i.e. on the grounds that it inadequately predicts the data.)
### Practical Chi-squared Testing
* We can compute the p-value assuming a chi-squared distribution using `scipy.stats`:
```python
import scipy.stats
chisq = scipy.stats.chi2(Ndof)
pvalue = chisq.sf(chisq_min)
```
* The "reduced chi-squared", $\hat{\chi}^2_{R} = \hat{\chi}^2 / N_{\rm dof}$, is often used by astronomers to quantify goodness of fit - but note that you need to know the number of degrees of freedom separately from $\hat{\chi}^2$ to be able to interpret it.
* A useful, quick way to make sense of $\hat{\chi}^2$ and $N_{\rm dof}$ values is to use **Fisher's Gaussian approximation to the chi-squared distribution**:
$\;\;\;\;\;\sqrt{2\hat{\chi}^2} \sim \mathcal{N}\left( \sqrt{2 N_{\rm dof}-1}, 1 \right)$ (approximately)
$\longrightarrow$ The difference between $\sqrt{2\hat{\chi}^2}$ and $\sqrt{2 N_{\rm dof}-1}$ is the "number of sigma" ($n_{\sigma}$) we are away from a good fit.
> In our case, the MLE model is about 7-sigma away from being a good fit.
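Putting the two recipes above together with concrete numbers (the $\hat{\chi}^2\approx104$ value comes from the example; $N_{\rm dof}=28$ is an assumed illustrative value), a quick sketch looks like:
```
import numpy as np
import scipy.stats

# Illustrative values: chi-squared at the MLE and the number of degrees of freedom
chisq_min, Ndof = 104.0, 28

# Classical p-value from the chi-squared survival function
pvalue = scipy.stats.chi2(Ndof).sf(chisq_min)

# Fisher's Gaussian approximation: "number of sigma" away from a good fit
n_sigma = np.sqrt(2.0 * chisq_min) - np.sqrt(2.0 * Ndof - 1.0)

print(pvalue, n_sigma)  # of order 1e-10, and about 7 sigma
```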
## Bayesian Hypothesis Testing
* In general our likelihood won't have nice, analytic properties.
* We will want to evaluate the success of our model at explaining the data taking our uncertainty in the model parameters into account.
We can construct analogous hypothesis tests by _simulating many "replica" data sets realized from the posterior distribution,_ and
comparing the observed data with the replica data via a suitable summary "test statistic", and its **"posterior predictive distribution"**.
We are free to design our test statistic to focus on the aspect of the data that we want the model to fit well.
**Posterior predictive model checking** - logic:
* If our model is the true one, then *replica* data generated by it should "look like" the one dataset we have.
* This means that any *summary* $T$ of both the real dataset, $T(d)$, and the replica datasets, $T(d^{\rm rep})$, should follow the same distribution over noise realizations _and_ model parameters.
* If the real dataset was not generated with our model, then its summary may be an _outlier_ from the distribution of summaries of replica datasets.
Note the similarity to the logic of the classical hypothesis test. The difference is that the Bayesian replica datasets were generated with plausible values of the parameters (drawn from the posterior PDF), while all the hypothetical datasets in frequentism (each with their own $\hat{\chi}^2$) are drawn using the same model parameters (the estimated ones).
Example test statistic: Pearson Correlation $r_{12}$
* Focuses on the tightness of linear correlation between $x$ and $y$
* $T(d) = r_{12} = \frac{\sum_i (x_i - \bar{x})(y_i - \bar{y})}{\left[ \sum_i (x_i - \bar{x})^2 \sum_i (y_i - \bar{y})^2 \right]^{1/2}}$
For each one of many posterior samples, we draw a _replica dataset_ from the sampling distribution given the sample parameter vector, and compute $T(d^{\rm rep})$, building up a histogram of $T$.
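In code, that procedure is only a few lines. The sketch below assumes we already have the data arrays (`x`, `y`, `sigma_y`) and posterior samples of the slope and intercept (`m_samples`, `b_samples`); none of these names come from the original notebook:
```
import numpy as np

def pearson_r(x, y):
    # Test statistic: Pearson correlation coefficient between x and y
    return np.corrcoef(x, y)[0, 1]

def posterior_predictive_check(x, y, sigma_y, m_samples, b_samples, rng=np.random):
    T_data = pearson_r(x, y)
    T_rep = []
    for m, b in zip(m_samples, b_samples):
        # Draw a replica dataset from the sampling distribution at this posterior sample
        y_rep = rng.normal(m * x + b, sigma_y)
        T_rep.append(pearson_r(x, y_rep))
    # Fraction of replica datasets with T(d_rep) > T(d)
    return np.mean(np.array(T_rep) > T_data)
```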
${\rm P}[T(d^{\rm rep})>T(d)\,|\,d] = 99.43\%$ - our dataset $d$ is clearly an outlier.
<img src="graphics/modelcheck-linear-TS.png" width=50%>
* The posterior predictive probability distribution for the test statistic $T(d)$ generated by sampling in this way is marginalized over both parameters and (replica) datasets.
* It takes into account both the uncertainty in the data (captured by the sampling distribution) _and_ the uncertainty in the parameters (propagated from our one dataset and our prior knowledge during posterior sampling).
* Posterior predictive model checking can be seen as the Bayesian extension of classical hypothesis testing, and is a useful test of _model adequacy_.
* As with classical hypothesis testing, a model can be discarded (or retained) on the basis of a posterior predictive model check.
* Note that we did not have to make any approximations in order to use a standard distribution for our summary $T$: _we just used the posterior PDF we already had_.
Test statistics $T(d,\theta)$ that are functions of both the data and the parameters are called **discrepancy measures**.
The maximum log-likelihood is a common example.
Discrepancy measure: $T = \hat{\chi}^2$; ${\rm Pr}(T(d^{\rm rep},\theta)>T(d,\theta)\,|\,d) \approx 0.0$
<img src="graphics/modelcheck-linear-discrepancy.png" width=50%>
Any way we look at it, it's unlikely that we'd conclude the linear model explains these data adequately. How do we choose an alternative?
One way to compare the fitness of models is to look at question (2) in model evaluation: **How accurately do they predict new data?**
## Generalized Predictive Accuracy and "Information Criteria"
* We typically want a fit that works well with any *potential* data set, rather than just reproducing the one we have.
* In general, this means an "Occam's Razor"-like penalty for complexity should be involved (to avoid focusing on models that "over-fit" the data).
In our example, we might add a quadratic term to the model: $y = b + m x + q x^2$. How do we quantify the improvement?
<table><tr>
<td><img src="graphics/modelcheck-quadratic.png" width=80%></td>
<td><img src="graphics/modelcheck-quadratic-discrepancy.png" width=80%></td>
</tr></table>
The gold standard for testing predictive accuracy is to _get more data_.
Short of that, the best option is **cross-validation**: fitting a model on many random subsets of the data and seeing how well it describes the complementary "out of sample" subsets.
> This method is ubiquitous in machine learning, where accurate out-of-sample prediction is usually the goal.
Short of exhaustive cross-validation, a number of **information criteria** exist that (asymptotically) relate to generalized predictive accuracy.
These have the advantage of being relatively quick to calculate from the results of a fit - either an MLE or a set of posterior samples - and include a penalty for models with greater freedom.
Some information criteria:
* Akaike information criterion (AIC)
* Deviance information criterion (DIC)
* Watanabe-Akaike information criterion (WAIC)
The DIC has the advantage of being compatible with Bayesian analysis (unlike AIC), and not requiring the data to be cleanly separable into conditionally independent subsets (unlike WAIC).
$\mathrm{DIC} = \langle D(\theta) \rangle + 2p_D; \quad p_D = \langle D(\theta) \rangle - D(\langle\theta\rangle)$
where $D(\theta)=-2\log P(\mathrm{data}|\theta)$ and averages $\langle\rangle$ are over the posterior.
$p_D$ is an _effective number of free parameters_, i.e. the number of parameters primarily constrained by the data rather than by their priors.
The DIC thus doesn't necessarily count unconstrained nuisance parameters used to marginalize out systematics as "added complexity".
Note that for all of these information criteria, a **lower** value is preferable (larger likelihood and/or less model complexity).
A somewhat motivated scale for interpreting differences in IC exists (named for Jeffreys):
<table style='font-size:100%'>
<thead><td>$$e^{(\mathrm{IC}_1-\mathrm{IC}_2)/2}$$</td><td>Strength of evidence for model 2</td></thead>
<tr><td> $<1$ </td><td> Negative </td></tr>
<tr><td> $1$-$3$ </td><td> Barely worth mentioning </td></tr>
<tr><td> $3$-$10$ </td><td> Substantial </td></tr>
<tr><td> $10$-$30$ </td><td> Strong </td></tr>
<tr><td> $30$-$100$ </td><td> Very strong </td></tr>
<tr><td> $>100$ </td><td> Decisive </td></tr>
</table>
### Exercise: Priors and the DIC
Say our model has 1 parameter, $\theta$, and the likelihood is a unit width Gaussian centered on $\theta=0$ with peak value $L_{\rm max}$.
For each of the priors on $\theta$ below, (a) sketch the likelihood and prior as a function of theta, (b) roughly approximate the DIC and $p_D$ for that model (just well enough for a qualitative comparison between the models).
1. $P(\theta|H_1)$ uniform on $[-1,+1]$
2. $P(\theta|H_2)$ uniform on $[-100,+100]$
3. $P(\theta|H_3)$ uniform on $[+3,+5]$
Recall: $\mathrm{DIC} = \langle D(\theta) \rangle + 2p_D; \quad p_D = \langle D(\theta) \rangle - D(\langle\theta\rangle)$
```
import numpy as np
import scipy.stats as st
def DIC_thingy(lower, upper):
y = st.truncnorm.rvs(lower, upper, size=100000)
av_of_D = np.mean(-2.0*st.norm.logpdf(y))
D_of_av = -2.0*st.norm.logpdf( np.mean(y) )
pD = av_of_D - D_of_av
DIC = av_of_D + 2*pD
return av_of_D, D_of_av, pD, DIC
print(DIC_thingy(-1.0, 1.0))
print(DIC_thingy(-100.0, 100.0))
print(DIC_thingy(3.0, 5.0))
```
**DIC exercise: notes**
1) Models that are less prescriptive (in terms of their priors) are penalized in the DIC.
2) However, there is a limit to this penalty. As the prior becomes less prescriptive, we get the penalty associated with "another free parameter", and that's it.
3) Sufficiently large improvements to the likelihood will overcome this.
How about the third question - **How probable are our competing models in the light of the data?**
* This question cannot be asked in classical statistics - where only data have probability distributions.
* Bayes' theorem gives us a framework for assessing relative model probabilities which naturally includes Occam's razor.
## Bayesian Model Comparison
Inference on parameters $\theta$ given model $H$:
$P(\theta|D,H)=\frac{P(D|\theta,H)P(\theta|H)}{P(D|H)}$
Inference on models $H$:
$P(H|D,\Omega)=\frac{P(D|H,\Omega)P(H|\Omega)}{P(D|\Omega)}$
> NB. $H$ is a list of all of our assumptions - including our prior PDF assignments.
Here $\Omega$ is some space of all allowed models. As we normally do for parameter inference, we'll work with a simplified version:
$P(H|D)\propto P(D|H)P(H)$
$P(H)$ is a prior on the model, and
$P(D|H)=\int P(D|\theta,H) \, P(\theta|H) d\theta$
is the **evidence** - the normalizing denominator in Bayesian parameter inference (also known as the **fully marginalized likelihood**).
Ideally, we would compare models by looking at
$\frac{P(H_2|D)}{P(H_1|D)}=\frac{P(D|H_2)\,P(H_2)}{P(D|H_1)\,P(H_1)}$
General difficulties in computing the terms in this ratio:
* Assigning meaningful priors to models
* Assigning meaningful priors to parameters
* Calculating the evidence integral
### Exercise: Priors and the evidence
Say we have a model with 1 parameter, $\theta$, and a likelihood that works out to be a unit width Gaussian centered on $\theta=0$ with peak value $L_{\rm max}$.
For each of the priors on $\theta$ below, (a) sketch the likelihood and prior as a function of theta, (b) roughly approximate the evidence for that model (just well enough for a qualitative comparison between the models).
1. $P(\theta|H_1)$ uniform on $[-1,+1]$
2. $P(\theta|H_2)$ uniform on $[-100,+100]$
3. $P(\theta|H_3)$ uniform on $[+3,+5]$
Recall: $P(D|H)=\int P(D|\theta,H) \, P(\theta|H) d\theta$
```
def Evidence_thingy(lower, upper):
return (st.norm.cdf(upper) - st.norm.cdf(lower)) / (upper - lower)
print(Evidence_thingy(-1.0, 1.0))
print(Evidence_thingy(-100.0, 100.0))
print(Evidence_thingy(3.0, 5.0))
```
**Evidence exercise: notes**
1) Models that are less prescriptive (in terms of their priors) are penalized in the evidence. This is a feature, although it means we need to put real thought into those priors.
2) The evidence can be made arbitrarily small by increasing the prior volume: comparing evidences is more conservative than focusing on the goodness of fit ($L_{\rm max}$) alone.
3) The evidence is linearly sensitive to prior volume, but exponentially sensitive to goodness of fit ($L_{\rm max} \propto e^{-\hat{\chi}^2/2}$). It's still a likelihood, after all.
The evidence for model $H$, $P(D\,|\,H)$, enables a form of Bayesian hypothesis testing: model comparison with the "evidence ratio" or "odds ratio" or "Bayes Factor" $R$
$R = \frac{P(D|H_2)}{P(D|H_1)}$
$R$ is a *fully marginalized likelihood ratio* - which is to say that it *takes into account our uncertainty about values of the parameters of each model by integrating over all plausible values of them.*
Notice that if your two models are equally probable _a priori_, then
$\frac{P(H_2)}{P(H_1)} = 1$ such that $\frac{P(H_2|D)}{P(H_1|D)} = R$
This assumption is not always easy to justify, but it makes $R$ easy to interpret: it's just the ratio of model probabilities in our ideal comparison.
A more practical way to interpret the Bayes factor is to note that it updates the model prior ratio into a posterior one. This means that:
* If you believe, despite having seen the data and computed $R$, that your two models are *still equally probable,*
* then $R$ gives _the odds that you would have had to be willing to take against $H_2$ before seeing the data._
In our linear model fit example, we can compute the evidence for the linear and quadratic models, and form the odds ratio $R$.
```
log Evidence for Straight Line Model: -157.2
log Evidence for Quadratic Model: -120.7
Evidence ratio in favour of the Quadratic Model:
7e15 to 1
```
The roughly 36 unit difference in log evidence between the two models translates to a _huge_ odds ratio in favour of the quadratic model.
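As a quick check of that arithmetic (using only the log evidences quoted above):
```
import numpy as np

log_Z_linear, log_Z_quadratic = -157.2, -120.7
print(np.exp(log_Z_quadratic - log_Z_linear))  # ~7e15, as quoted above
```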
Incidentally those data did not come from *either* a linear or quadratic model...
The same Jeffreys scale used to interpret the information criteria can be used to interpret evidence ratios:
<table style='font-size:100%'>
<thead><td>$R$</td><td>Strength of evidence for model 2</td></thead>
<tr><td> $<1$ </td><td> Negative </td></tr>
<tr><td> 1-3 </td><td> Barely worth mentioning </td></tr>
<tr><td> 3-10 </td><td> Substantial </td></tr>
<tr><td> 10-30 </td><td> Strong </td></tr>
<tr><td> 30-100 </td><td> Very strong </td></tr>
<tr><td> $>100$ </td><td> Decisive </td></tr>
</table>
> The Bayesian Information Criterion (BIC) is an approximation of $R$ (assuming $N$ datapoints greatly outnumber $k$ parameters, and the priors are uninformative).
**Calculating the evidence**
Estimates directly calculated from Markov chains produced for parameter inference are generally not reliable.
Good methods include nested sampling (e.g. [MultiNest](https://github.com/JohannesBuchner/PyMultiNest)) and parallel tempering / thermodynamic integration (e.g. [emcee](http://dan.iel.fm/emcee/current/)).
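For very low-dimensional problems like the toy example above, a brute-force estimate is also possible by averaging the likelihood over draws from the prior. This simple Monte Carlo approach does not scale to realistic problems, but it makes the definition of the evidence concrete (a sketch for the 1-parameter, unit-width Gaussian likelihood with a uniform prior):
```
import numpy as np
import scipy.stats as st

def simple_mc_evidence(lower, upper, n_samples=100000, rng=np.random):
    # Draw parameter values from the uniform prior on [lower, upper] ...
    theta = rng.uniform(lower, upper, size=n_samples)
    # ... and average the likelihood over those prior draws
    return np.mean(st.norm.pdf(theta))

print(simple_mc_evidence(-1.0, 1.0))    # should approach the analytic values above
print(simple_mc_evidence(-100.0, 100.0))
print(simple_mc_evidence(3.0, 5.0))
```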
**Bayesian Evidence: closing thoughts**
* The Bayesian evidence is *qualitatively different* from other model assessments. While they focus primarily on *prediction accuracy,* the evidence is the way in which information from the prior PDF propagates through into our posterior beliefs about the model as a whole.
* There are no inherent mathematical limitations to its use, in contrast to various other hypothesis tests that are only valid under certain assumptions (such as the models being nested, e.g. the classical $F$ test for comparing $\chi^2$ values). _Any two models can be compared and the odds ratio computed._
### Model Evaluation Summary
1. Does a model describe (fit) the data well?
> Posterior predictive model checks (visual, test stats, discrepancy measures)
2. Does a model make accurate predictions about new data?
> Cross validation; information criteria to quantify generalized predictive accuracy
3. How probable are our competing models in light of the data?
> Bayesian Evidence ratios ("Bayes factors")
|
github_jupyter
|
import scipy.stats
chisq = scipy.stats.chi2(Ndof)
pvalue = chisq.sf(chisq_min)
import numpy as np
import scipy.stats as st
def DIC_thingy(lower, upper):
y = st.truncnorm.rvs(lower, upper, size=100000)
av_of_D = np.mean(-2.0*st.norm.logpdf(y))
D_of_av = -2.0*st.norm.logpdf( np.mean(y) )
pD = av_of_D - D_of_av
DIC = av_of_D + 2*pD
return av_of_D, D_of_av, pD, DIC
print(DIC_thingy(-1.0, 1.0))
print(DIC_thingy(-100.0, 100.0))
print(DIC_thingy(3.0, 5.0))
def Evidence_thingy(lower, upper):
return (st.norm.cdf(upper) - st.norm.cdf(lower)) / (upper - lower)
print(Evidence_thingy(-1.0, 1.0))
print(Evidence_thingy(-100.0, 100.0))
print(Evidence_thingy(3.0, 5.0))
log Evidence for Straight Line Model: -157.2
log Evidence for Quadratic Model: -120.7
Evidence ratio in favour of the Quadratic Model:
7e15 to 1
| 0.409339 | 0.995906 |
# Using Convolutional Neural Networks
Welcome to the first week of the first deep learning certificate! We're going to use convolutional neural networks (CNNs) to allow our computer to see - something that is only possible thanks to deep learning.
## Introduction to this week's task: 'Dogs vs Cats'
We're going to try to create a model to enter the [Dogs vs Cats](https://www.kaggle.com/c/dogs-vs-cats) competition at Kaggle. There are 25,000 labelled dog and cat photos available for training, and 12,500 in the test set that we have to try to label for this competition. According to the Kaggle web-site, when this competition was launched (end of 2013): *"**State of the art**: The current literature suggests machine classifiers can score above 80% accuracy on this task"*. So if we can beat 80%, then we will be at the cutting edge as of 2013!
## Basic setup
There isn't too much to do to get started - just a few simple configuration steps.
This shows plots in the web page itself - we always wants to use this when using jupyter notebook:
```
#!pip install 'keras<2'
#!pip install 'h5py'
%matplotlib inline
```
Define path to data: (It's a good idea to put it in a subdirectory of your notebooks folder, and then exclude that directory from git control by adding it to .gitignore.)
```
%cd fastai_courses/deeplearning1/nbs
# path = "data/dogscats/"
path = "data/dogscats/"
```
A few basic libraries that we'll need for the initial exercises:
```
from __future__ import division,print_function
import os, json
from glob import glob
import numpy as np
np.set_printoptions(precision=4, linewidth=100)
from matplotlib import pyplot as plt
```
We have created a file most imaginatively called 'utils.py' to store any little convenience functions we'll want to use. We will discuss these as we use them.
```
import utils; reload(utils)
from utils import plots
```
# Use a pretrained VGG model with our **Vgg16** class
Our first step is simply to use a model that has been fully created for us, which can recognise a wide variety (1,000 categories) of images. We will use 'VGG', which won the 2014 Imagenet competition, and is a very simple model to create and understand. The VGG Imagenet team created both a larger, slower, slightly more accurate model (*VGG 19*) and a smaller, faster model (*VGG 16*). We will be using VGG 16 since the much slower performance of VGG19 is generally not worth the very minor improvement in accuracy.
We have created a python class, *Vgg16*, which makes using the VGG 16 model very straightforward.
## The punchline: state of the art custom model in 7 lines of code
Here's everything you need to do to get >97% accuracy on the Dogs vs Cats dataset - we won't analyze how it works behind the scenes yet, since at this stage we're just going to focus on the minimum necessary to actually do useful work.
```
# As large as you can, but no larger than 64 is recommended.
# If you have an older or cheaper GPU, you'll run out of memory, so will have to decrease this.
batch_size=8
# Import our class, and instantiate
import vgg16; reload(vgg16)
from vgg16 import Vgg16
vgg = Vgg16()
# Grab a few images at a time for training and validation.
# NB: They must be in subdirectories named based on their category
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size*2)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)
```
The code above will work for any image recognition task, with any number of categories! All you have to do is to put your images into one folder per category, and run the code above.
Let's take a look at how this works, step by step...
## Use Vgg16 for basic image recognition
Let's start off by using the *Vgg16* class to recognise the main imagenet category for each image.
We won't be able to enter the Cats vs Dogs competition with an Imagenet model alone, since 'cat' and 'dog' are not categories in Imagenet - instead each individual breed is a separate category. However, we can use it to see how well it can recognise the images, which is a good first step.
First, create a Vgg16 object:
```
#vgg = Vgg16()
```
Vgg16 is built on top of *Keras* (which we will be learning much more about shortly!), a flexible, easy to use deep learning library that sits on top of Theano or Tensorflow. Keras reads groups of images and labels in *batches*, using a fixed directory structure, where images from each category for training must be placed in a separate folder.
Let's grab batches of data from our training folder:
```
batches = vgg.get_batches(path+'train', batch_size=4)
```
(BTW, when Keras refers to 'classes', it doesn't mean python classes - but rather it refers to the categories of the labels, such as 'pug', or 'tabby'.)
*Batches* is just a regular python iterator. Each iteration returns both the images themselves, as well as the labels.
```
imgs,labels = next(batches)
```
As you can see, the labels for each image are an array, containing a 1 in the first position if it's a cat, and in the second position if it's a dog. This approach to encoding categorical variables, where the array contains just a single 1 in the position corresponding to the category, is very common in deep learning. It is called *one hot encoding*.
The arrays contain two elements, because we have two categories (cat, and dog). If we had three categories (e.g. cats, dogs, and kangaroos), then the arrays would each contain two 0's, and one 1.
```
plots(imgs, titles=labels)
```
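To make the one-hot encoding above concrete, here is a tiny standalone sketch (the label values are made up, and this is plain numpy rather than anything Keras does internally):
```
import numpy as np

labels = ['cat', 'dog', 'dog', 'cat']
categories = ['cat', 'dog']

# One row per image, with a single 1 in the column of the true category
one_hot = np.zeros((len(labels), len(categories)))
for i, label in enumerate(labels):
    one_hot[i, categories.index(label)] = 1.
print(one_hot)
```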
We can now pass the images to Vgg16's predict() function to get back probabilities, category indexes, and category names for each image's VGG prediction.
```
vgg.predict(imgs, True)
```
The category indexes are based on the ordering of categories used in the VGG model - e.g. here are the first few:
```
vgg.classes
```
(Note that, other than creating the Vgg16 object, none of these steps are necessary to build a model; they are just showing how to use the class to view imagenet predictions.)
## Use our Vgg16 class to finetune a Dogs vs Cats model
To change our model so that it outputs "cat" vs "dog", instead of one of 1,000 very specific categories, we need to use a process called "finetuning". Finetuning looks from the outside to be identical to normal machine learning training - we provide a training set with data and labels to learn from, and a validation set to test against. The model learns a set of parameters based on the data provided.
However, the difference is that we start with a model that is already trained to solve a similar problem. The idea is that many of the parameters should be very similar, or the same, between the existing model, and the model we wish to create. Therefore, we only select a subset of parameters to train, and leave the rest untouched. This happens automatically when we call *fit()* after calling *finetune()*.
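Conceptually, finetuning here means replacing the model's final 1,000-way output layer with a fresh 2-way layer and freezing everything else. The snippet below is only an illustrative sketch of that idea in Keras 1-style code - it is not the actual `Vgg16.finetune()` implementation, and the optimizer settings are assumptions:
```
from keras.layers.core import Dense
from keras.optimizers import RMSprop

def finetune_sketch(model, num_classes):
    # Drop the existing imagenet output layer
    model.pop()
    # Freeze the remaining layers so only the new layer gets trained
    for layer in model.layers:
        layer.trainable = False
    # Add a fresh output layer for our categories (e.g. cat vs dog)
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(optimizer=RMSprop(lr=0.001),
                  loss='categorical_crossentropy', metrics=['accuracy'])
    return model
```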
We create our batches just like before, and making the validation set available as well. A 'batch' (or *mini-batch* as it is commonly known) is simply a subset of the training data - we use a subset at a time when training or predicting, in order to speed up training, and to avoid running out of memory.
```
batch_size=8
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size)
```
Calling *finetune()* modifies the model such that it will be trained based on the data in the batches provided - in this case, to predict either 'dog' or 'cat'.
```
vgg.finetune(batches)
```
Finally, we *fit()* the parameters of the model using the training data, reporting the accuracy on the validation set after every epoch. (An *epoch* is one full pass through the training data.)
```
vgg.fit(batches, val_batches, nb_epoch=1)
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)
```
That shows all of the steps involved in using the Vgg16 class to create an image recognition model using whatever labels you are interested in. For instance, this process could classify paintings by style, or leaves by type of disease, or satellite photos by type of crop, and so forth.
Next up, we'll dig one level deeper to see what's going on in the Vgg16 class.
# Create a VGG model from scratch in Keras
For the rest of this tutorial, we will not be using the Vgg16 class at all. Instead, we will recreate from scratch the functionality we just used. This is not necessary if all you want to do is use the existing model - but if you want to create your own models, you'll need to understand these details. It will also help you in the future when you debug any problems with your models, since you'll understand what's going on behind the scenes.
## Model setup
We need to import all the modules we'll be using from numpy, scipy, and keras:
```
from numpy.random import random, permutation
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
import keras
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.models import Sequential, Model
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers import Input
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, RMSprop
from keras.preprocessing import image
```
Let's import the mappings from VGG ids to imagenet category ids and descriptions, for display purposes later.
```
FILES_PATH = 'http://files.fast.ai/models/'; CLASS_FILE='imagenet_class_index.json'
# Keras' get_file() is a handy function that downloads files, and caches them for re-use later
fpath = get_file(CLASS_FILE, FILES_PATH+CLASS_FILE, cache_subdir='models')
with open(fpath) as f: class_dict = json.load(f)
# Convert dictionary with string indexes into an array
classes = [class_dict[str(i)][1] for i in range(len(class_dict))]
```
Here are a few examples of the categories we just imported:
```
classes[:5]
```
## Model creation
Creating the model involves creating the model architecture, and then loading the model weights into that architecture. We will start by defining the basic pieces of the VGG architecture.
VGG has just one type of convolutional block, and one type of fully connected ('dense') block. Here's the convolutional block definition:
```
def ConvBlock(layers, model, filters):
for i in range(layers):
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(filters, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
```
...and here's the fully-connected definition.
```
def FCBlock(model):
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
```
When the VGG model was trained in 2014, the creators subtracted the average of each of the three (R,G,B) channels first, so that the data for each channel had a mean of zero. Furthermore, their software expected the channels to be in B,G,R order, whereas Python by default uses R,G,B. We need to preprocess our data to make these two changes, so that it is compatible with the VGG model:
```
# Mean of each channel as provided by VGG researchers
vgg_mean = np.array([123.68, 116.779, 103.939]).reshape((3,1,1))
def vgg_preprocess(x):
x = x - vgg_mean # subtract mean
    return x[:, ::-1] # reverse axis rgb->bgr
```
Now we're ready to define the VGG model architecture - look at how simple it is, now that we have the basic blocks defined!
```
def VGG_16():
model = Sequential()
model.add(Lambda(vgg_preprocess, input_shape=(3,224,224)))
ConvBlock(2, model, 64)
ConvBlock(2, model, 128)
ConvBlock(3, model, 256)
ConvBlock(3, model, 512)
ConvBlock(3, model, 512)
model.add(Flatten())
FCBlock(model)
FCBlock(model)
model.add(Dense(1000, activation='softmax'))
return model
```
We'll learn about what these different blocks do later in the course. For now, it's enough to know that:
- Convolution layers are for finding patterns in images
- Dense (fully connected) layers are for combining patterns across an image
Now that we've defined the architecture, we can create the model like any python object:
```
model = VGG_16()
```
As well as the architecture, we need the weights that the VGG creators trained. The weights are the part of the model that is learnt from the data, whereas the architecture is pre-defined based on the nature of the problem.
Downloading pre-trained weights is much preferred to training the model ourselves, since otherwise we would have to download the entire Imagenet archive, and train the model for many days! It's very helpful when researchers release their weights, as they did here.
```
fpath = get_file('vgg16.h5', FILES_PATH+'vgg16.h5', cache_subdir='models')
model.load_weights(fpath)
```
## Getting imagenet predictions
The setup of the imagenet model is now complete, so all we have to do is grab a batch of images and call *predict()* on them.
```
batch_size = 4
```
Keras provides functionality to create batches of data from directories containing images; all we have to do is to define the size to resize the images to, what type of labels to create, whether to randomly shuffle the images, and how many images to include in each batch. We use this little wrapper to define some helpful defaults appropriate for imagenet data:
```
def get_batches(dirname, gen=image.ImageDataGenerator(), shuffle=True,
batch_size=batch_size, class_mode='categorical'):
return gen.flow_from_directory(path+dirname, target_size=(224,224),
class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)
```
From here we can use exactly the same steps as before to look at predictions from the model.
```
batches = get_batches('train', batch_size=batch_size)
val_batches = get_batches('valid', batch_size=batch_size)
imgs,labels = next(batches)
# This shows the 'ground truth'
plots(imgs, titles=labels)
```
The VGG model returns 1,000 probabilities for each image, representing the probability that the model assigns to each possible imagenet category for each image. By finding the index with the largest probability (with *np.argmax()*) we can find the predicted label.
```
def pred_batch(imgs):
preds = model.predict(imgs)
idxs = np.argmax(preds, axis=1)
print('Shape: {}'.format(preds.shape))
print('First 5 classes: {}'.format(classes[:5]))
print('First 5 probabilities: {}\n'.format(preds[0, :5]))
print('Predictions prob/class: ')
for i in range(len(idxs)):
idx = idxs[i]
print (' {:.4f}/{}'.format(preds[i, idx], classes[idx]))
pred_batch(imgs)
```
|
github_jupyter
|
#!pip install 'keras<2'
#!pip install 'h5py'
%matplotlib inline
%cd fastai_courses/deeplearning1/nbs
# path = "data/dogscats/"
path = "data/dogscats/"
from __future__ import division,print_function
import os, json
from glob import glob
import numpy as np
np.set_printoptions(precision=4, linewidth=100)
from matplotlib import pyplot as plt
import utils; reload(utils)
from utils import plots
# As large as you can, but no larger than 64 is recommended.
# If you have an older or cheaper GPU, you'll run out of memory, so will have to decrease this.
batch_size=8
# Import our class, and instantiate
import vgg16; reload(vgg16)
from vgg16 import Vgg16
vgg = Vgg16()
# Grab a few images at a time for training and validation.
# NB: They must be in subdirectories named based on their category
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size*2)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)
#vgg = Vgg16()
batches = vgg.get_batches(path+'train', batch_size=4)
imgs,labels = next(batches)
plots(imgs, titles=labels)
vgg.predict(imgs, True)
vgg.classes
batch_size=8
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)
from numpy.random import random, permutation
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
import keras
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.models import Sequential, Model
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers import Input
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, RMSprop
from keras.preprocessing import image
FILES_PATH = 'http://files.fast.ai/models/'; CLASS_FILE='imagenet_class_index.json'
# Keras' get_file() is a handy function that downloads files, and caches them for re-use later
fpath = get_file(CLASS_FILE, FILES_PATH+CLASS_FILE, cache_subdir='models')
with open(fpath) as f: class_dict = json.load(f)
# Convert dictionary with string indexes into an array
classes = [class_dict[str(i)][1] for i in range(len(class_dict))]
classes[:5]
def ConvBlock(layers, model, filters):
for i in range(layers):
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(filters, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
def FCBlock(model):
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
# Mean of each channel as provided by VGG researchers
vgg_mean = np.array([123.68, 116.779, 103.939]).reshape((3,1,1))
def vgg_preprocess(x):
x = x - vgg_mean # subtract mean
    return x[:, ::-1] # reverse axis rgb->bgr
def VGG_16():
model = Sequential()
model.add(Lambda(vgg_preprocess, input_shape=(3,224,224)))
ConvBlock(2, model, 64)
ConvBlock(2, model, 128)
ConvBlock(3, model, 256)
ConvBlock(3, model, 512)
ConvBlock(3, model, 512)
model.add(Flatten())
FCBlock(model)
FCBlock(model)
model.add(Dense(1000, activation='softmax'))
return model
model = VGG_16()
fpath = get_file('vgg16.h5', FILES_PATH+'vgg16.h5', cache_subdir='models')
model.load_weights(fpath)
batch_size = 4
def get_batches(dirname, gen=image.ImageDataGenerator(), shuffle=True,
batch_size=batch_size, class_mode='categorical'):
return gen.flow_from_directory(path+dirname, target_size=(224,224),
class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)
batches = get_batches('train', batch_size=batch_size)
val_batches = get_batches('valid', batch_size=batch_size)
imgs,labels = next(batches)
# This shows the 'ground truth'
plots(imgs, titles=labels)
def pred_batch(imgs):
preds = model.predict(imgs)
idxs = np.argmax(preds, axis=1)
print('Shape: {}'.format(preds.shape))
print('First 5 classes: {}'.format(classes[:5]))
print('First 5 probabilities: {}\n'.format(preds[0, :5]))
print('Predictions prob/class: ')
for i in range(len(idxs)):
idx = idxs[i]
print (' {:.4f}/{}'.format(preds[i, idx], classes[idx]))
pred_batch(imgs)
| 0.667906 | 0.984913 |
# Loading data for a Language Model
```
%install-location $cwd/swift-install
%install-swiftpm-flags -c release
%install '.package(url: "https://github.com/tensorflow/swift-models", .branch("master"))' Batcher ModelSupport Datasets
import TensorFlow
import Batcher
import Foundation
import ModelSupport
import Datasets
```
## Downloading the data
First things first, we will need to download the data somewhere. We use `DatasetUtilities` for this. You just need to split the URL of the archived file into the filename, the extension, and the rest of the host URL, then specify the folder where you want it downloaded. The function `.downloadResource` will then automatically download the archive (if needed) and inflate it (in the process, the folder you specified will be created if it didn't exist).
```
let cwdURL = URL(fileURLWithPath: FileManager.default.currentDirectoryPath)
let dataFolder = DatasetUtilities.downloadResource(
filename: "wikitext-2",
fileExtension: "tgz",
remoteRoot: URL(string: "https://s3.amazonaws.com/fast-ai-nlp/")!,
localStorageDirectory: cwdURL.appendingPathComponent("data/", isDirectory: true)
)
var trainTexts = try! String(contentsOf: dataFolder.appendingPathComponent("train.csv"), encoding: .utf8)
public func readCSV(in file: URL) -> [String] {
let rawText = try! String(contentsOf: file, encoding: .utf8)
var rows = rawText.components(separatedBy: "\"\n\"")
//Removing the initial "
rows[0] = String(rows[0].dropFirst())
//Removing the last "\n
rows[rows.indices.last!] = String(rows.last!.dropLast(2))
return rows
}
let trainTexts = readCSV(in: dataFolder.appendingPathComponent("train.csv"))
let validTexts = readCSV(in: dataFolder.appendingPathComponent("test.csv"))
trainTexts[0]
```
## From texts to numbers
A model won't be able to train on raw texts like the one above. We will need to convert them into numbers first. To do this, there are two different steps: transforming a text into a list of *words* (called tokens) and then transforming those tokens into numbers. Those steps are usually called tokenization and numericalization in NLP.
### Tokenization
Tokenizing a text is converting it into a list of meaningful tokens. There are several ways to do this:
- character-level tokenization just splits the texts in an array of characters
- word-level tokenization splits the texts by words or punctuation symbols
- subword-level tokenization splits the texts by subwords (particularly useful in languages like Turkish or German where you can build longer words by adding prefixes or suffixes)
While character-level tokenization is pretty straightforward, the two other kinds are a bit trickier. How do you split a word like "don't", for instance, which is actually "do not"? In our case, we don't have to worry about that since the wikitext data we downloaded has been pre-tokenized, so we can just split on spaces. (Alternatively, we could train a BPE tokenizer on those texts.)
```
func easyTokenize(_ text: String) -> [String] {
return text.components(separatedBy: " ")
}
let trainTokenizedTexts = trainTexts.map(easyTokenize)
let validTokenizedTexts = validTexts.map(easyTokenize)
```
### Numericalization
Once our texts are split into tokens, we can build a mapping from each token to a unique index and use it to convert them into numbers. We usually try to limit the size of the vocabulary by keeping only the most common tokens, i.e. removing tokens that appear fewer than a given number of times. All tokens that are not part of the vocabulary will be changed to `<unk>` (for unknown).
So first, let's count how many times each token is used in our texts. We also save the length of each text since we will need that later on.
```
func countTokens(_ texts: [[String]]) -> ([Int], [String:Int]) {
var counts: [String:Int] = [:]
var lengths: [Int] = []
for tokens in texts {
lengths.append(tokens.count)
for token in tokens {
counts[token] = (counts[token] ?? 0) + 1
}
}
return (lengths,counts)
}
```
We only use the training set to build our vocabulary.
```
let (trainLengths, trainCounts) = countTokens(trainTokenizedTexts)
```
Then the following function will create a vocabulary containing all the most frequent words up to `maxCount`, and with a minimum frequency of `minFrequency` (NB: a language model can barely learn anything about words rarely present in the dataset). We return a tuple with the two mappings int to string and string to int (often called itos and stoi in NLP).
```
func makeVocabulary(
_ counts: [String:Int],
minFrequency: Int = 2,
maxCount: Int = 60000)
-> (itos: [Int:String], stoi: [String:Int]) {
let withoutSpec = counts.filter { $0.0 != "xxunk" && $0.0 != "xxpad" }
let sorted = withoutSpec.sorted { $0.1 > $1.1 }
var itos: [Int:String] = [0:"xxunk", 1:"xxpad"]
var stoi: [String:Int] = ["xxunk":0, "xxpad":1]
for (i,x) in sorted.enumerated() {
if i+2 >= maxCount || x.1 < minFrequency { break }
itos[i+2] = (x.0)
stoi[x.0] = i+2
}
return (itos: itos, stoi: stoi)
}
```
Let's use our previous counts to build a vocabulary:
```
let vocabulary = makeVocabulary(trainCounts)
```
We can then use it to numericalize our tokenized texts. Let's first check the index of the unknown token (`<unk>` appears literally in the WikiText data), so we can use it as the fallback for out-of-vocabulary words.
```
vocabulary.stoi["<unk>"]
func numericalize(_ tokens: [String], with stoi: [String:Int]) -> [Int] {
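    // The fallback index 6 is presumably the value of `vocabulary.stoi["<unk>"]` shown above; out-of-vocabulary tokens map to it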
return tokens.map { stoi[$0] ?? 6 }
}
```
And we can apply it to all our tokenized texts:
```
let trainNumericalizedTexts = trainTokenizedTexts.map{ numericalize($0, with: vocabulary.stoi) }
let validNumericalizedTexts = validTokenizedTexts.map{ numericalize($0, with: vocabulary.stoi) }
```
## Understanding the language model dataset
A language model's task is to guess the next word in a stream of text. Given a list of tokenized and numericalized texts, we usually concatenate them all into one big stream, split it into `batchSize` chunks of contiguous text, then read through those chunks `sequenceLength` tokens at a time.
Let's look at an example:
```
let items = [[0,1,2,3,4],[5,6,7,8,9,10],[11,12,13,14,15,16,17,18],[19,20],[21,22]]
let dataset = LanguageModelDataset(batchSize: 4, sequenceLength: 3, numericalizedTexts: items)
```
Here our stream is the sequence of integers from 0 to 22. With a batch size of 4, we split it into four chunks, which are:
```
0,1,2,3,4
5,6,7,8,9
10,11,12,13,14
15,16,17,18,19
```
The last three elements of the stream (20, 21, 22) are thrown away because 23 is not a round multiple of 4.
Then, reading with a sequenceLength of 3, the first batch has as input
```
0,1,2
5,6,7
10,11,12
15,16,17
```
and as targets, the next words:
```
1,2,3
6,7,8
11,12,13
16,17,18
```
Let's put our dataset in a batcher to check it does all of this for us:
```
let batcher = Batcher(on: dataset, batchSize: 4)
for x in batcher.sequenced() { print(x) }
```
The first batch is as expected, and the second one has a sequence length of only 2, because our big chunks of text have a length of 5 here.
Behind the scenes, `LanguageModelDataset` implements a new collection with the proper length and subscript, returning input/target pairs of text (and not the raw texts of varying lengths).
With shuffling enabled, the texts are shuffled before being concatenated to form the stream. We just need to pass `languageModelSample` as the `sampleIndices` function.
```
let batcher = Batcher(on: dataset, batchSize: 4, shuffle: true, sampleIndices: languageModelSample)
for x in batcher.sequenced() { print(x) }
```
## Applying it to our texts
We can create a `LanguageModelDataset` from all our texts. Since it needs the length of every sample to work, we can pass the array of text lengths to speed up initialization (if we don't, it makes a pass over the dataset to compute them).
```
let trainSet = LanguageModelDataset(
batchSize: 64,
sequenceLength: 72,
numericalizedTexts: trainNumericalizedTexts,
lengths: trainLengths
)
let validSet = LanguageModelDataset(
batchSize: 64,
sequenceLength: 72,
numericalizedTexts: validNumericalizedTexts
)
```
And we can batch our samples:
```
let trainBatcher = Batcher(on: trainSet, batchSize: 64, numWorkers: 4,
shuffle: true, sampleIndices: languageModelSample)
let validBatcher = Batcher(on: validSet, batchSize: 64, numWorkers: 4,
sampleIndices: languageModelSample)
let b = trainBatcher.sequenced().first {_ in true}!
```
To iterate through our batches, we just use `.sequenced()` again. Here let's check we do read through the texts in order by storing the first five batches.
```
var samples: [TensorPair<Int32,Int32>] = []
for b in trainBatcher.sequenced() {
//Put the training loop here
if samples.count < 5 { samples.append(b) }
}
```
To show one of the lines of our tensor, we will use this function:
```
func showText(_ x: Tensor<Int32>) -> String {
var tokens = x.scalars.map { vocabulary.itos[Int($0)]! }
return tokens.joined(separator: " ")
}
```
Now let's look at the first row of our first batch:
```
showText(samples[0].first[0])
```
The targets are just shifted one word to the right:
```
showText(samples[0].second[0])
```
In the second sample, we pick up exactly where the first batch stopped:
```
showText(samples[1].first[0])
```
And this works on any rows:
```
showText(samples[0].first[1])
showText(samples[1].first[1])
```
|
github_jupyter
|
%install-location $cwd/swift-install
%install-swiftpm-flags -c release
%install '.package(url: "https://github.com/tensorflow/swift-models", .branch("master"))' Batcher ModelSupport Datasets
import TensorFlow
import Batcher
import Foundation
import ModelSupport
import Datasets
let cwdURL = URL(fileURLWithPath: FileManager.default.currentDirectoryPath)
let dataFolder = DatasetUtilities.downloadResource(
filename: "wikitext-2",
fileExtension: "tgz",
remoteRoot: URL(string: "https://s3.amazonaws.com/fast-ai-nlp/")!,
localStorageDirectory: cwdURL.appendingPathComponent("data/", isDirectory: true)
)
var trainTexts = try! String(contentsOf: dataFolder.appendingPathComponent("train.csv"), encoding: .utf8)
public func readCSV(in file: URL) -> [String] {
let rawText = try! String(contentsOf: file, encoding: .utf8)
var rows = rawText.components(separatedBy: "\"\n\"")
//Removing the initial "
rows[0] = String(rows[0].dropFirst())
//Removing the last "\n
rows[rows.indices.last!] = String(rows.last!.dropLast(2))
return rows
}
let trainTexts = readCSV(in: dataFolder.appendingPathComponent("train.csv"))
let validTexts = readCSV(in: dataFolder.appendingPathComponent("test.csv"))
trainTexts[0]
func easyTokenize(_ text: String) -> [String] {
return text.components(separatedBy: " ")
}
let trainTokenizedTexts = trainTexts.map(easyTokenize)
let validTokenizedTexts = validTexts.map(easyTokenize)
func countTokens(_ texts: [[String]]) -> ([Int], [String:Int]) {
var counts: [String:Int] = [:]
var lengths: [Int] = []
for tokens in texts {
lengths.append(tokens.count)
for token in tokens {
counts[token] = (counts[token] ?? 0) + 1
}
}
return (lengths,counts)
}
let (trainLengths, trainCounts) = countTokens(trainTokenizedTexts)
func makeVocabulary(
_ counts: [String:Int],
minFrequency: Int = 2,
maxCount: Int = 60000)
-> (itos: [Int:String], stoi: [String:Int]) {
let withoutSpec = counts.filter { $0.0 != "xxunk" && $0.0 != "xxpad" }
let sorted = withoutSpec.sorted { $0.1 > $1.1 }
var itos: [Int:String] = [0:"xxunk", 1:"xxpad"]
var stoi: [String:Int] = ["xxunk":0, "xxpad":1]
for (i,x) in sorted.enumerated() {
if i+2 >= maxCount || x.1 < minFrequency { break }
itos[i+2] = (x.0)
stoi[x.0] = i+2
}
return (itos: itos, stoi: stoi)
}
let vocabulary = makeVocabulary(trainCounts)
vocabulary.stoi["<unk>"]
func numericalize(_ tokens: [String], with stoi: [String:Int]) -> [Int] {
return tokens.map { stoi[$0] ?? 6 }
}
let trainNumericalizedTexts = trainTokenizedTexts.map{ numericalize($0, with: vocabulary.stoi) }
let validNumericalizedTexts = validTokenizedTexts.map{ numericalize($0, with: vocabulary.stoi) }
let items = [[0,1,2,3,4],[5,6,7,8,9,10],[11,12,13,14,15,16,17,18],[19,20],[21,22]]
let dataset = LanguageModelDataset(batchSize: 4, sequenceLength: 3, numericalizedTexts: items)
0,1,2,3,4
5,6,7,8,9
10,11,12,13,14
15,16,17,18,19
0,1,2
5,6,7
10,11,12
15,16,17
1,2,3
6,7,8
11,12,13
16,17,18
let batcher = Batcher(on: dataset, batchSize: 4)
for x in batcher.sequenced() { print(x) }
let batcher = Batcher(on: dataset, batchSize: 4, shuffle: true, sampleIndices: languageModelSample)
for x in batcher.sequenced() { print(x) }
let trainSet = LanguageModelDataset(
batchSize: 64,
sequenceLength: 72,
numericalizedTexts: trainNumericalizedTexts,
lengths: trainLengths
)
let validSet = LanguageModelDataset(
batchSize: 64,
sequenceLength: 72,
numericalizedTexts: validNumericalizedTexts
)
let trainBatcher = Batcher(on: trainSet, batchSize: 64, numWorkers: 4,
shuffle: true, sampleIndices: languageModelSample)
let validBatcher = Batcher(on: validSet, batchSize: 64, numWorkers: 4,
sampleIndices: languageModelSample)
let b = trainBatcher.sequenced().first {_ in true}!
var samples: [TensorPair<Int32,Int32>] = []
for b in trainBatcher.sequenced() {
//Put the training loop here
if samples.count < 5 { samples.append(b) }
}
func showText(_ x: Tensor<Int32>) -> String {
var tokens = x.scalars.map { vocabulary.itos[Int($0)]! }
return tokens.joined(separator: " ")
}
showText(samples[0].first[0])
showText(samples[0].second[0])
showText(samples[0].first[0])
showText(samples[0].first[1])
showText(samples[1].first[1])
| 0.694095 | 0.85567 |
```
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
import requests
from bs4 import BeautifulSoup
import pandas as pd
import sys
sys.path.append('..')
from nethack_raph.MonsterGlossary import MONSTERS_GLOSSARY
```
## ARMOR
```
URL = 'https://nethackwiki.com/wiki/Armor'
response = requests.get(URL)
soup = BeautifulSoup(response.text,'html.parser')
tables = soup.find_all('table',{'class':'prettytable'})
tables
table = tables[1]
rows = table.find_all('tr')
# columns = [v.text.replace('\n','') for v in rows[0].find_all('th')]
columns = ['name',
'cost',
'weight',
'AC',
'Weight perAC (+0)',
'max_AC',
'Weight permax AC',
'material',
'effect',
'MC',
'probability',
'magical',
'appearance',
'armor_type']
df = pd.DataFrame(columns=columns)
df
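# Fill the frame row by row; single-cell rows in the wiki table are section headers that give the armor type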
for i in range(1,len(rows)):
tds = rows[i].find_all('td')
    values = [td.text.replace('\n','').replace('\xa0','') for td in tds]
if len(values) == 1:
armor_type = values[0].lower()
else:
values += [armor_type]
df = df.append(pd.Series(values, index=columns), ignore_index=True)
df['magical'] = df['magical'] == 'Yes'
df['appearance'] = [x.replace('*','').replace('--','') for x in df['appearance']]
df = df[df['name'] != 'cornuthaum']
df.loc[df['appearance'] != '', 'name'] = df['appearance']
df.drop(columns = ['Weight perAC (+0)','Weight permax AC', 'appearance'], inplace=True)
df[['AC', 'max_AC', 'cost', 'weight']] = df[['AC', 'max_AC', 'cost', 'weight']].astype(int)
df
data = df.set_index('name').to_dict('index')
data
armor_glossary = """
Glyph 1977 Type: GLYPH_OBJ : ARMOR_CLASS: "leather hat"
Glyph 1978 Type: GLYPH_OBJ : ARMOR_CLASS: "iron skull cap"
Glyph 1979 Type: GLYPH_OBJ : ARMOR_CLASS: "hard hat"
Glyph 1980 Type: GLYPH_OBJ : ARMOR_CLASS: "fedora"
Glyph 1981 Type: GLYPH_OBJ : ARMOR_CLASS: "conical hat"
Glyph 1982 Type: GLYPH_OBJ : ARMOR_CLASS: "conical hat"
Glyph 1983 Type: GLYPH_OBJ : ARMOR_CLASS: "dented pot"
Glyph 1984 Type: GLYPH_OBJ : ARMOR_CLASS: "plumed helmet"
Glyph 1985 Type: GLYPH_OBJ : ARMOR_CLASS: "etched helmet"
Glyph 1986 Type: GLYPH_OBJ : ARMOR_CLASS: "crested helmet"
Glyph 1987 Type: GLYPH_OBJ : ARMOR_CLASS: "visored helmet"
Glyph 1988 Type: GLYPH_OBJ : ARMOR_CLASS: "gray dragon scale mail"
Glyph 1989 Type: GLYPH_OBJ : ARMOR_CLASS: "silver dragon scale mail"
Glyph 1990 Type: GLYPH_OBJ : ARMOR_CLASS: "red dragon scale mail"
Glyph 1991 Type: GLYPH_OBJ : ARMOR_CLASS: "white dragon scale mail"
Glyph 1992 Type: GLYPH_OBJ : ARMOR_CLASS: "orange dragon scale mail"
Glyph 1993 Type: GLYPH_OBJ : ARMOR_CLASS: "black dragon scale mail"
Glyph 1994 Type: GLYPH_OBJ : ARMOR_CLASS: "blue dragon scale mail"
Glyph 1995 Type: GLYPH_OBJ : ARMOR_CLASS: "green dragon scale mail"
Glyph 1996 Type: GLYPH_OBJ : ARMOR_CLASS: "yellow dragon scale mail"
Glyph 1997 Type: GLYPH_OBJ : ARMOR_CLASS: "gray dragon scales"
Glyph 1998 Type: GLYPH_OBJ : ARMOR_CLASS: "silver dragon scales"
Glyph 1999 Type: GLYPH_OBJ : ARMOR_CLASS: "red dragon scales"
Glyph 2000 Type: GLYPH_OBJ : ARMOR_CLASS: "white dragon scales"
Glyph 2001 Type: GLYPH_OBJ : ARMOR_CLASS: "orange dragon scales"
Glyph 2002 Type: GLYPH_OBJ : ARMOR_CLASS: "black dragon scales"
Glyph 2003 Type: GLYPH_OBJ : ARMOR_CLASS: "blue dragon scales"
Glyph 2004 Type: GLYPH_OBJ : ARMOR_CLASS: "green dragon scales"
Glyph 2005 Type: GLYPH_OBJ : ARMOR_CLASS: "yellow dragon scales"
Glyph 2006 Type: GLYPH_OBJ : ARMOR_CLASS: "plate mail"
Glyph 2007 Type: GLYPH_OBJ : ARMOR_CLASS: "crystal plate mail"
Glyph 2008 Type: GLYPH_OBJ : ARMOR_CLASS: "bronze plate mail"
Glyph 2009 Type: GLYPH_OBJ : ARMOR_CLASS: "splint mail"
Glyph 2010 Type: GLYPH_OBJ : ARMOR_CLASS: "banded mail"
Glyph 2011 Type: GLYPH_OBJ : ARMOR_CLASS: "dwarvish mithril-coat"
Glyph 2012 Type: GLYPH_OBJ : ARMOR_CLASS: "elven mithril-coat"
Glyph 2013 Type: GLYPH_OBJ : ARMOR_CLASS: "chain mail"
Glyph 2014 Type: GLYPH_OBJ : ARMOR_CLASS: "crude chain mail"
Glyph 2015 Type: GLYPH_OBJ : ARMOR_CLASS: "scale mail"
Glyph 2016 Type: GLYPH_OBJ : ARMOR_CLASS: "studded leather armor"
Glyph 2017 Type: GLYPH_OBJ : ARMOR_CLASS: "ring mail"
Glyph 2018 Type: GLYPH_OBJ : ARMOR_CLASS: "crude ring mail"
Glyph 2019 Type: GLYPH_OBJ : ARMOR_CLASS: "leather armor"
Glyph 2020 Type: GLYPH_OBJ : ARMOR_CLASS: "leather jacket"
Glyph 2021 Type: GLYPH_OBJ : ARMOR_CLASS: "Hawaiian shirt"
Glyph 2022 Type: GLYPH_OBJ : ARMOR_CLASS: "T-shirt"
Glyph 2023 Type: GLYPH_OBJ : ARMOR_CLASS: "mummy wrapping"
Glyph 2024 Type: GLYPH_OBJ : ARMOR_CLASS: "faded pall"
Glyph 2025 Type: GLYPH_OBJ : ARMOR_CLASS: "coarse mantelet"
Glyph 2026 Type: GLYPH_OBJ : ARMOR_CLASS: "hooded cloak"
Glyph 2027 Type: GLYPH_OBJ : ARMOR_CLASS: "slippery cloak"
Glyph 2028 Type: GLYPH_OBJ : ARMOR_CLASS: "robe"
Glyph 2029 Type: GLYPH_OBJ : ARMOR_CLASS: "apron"
Glyph 2030 Type: GLYPH_OBJ : ARMOR_CLASS: "leather cloak"
Glyph 2031 Type: GLYPH_OBJ : ARMOR_CLASS: "tattered cape"
Glyph 2032 Type: GLYPH_OBJ : ARMOR_CLASS: "opera cloak"
Glyph 2033 Type: GLYPH_OBJ : ARMOR_CLASS: "ornamental cope"
Glyph 2034 Type: GLYPH_OBJ : ARMOR_CLASS: "piece of cloth"
Glyph 2035 Type: GLYPH_OBJ : ARMOR_CLASS: "small shield"
Glyph 2036 Type: GLYPH_OBJ : ARMOR_CLASS: "blue and green shield"
Glyph 2037 Type: GLYPH_OBJ : ARMOR_CLASS: "white-handed shield"
Glyph 2038 Type: GLYPH_OBJ : ARMOR_CLASS: "red-eyed shield"
Glyph 2039 Type: GLYPH_OBJ : ARMOR_CLASS: "large shield"
Glyph 2040 Type: GLYPH_OBJ : ARMOR_CLASS: "large round shield"
Glyph 2041 Type: GLYPH_OBJ : ARMOR_CLASS: "polished silver shield"
Glyph 2042 Type: GLYPH_OBJ : ARMOR_CLASS: "old gloves"
Glyph 2043 Type: GLYPH_OBJ : ARMOR_CLASS: "padded gloves"
Glyph 2044 Type: GLYPH_OBJ : ARMOR_CLASS: "riding gloves"
Glyph 2045 Type: GLYPH_OBJ : ARMOR_CLASS: "fencing gloves"
Glyph 2046 Type: GLYPH_OBJ : ARMOR_CLASS: "walking shoes"
Glyph 2047 Type: GLYPH_OBJ : ARMOR_CLASS: "hard shoes"
Glyph 2048 Type: GLYPH_OBJ : ARMOR_CLASS: "jackboots"
Glyph 2049 Type: GLYPH_OBJ : ARMOR_CLASS: "combat boots"
Glyph 2050 Type: GLYPH_OBJ : ARMOR_CLASS: "jungle boots"
Glyph 2051 Type: GLYPH_OBJ : ARMOR_CLASS: "hiking boots"
Glyph 2052 Type: GLYPH_OBJ : ARMOR_CLASS: "mud boots"
Glyph 2053 Type: GLYPH_OBJ : ARMOR_CLASS: "buckled boots"
Glyph 2054 Type: GLYPH_OBJ : ARMOR_CLASS: "riding boots"
Glyph 2055 Type: GLYPH_OBJ : ARMOR_CLASS: "snow boots"
"""
armor_glossary = armor_glossary.split('\n')[1:-1]
armor_glossary = [x.split('"') for x in armor_glossary]
armor_glossary = [(int(x[0].split()[1]), x[1]) for x in armor_glossary]
armor_glossary = {x:{'name': y} for x, y in armor_glossary}
armor_glossary
mapping = {v['name']: v['name'] for v in armor_glossary.values() if v['name'] in data}
mapping = {
**mapping,
**{x: ' '.join(x.split(' ')[1:]) for x in
['gray dragon scale mail',
'silver dragon scale mail',
'red dragon scale mail',
'white dragon scale mail',
'orange dragon scale mail',
'black dragon scale mail',
'blue dragon scale mail',
'green dragon scale mail',
'yellow dragon scale mail',
'gray dragon scales',
'silver dragon scales',
'red dragon scales',
'white dragon scales',
'orange dragon scales',
'black dragon scales',
'blue dragon scales',
'green dragon scales',
'yellow dragon scales',]
},
**{x: ' '.join(x.split(' ')[:-1]) for x in
['blue and green shield',
'white-handed shield',
'red-eyed shield',
'large round shield',
'polished silver shield',]
},
**{'plate mail': 'plate mail (tanko)'}
}
armor_glossary = {
k: {**v, **data[mapping[v['name']]]} for k, v in armor_glossary.items()
}
armor_glossary
```
## WEAPON
```
URL = 'https://nethackwiki.com/wiki/Weapon'
response = requests.get(URL)
soup = BeautifulSoup(response.text,'html.parser')
tables = soup.find_all('table',{'class':'prettytable'})
tables[1]
table = tables[1]
rows = table.find_all('tr')
# columns = [v.text.replace('\n','') for v in rows[0].find_all('th')]
columns = ['name',
'skill',
'cost',
'weight',
'probability',
'damage_S',
'damage_L',
'material',
'appearance'
]
df = pd.DataFrame(columns=columns)
for i in range(1,len(rows)):
tds = rows[i].find_all('td')
values = [td.text.replace('\n','') for td in tds][:-2]
df = df.append(pd.Series(values, index=columns), ignore_index=True)
df = df[df['name'] != 'runesword']
df.loc[df['appearance'] != '', 'name'] = df['appearance']
df['weight'] = df['weight'].astype(int)
df['cost'] = df['cost'].apply(lambda x: int(x.split()[0]))
df.drop(columns = ['appearance'], inplace=True)
df
data = df.set_index('name').to_dict('index')
data
weapon_glossary = """
Glyph 1907 Type: GLYPH_OBJ : WEAPON_CLASS: "arrow"
Glyph 1908 Type: GLYPH_OBJ : WEAPON_CLASS: "runed arrow"
Glyph 1909 Type: GLYPH_OBJ : WEAPON_CLASS: "crude arrow"
Glyph 1910 Type: GLYPH_OBJ : WEAPON_CLASS: "silver arrow"
Glyph 1911 Type: GLYPH_OBJ : WEAPON_CLASS: "bamboo arrow"
Glyph 1912 Type: GLYPH_OBJ : WEAPON_CLASS: "crossbow bolt"
Glyph 1913 Type: GLYPH_OBJ : WEAPON_CLASS: "dart"
Glyph 1914 Type: GLYPH_OBJ : WEAPON_CLASS: "throwing star"
Glyph 1915 Type: GLYPH_OBJ : WEAPON_CLASS: "boomerang"
Glyph 1916 Type: GLYPH_OBJ : WEAPON_CLASS: "spear"
Glyph 1917 Type: GLYPH_OBJ : WEAPON_CLASS: "runed spear"
Glyph 1918 Type: GLYPH_OBJ : WEAPON_CLASS: "crude spear"
Glyph 1919 Type: GLYPH_OBJ : WEAPON_CLASS: "stout spear"
Glyph 1920 Type: GLYPH_OBJ : WEAPON_CLASS: "silver spear"
Glyph 1921 Type: GLYPH_OBJ : WEAPON_CLASS: "throwing spear"
Glyph 1922 Type: GLYPH_OBJ : WEAPON_CLASS: "trident"
Glyph 1923 Type: GLYPH_OBJ : WEAPON_CLASS: "dagger"
Glyph 1924 Type: GLYPH_OBJ : WEAPON_CLASS: "runed dagger"
Glyph 1925 Type: GLYPH_OBJ : WEAPON_CLASS: "crude dagger"
Glyph 1926 Type: GLYPH_OBJ : WEAPON_CLASS: "silver dagger"
Glyph 1927 Type: GLYPH_OBJ : WEAPON_CLASS: "athame"
Glyph 1928 Type: GLYPH_OBJ : WEAPON_CLASS: "scalpel"
Glyph 1929 Type: GLYPH_OBJ : WEAPON_CLASS: "knife"
Glyph 1930 Type: GLYPH_OBJ : WEAPON_CLASS: "stiletto"
Glyph 1931 Type: GLYPH_OBJ : WEAPON_CLASS: "worm tooth"
Glyph 1932 Type: GLYPH_OBJ : WEAPON_CLASS: "crysknife"
Glyph 1933 Type: GLYPH_OBJ : WEAPON_CLASS: "axe"
Glyph 1934 Type: GLYPH_OBJ : WEAPON_CLASS: "double-headed axe"
Glyph 1935 Type: GLYPH_OBJ : WEAPON_CLASS: "short sword"
Glyph 1936 Type: GLYPH_OBJ : WEAPON_CLASS: "runed short sword"
Glyph 1937 Type: GLYPH_OBJ : WEAPON_CLASS: "crude short sword"
Glyph 1938 Type: GLYPH_OBJ : WEAPON_CLASS: "broad short sword"
Glyph 1939 Type: GLYPH_OBJ : WEAPON_CLASS: "curved sword"
Glyph 1940 Type: GLYPH_OBJ : WEAPON_CLASS: "silver saber"
Glyph 1941 Type: GLYPH_OBJ : WEAPON_CLASS: "broadsword"
Glyph 1942 Type: GLYPH_OBJ : WEAPON_CLASS: "runed broadsword"
Glyph 1943 Type: GLYPH_OBJ : WEAPON_CLASS: "long sword"
Glyph 1944 Type: GLYPH_OBJ : WEAPON_CLASS: "two-handed sword"
Glyph 1945 Type: GLYPH_OBJ : WEAPON_CLASS: "samurai sword"
Glyph 1946 Type: GLYPH_OBJ : WEAPON_CLASS: "long samurai sword"
Glyph 1947 Type: GLYPH_OBJ : WEAPON_CLASS: "runed broadsword"
Glyph 1948 Type: GLYPH_OBJ : WEAPON_CLASS: "vulgar polearm"
Glyph 1949 Type: GLYPH_OBJ : WEAPON_CLASS: "hilted polearm"
Glyph 1950 Type: GLYPH_OBJ : WEAPON_CLASS: "forked polearm"
Glyph 1951 Type: GLYPH_OBJ : WEAPON_CLASS: "single-edged polearm"
Glyph 1952 Type: GLYPH_OBJ : WEAPON_CLASS: "lance"
Glyph 1953 Type: GLYPH_OBJ : WEAPON_CLASS: "angled poleaxe"
Glyph 1954 Type: GLYPH_OBJ : WEAPON_CLASS: "long poleaxe"
Glyph 1955 Type: GLYPH_OBJ : WEAPON_CLASS: "pole cleaver"
Glyph 1956 Type: GLYPH_OBJ : WEAPON_CLASS: "broad pick"
Glyph 1957 Type: GLYPH_OBJ : WEAPON_CLASS: "pole sickle"
Glyph 1958 Type: GLYPH_OBJ : WEAPON_CLASS: "pruning hook"
Glyph 1959 Type: GLYPH_OBJ : WEAPON_CLASS: "hooked polearm"
Glyph 1960 Type: GLYPH_OBJ : WEAPON_CLASS: "pronged polearm"
Glyph 1961 Type: GLYPH_OBJ : WEAPON_CLASS: "beaked polearm"
Glyph 1962 Type: GLYPH_OBJ : WEAPON_CLASS: "mace"
Glyph 1963 Type: GLYPH_OBJ : WEAPON_CLASS: "morning star"
Glyph 1964 Type: GLYPH_OBJ : WEAPON_CLASS: "war hammer"
Glyph 1965 Type: GLYPH_OBJ : WEAPON_CLASS: "club"
Glyph 1966 Type: GLYPH_OBJ : WEAPON_CLASS: "rubber hose"
Glyph 1967 Type: GLYPH_OBJ : WEAPON_CLASS: "staff"
Glyph 1968 Type: GLYPH_OBJ : WEAPON_CLASS: "thonged club"
Glyph 1969 Type: GLYPH_OBJ : WEAPON_CLASS: "flail"
Glyph 1970 Type: GLYPH_OBJ : WEAPON_CLASS: "bullwhip"
Glyph 1971 Type: GLYPH_OBJ : WEAPON_CLASS: "bow"
Glyph 1972 Type: GLYPH_OBJ : WEAPON_CLASS: "runed bow"
Glyph 1973 Type: GLYPH_OBJ : WEAPON_CLASS: "crude bow"
Glyph 1974 Type: GLYPH_OBJ : WEAPON_CLASS: "long bow"
Glyph 1975 Type: GLYPH_OBJ : WEAPON_CLASS: "sling"
Glyph 1976 Type: GLYPH_OBJ : WEAPON_CLASS: "crossbow"
"""
weapon_glossary = weapon_glossary.split('\n')[1:-1]
weapon_glossary = [x.split('"') for x in weapon_glossary]
weapon_glossary = [(int(x[0].split()[1]), x[1]) for x in weapon_glossary]
weapon_glossary = {x:{'name': y} for x, y in weapon_glossary}
weapon_glossary
mapping = {v['name']: v['name'] for v in weapon_glossary.values() if v['name'] in data}
mapping = {
**mapping,
**{
'knife': 'knife (shito)',
'lance': 'lance (1)',
'short sword': 'short sword (wakizashi)',
'broadsword': 'broadsword (ninja-to)',
'flail': 'flail (nunchaku)',
}
}
[v['name'] for v in weapon_glossary.values() if v['name'] not in mapping]
[k for k in data.keys() if k not in mapping.values()]
weapon_glossary = {
k: {**v, **data[mapping[v['name']]]} for k, v in weapon_glossary.items()
}
weapon_glossary
```
|
github_jupyter
|
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
import requests
from bs4 import BeautifulSoup
import pandas as pd
import sys
sys.path.append('..')
from nethack_raph.MonsterGlossary import MONSTERS_GLOSSARY
URL = 'https://nethackwiki.com/wiki/Armor'
response = requests.get(URL)
soup = BeautifulSoup(response.text,'html.parser')
tables = soup.find_all('table',{'class':'prettytable'})
tables
table = tables[1]
rows = table.find_all('tr')
# columns = [v.text.replace('\n','') for v in rows[0].find_all('th')]
columns = ['name',
'cost',
'weight',
'AC',
'Weight perAC (+0)',
'max_AC',
'Weight permax AC',
'material',
'effect',
'MC',
'probability',
'magical',
'appearance',
'armor_type']
df = pd.DataFrame(columns=columns)
df
for i in range(1,len(rows)):
tds = rows[i].find_all('td')
values = [td.text.replace('\n',''.replace('\xa0','')) for td in tds]
if len(values) == 1:
armor_type = values[0].lower()
else:
values += [armor_type]
df = df.append(pd.Series(values, index=columns), ignore_index=True)
df['magical'] = df['magical'] == 'Yes'
df['appearance'] = [x.replace('*','').replace('--','') for x in df['appearance']]
df = df[df['name'] != 'cornuthaum']
df.loc[df['appearance'] != '', 'name'] = df['appearance']
df.drop(columns = ['Weight perAC (+0)','Weight permax AC', 'appearance'], inplace=True)
df[['AC', 'max_AC', 'cost', 'weight']] = df[['AC', 'max_AC', 'cost', 'weight']].astype(int)
df
data = df.set_index('name').to_dict('index')
data
armor_glossary = """
Glyph 1977 Type: GLYPH_OBJ : ARMOR_CLASS: "leather hat"
Glyph 1978 Type: GLYPH_OBJ : ARMOR_CLASS: "iron skull cap"
Glyph 1979 Type: GLYPH_OBJ : ARMOR_CLASS: "hard hat"
Glyph 1980 Type: GLYPH_OBJ : ARMOR_CLASS: "fedora"
Glyph 1981 Type: GLYPH_OBJ : ARMOR_CLASS: "conical hat"
Glyph 1982 Type: GLYPH_OBJ : ARMOR_CLASS: "conical hat"
Glyph 1983 Type: GLYPH_OBJ : ARMOR_CLASS: "dented pot"
Glyph 1984 Type: GLYPH_OBJ : ARMOR_CLASS: "plumed helmet"
Glyph 1985 Type: GLYPH_OBJ : ARMOR_CLASS: "etched helmet"
Glyph 1986 Type: GLYPH_OBJ : ARMOR_CLASS: "crested helmet"
Glyph 1987 Type: GLYPH_OBJ : ARMOR_CLASS: "visored helmet"
Glyph 1988 Type: GLYPH_OBJ : ARMOR_CLASS: "gray dragon scale mail"
Glyph 1989 Type: GLYPH_OBJ : ARMOR_CLASS: "silver dragon scale mail"
Glyph 1990 Type: GLYPH_OBJ : ARMOR_CLASS: "red dragon scale mail"
Glyph 1991 Type: GLYPH_OBJ : ARMOR_CLASS: "white dragon scale mail"
Glyph 1992 Type: GLYPH_OBJ : ARMOR_CLASS: "orange dragon scale mail"
Glyph 1993 Type: GLYPH_OBJ : ARMOR_CLASS: "black dragon scale mail"
Glyph 1994 Type: GLYPH_OBJ : ARMOR_CLASS: "blue dragon scale mail"
Glyph 1995 Type: GLYPH_OBJ : ARMOR_CLASS: "green dragon scale mail"
Glyph 1996 Type: GLYPH_OBJ : ARMOR_CLASS: "yellow dragon scale mail"
Glyph 1997 Type: GLYPH_OBJ : ARMOR_CLASS: "gray dragon scales"
Glyph 1998 Type: GLYPH_OBJ : ARMOR_CLASS: "silver dragon scales"
Glyph 1999 Type: GLYPH_OBJ : ARMOR_CLASS: "red dragon scales"
Glyph 2000 Type: GLYPH_OBJ : ARMOR_CLASS: "white dragon scales"
Glyph 2001 Type: GLYPH_OBJ : ARMOR_CLASS: "orange dragon scales"
Glyph 2002 Type: GLYPH_OBJ : ARMOR_CLASS: "black dragon scales"
Glyph 2003 Type: GLYPH_OBJ : ARMOR_CLASS: "blue dragon scales"
Glyph 2004 Type: GLYPH_OBJ : ARMOR_CLASS: "green dragon scales"
Glyph 2005 Type: GLYPH_OBJ : ARMOR_CLASS: "yellow dragon scales"
Glyph 2006 Type: GLYPH_OBJ : ARMOR_CLASS: "plate mail"
Glyph 2007 Type: GLYPH_OBJ : ARMOR_CLASS: "crystal plate mail"
Glyph 2008 Type: GLYPH_OBJ : ARMOR_CLASS: "bronze plate mail"
Glyph 2009 Type: GLYPH_OBJ : ARMOR_CLASS: "splint mail"
Glyph 2010 Type: GLYPH_OBJ : ARMOR_CLASS: "banded mail"
Glyph 2011 Type: GLYPH_OBJ : ARMOR_CLASS: "dwarvish mithril-coat"
Glyph 2012 Type: GLYPH_OBJ : ARMOR_CLASS: "elven mithril-coat"
Glyph 2013 Type: GLYPH_OBJ : ARMOR_CLASS: "chain mail"
Glyph 2014 Type: GLYPH_OBJ : ARMOR_CLASS: "crude chain mail"
Glyph 2015 Type: GLYPH_OBJ : ARMOR_CLASS: "scale mail"
Glyph 2016 Type: GLYPH_OBJ : ARMOR_CLASS: "studded leather armor"
Glyph 2017 Type: GLYPH_OBJ : ARMOR_CLASS: "ring mail"
Glyph 2018 Type: GLYPH_OBJ : ARMOR_CLASS: "crude ring mail"
Glyph 2019 Type: GLYPH_OBJ : ARMOR_CLASS: "leather armor"
Glyph 2020 Type: GLYPH_OBJ : ARMOR_CLASS: "leather jacket"
Glyph 2021 Type: GLYPH_OBJ : ARMOR_CLASS: "Hawaiian shirt"
Glyph 2022 Type: GLYPH_OBJ : ARMOR_CLASS: "T-shirt"
Glyph 2023 Type: GLYPH_OBJ : ARMOR_CLASS: "mummy wrapping"
Glyph 2024 Type: GLYPH_OBJ : ARMOR_CLASS: "faded pall"
Glyph 2025 Type: GLYPH_OBJ : ARMOR_CLASS: "coarse mantelet"
Glyph 2026 Type: GLYPH_OBJ : ARMOR_CLASS: "hooded cloak"
Glyph 2027 Type: GLYPH_OBJ : ARMOR_CLASS: "slippery cloak"
Glyph 2028 Type: GLYPH_OBJ : ARMOR_CLASS: "robe"
Glyph 2029 Type: GLYPH_OBJ : ARMOR_CLASS: "apron"
Glyph 2030 Type: GLYPH_OBJ : ARMOR_CLASS: "leather cloak"
Glyph 2031 Type: GLYPH_OBJ : ARMOR_CLASS: "tattered cape"
Glyph 2032 Type: GLYPH_OBJ : ARMOR_CLASS: "opera cloak"
Glyph 2033 Type: GLYPH_OBJ : ARMOR_CLASS: "ornamental cope"
Glyph 2034 Type: GLYPH_OBJ : ARMOR_CLASS: "piece of cloth"
Glyph 2035 Type: GLYPH_OBJ : ARMOR_CLASS: "small shield"
Glyph 2036 Type: GLYPH_OBJ : ARMOR_CLASS: "blue and green shield"
Glyph 2037 Type: GLYPH_OBJ : ARMOR_CLASS: "white-handed shield"
Glyph 2038 Type: GLYPH_OBJ : ARMOR_CLASS: "red-eyed shield"
Glyph 2039 Type: GLYPH_OBJ : ARMOR_CLASS: "large shield"
Glyph 2040 Type: GLYPH_OBJ : ARMOR_CLASS: "large round shield"
Glyph 2041 Type: GLYPH_OBJ : ARMOR_CLASS: "polished silver shield"
Glyph 2042 Type: GLYPH_OBJ : ARMOR_CLASS: "old gloves"
Glyph 2043 Type: GLYPH_OBJ : ARMOR_CLASS: "padded gloves"
Glyph 2044 Type: GLYPH_OBJ : ARMOR_CLASS: "riding gloves"
Glyph 2045 Type: GLYPH_OBJ : ARMOR_CLASS: "fencing gloves"
Glyph 2046 Type: GLYPH_OBJ : ARMOR_CLASS: "walking shoes"
Glyph 2047 Type: GLYPH_OBJ : ARMOR_CLASS: "hard shoes"
Glyph 2048 Type: GLYPH_OBJ : ARMOR_CLASS: "jackboots"
Glyph 2049 Type: GLYPH_OBJ : ARMOR_CLASS: "combat boots"
Glyph 2050 Type: GLYPH_OBJ : ARMOR_CLASS: "jungle boots"
Glyph 2051 Type: GLYPH_OBJ : ARMOR_CLASS: "hiking boots"
Glyph 2052 Type: GLYPH_OBJ : ARMOR_CLASS: "mud boots"
Glyph 2053 Type: GLYPH_OBJ : ARMOR_CLASS: "buckled boots"
Glyph 2054 Type: GLYPH_OBJ : ARMOR_CLASS: "riding boots"
Glyph 2055 Type: GLYPH_OBJ : ARMOR_CLASS: "snow boots"
"""
armor_glossary = armor_glossary.split('\n')[1:-1]
armor_glossary = [x.split('"') for x in armor_glossary]
armor_glossary = [(int(x[0].split()[1]), x[1]) for x in armor_glossary]
armor_glossary = {x:{'name': y} for x, y in armor_glossary}
armor_glossary
mapping = {v['name']: v['name'] for v in armor_glossary.values() if v['name'] in data}
mapping = {
**mapping,
**{x: ' '.join(x.split(' ')[1:]) for x in
['gray dragon scale mail',
'silver dragon scale mail',
'red dragon scale mail',
'white dragon scale mail',
'orange dragon scale mail',
'black dragon scale mail',
'blue dragon scale mail',
'green dragon scale mail',
'yellow dragon scale mail',
'gray dragon scales',
'silver dragon scales',
'red dragon scales',
'white dragon scales',
'orange dragon scales',
'black dragon scales',
'blue dragon scales',
'green dragon scales',
'yellow dragon scales',]
},
**{x: ' '.join(x.split(' ')[:-1]) for x in
['blue and green shield',
'white-handed shield',
'red-eyed shield',
'large round shield',
'polished silver shield',]
},
**{'plate mail': 'plate mail (tanko)'}
}
armor_glossary = {
k: {**v, **data[mapping[v['name']]]} for k, v in armor_glossary.items()
}
armor_glossary
URL = 'https://nethackwiki.com/wiki/Weapon'
response = requests.get(URL)
soup = BeautifulSoup(response.text,'html.parser')
tables = soup.find_all('table',{'class':'prettytable'})
tables[1]
table = tables[1]
rows = table.find_all('tr')
# columns = [v.text.replace('\n','') for v in rows[0].find_all('th')]
columns = ['name',
'skill',
'cost',
'weight',
'probability',
'damage_S',
'damage_L',
'material',
'appearance'
]
df = pd.DataFrame(columns=columns)
for i in range(1,len(rows)):
tds = rows[i].find_all('td')
values = [td.text.replace('\n','') for td in tds][:-2]
df = df.append(pd.Series(values, index=columns), ignore_index=True)
df = df[df['name'] != 'runesword']
df.loc[df['appearance'] != '', 'name'] = df['appearance']
df['weight'] = df['weight'].astype(int)
df['cost'] = df['cost'].apply(lambda x: int(x.split()[0]))
df.drop(columns = ['appearance'], inplace=True)
df
data = df.set_index('name').to_dict('index')
data
weapon_glossary = """
Glyph 1907 Type: GLYPH_OBJ : WEAPON_CLASS: "arrow"
Glyph 1908 Type: GLYPH_OBJ : WEAPON_CLASS: "runed arrow"
Glyph 1909 Type: GLYPH_OBJ : WEAPON_CLASS: "crude arrow"
Glyph 1910 Type: GLYPH_OBJ : WEAPON_CLASS: "silver arrow"
Glyph 1911 Type: GLYPH_OBJ : WEAPON_CLASS: "bamboo arrow"
Glyph 1912 Type: GLYPH_OBJ : WEAPON_CLASS: "crossbow bolt"
Glyph 1913 Type: GLYPH_OBJ : WEAPON_CLASS: "dart"
Glyph 1914 Type: GLYPH_OBJ : WEAPON_CLASS: "throwing star"
Glyph 1915 Type: GLYPH_OBJ : WEAPON_CLASS: "boomerang"
Glyph 1916 Type: GLYPH_OBJ : WEAPON_CLASS: "spear"
Glyph 1917 Type: GLYPH_OBJ : WEAPON_CLASS: "runed spear"
Glyph 1918 Type: GLYPH_OBJ : WEAPON_CLASS: "crude spear"
Glyph 1919 Type: GLYPH_OBJ : WEAPON_CLASS: "stout spear"
Glyph 1920 Type: GLYPH_OBJ : WEAPON_CLASS: "silver spear"
Glyph 1921 Type: GLYPH_OBJ : WEAPON_CLASS: "throwing spear"
Glyph 1922 Type: GLYPH_OBJ : WEAPON_CLASS: "trident"
Glyph 1923 Type: GLYPH_OBJ : WEAPON_CLASS: "dagger"
Glyph 1924 Type: GLYPH_OBJ : WEAPON_CLASS: "runed dagger"
Glyph 1925 Type: GLYPH_OBJ : WEAPON_CLASS: "crude dagger"
Glyph 1926 Type: GLYPH_OBJ : WEAPON_CLASS: "silver dagger"
Glyph 1927 Type: GLYPH_OBJ : WEAPON_CLASS: "athame"
Glyph 1928 Type: GLYPH_OBJ : WEAPON_CLASS: "scalpel"
Glyph 1929 Type: GLYPH_OBJ : WEAPON_CLASS: "knife"
Glyph 1930 Type: GLYPH_OBJ : WEAPON_CLASS: "stiletto"
Glyph 1931 Type: GLYPH_OBJ : WEAPON_CLASS: "worm tooth"
Glyph 1932 Type: GLYPH_OBJ : WEAPON_CLASS: "crysknife"
Glyph 1933 Type: GLYPH_OBJ : WEAPON_CLASS: "axe"
Glyph 1934 Type: GLYPH_OBJ : WEAPON_CLASS: "double-headed axe"
Glyph 1935 Type: GLYPH_OBJ : WEAPON_CLASS: "short sword"
Glyph 1936 Type: GLYPH_OBJ : WEAPON_CLASS: "runed short sword"
Glyph 1937 Type: GLYPH_OBJ : WEAPON_CLASS: "crude short sword"
Glyph 1938 Type: GLYPH_OBJ : WEAPON_CLASS: "broad short sword"
Glyph 1939 Type: GLYPH_OBJ : WEAPON_CLASS: "curved sword"
Glyph 1940 Type: GLYPH_OBJ : WEAPON_CLASS: "silver saber"
Glyph 1941 Type: GLYPH_OBJ : WEAPON_CLASS: "broadsword"
Glyph 1942 Type: GLYPH_OBJ : WEAPON_CLASS: "runed broadsword"
Glyph 1943 Type: GLYPH_OBJ : WEAPON_CLASS: "long sword"
Glyph 1944 Type: GLYPH_OBJ : WEAPON_CLASS: "two-handed sword"
Glyph 1945 Type: GLYPH_OBJ : WEAPON_CLASS: "samurai sword"
Glyph 1946 Type: GLYPH_OBJ : WEAPON_CLASS: "long samurai sword"
Glyph 1947 Type: GLYPH_OBJ : WEAPON_CLASS: "runed broadsword"
Glyph 1948 Type: GLYPH_OBJ : WEAPON_CLASS: "vulgar polearm"
Glyph 1949 Type: GLYPH_OBJ : WEAPON_CLASS: "hilted polearm"
Glyph 1950 Type: GLYPH_OBJ : WEAPON_CLASS: "forked polearm"
Glyph 1951 Type: GLYPH_OBJ : WEAPON_CLASS: "single-edged polearm"
Glyph 1952 Type: GLYPH_OBJ : WEAPON_CLASS: "lance"
Glyph 1953 Type: GLYPH_OBJ : WEAPON_CLASS: "angled poleaxe"
Glyph 1954 Type: GLYPH_OBJ : WEAPON_CLASS: "long poleaxe"
Glyph 1955 Type: GLYPH_OBJ : WEAPON_CLASS: "pole cleaver"
Glyph 1956 Type: GLYPH_OBJ : WEAPON_CLASS: "broad pick"
Glyph 1957 Type: GLYPH_OBJ : WEAPON_CLASS: "pole sickle"
Glyph 1958 Type: GLYPH_OBJ : WEAPON_CLASS: "pruning hook"
Glyph 1959 Type: GLYPH_OBJ : WEAPON_CLASS: "hooked polearm"
Glyph 1960 Type: GLYPH_OBJ : WEAPON_CLASS: "pronged polearm"
Glyph 1961 Type: GLYPH_OBJ : WEAPON_CLASS: "beaked polearm"
Glyph 1962 Type: GLYPH_OBJ : WEAPON_CLASS: "mace"
Glyph 1963 Type: GLYPH_OBJ : WEAPON_CLASS: "morning star"
Glyph 1964 Type: GLYPH_OBJ : WEAPON_CLASS: "war hammer"
Glyph 1965 Type: GLYPH_OBJ : WEAPON_CLASS: "club"
Glyph 1966 Type: GLYPH_OBJ : WEAPON_CLASS: "rubber hose"
Glyph 1967 Type: GLYPH_OBJ : WEAPON_CLASS: "staff"
Glyph 1968 Type: GLYPH_OBJ : WEAPON_CLASS: "thonged club"
Glyph 1969 Type: GLYPH_OBJ : WEAPON_CLASS: "flail"
Glyph 1970 Type: GLYPH_OBJ : WEAPON_CLASS: "bullwhip"
Glyph 1971 Type: GLYPH_OBJ : WEAPON_CLASS: "bow"
Glyph 1972 Type: GLYPH_OBJ : WEAPON_CLASS: "runed bow"
Glyph 1973 Type: GLYPH_OBJ : WEAPON_CLASS: "crude bow"
Glyph 1974 Type: GLYPH_OBJ : WEAPON_CLASS: "long bow"
Glyph 1975 Type: GLYPH_OBJ : WEAPON_CLASS: "sling"
Glyph 1976 Type: GLYPH_OBJ : WEAPON_CLASS: "crossbow"
"""
weapon_glossary = weapon_glossary.split('\n')[1:-1]
weapon_glossary = [x.split('"') for x in weapon_glossary]
weapon_glossary = [(int(x[0].split()[1]), x[1]) for x in weapon_glossary]
weapon_glossary = {x:{'name': y} for x, y in weapon_glossary}
weapon_glossary
mapping = {v['name']: v['name'] for v in weapon_glossary.values() if v['name'] in data}
mapping = {
**mapping,
**{
'knife': 'knife (shito)',
'lance': 'lance (1)',
'short sword': 'short sword (wakizashi)',
'broadsword': 'broadsword (ninja-to)',
'flail': 'flail (nunchaku)',
}
}
[v['name'] for v in weapon_glossary.values() if v['name'] not in mapping]
[k for k in data.keys() if k not in mapping.values()]
weapon_glossary = {
k: {**v, **data[mapping[v['name']]]} for k, v in weapon_glossary.items()
}
weapon_glossary
| 0.294418 | 0.358802 |
## Importing Libraries
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from google.colab import drive
import seaborn as sns
import matplotlib.category
drive.mount('/content/gdrive')
```
## Data Loading
```
path="/content/gdrive/My Drive/BE Air Quality Monitoring/Implementation/Holt Winter's/delhi.csv"
df=pd.read_csv(path)
print(df)
# Report columns that contain missing values.
# Note: `np.nan in df[col]` checks the index, and NaN never compares equal anyway, so use isna() instead.
for cols in df.columns:
    if df[cols].isna().any():
        print('Found in ' + cols)
print(df.corr(method='pearson'))
ax=sns.heatmap(df.corr(method='pearson'))
```
## Data Pre-processing
1. Merging date and time into a single timestamp (datatype: datetime); a vectorized alternative is sketched after the code below
```
for ind in df.index:
string=df['Date'][ind]
string = string[:6]+"20"+string[6:]
string = string[6:]+"-"+string[3:5]+"-"+string[0:2]
dat=datetime.strptime(string,'%Y-%m-%d').date()
time=datetime.strptime(df['Timestamp'][ind],'%H:%M:%S').time()
timestamp = datetime.combine(dat,time)
df['Date'][ind]=timestamp
print(df['Date'])
print()
type(df['Date'][0])
```
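As a side note, the same merge can usually be done in one vectorized call. The sketch below is an *alternative* to the loop above (don't run both); it assumes `Date` holds `DD-MM-YY` strings and `Timestamp` holds `HH:MM:SS` strings, as the slicing above suggests:
```
# Hypothetical vectorized equivalent of the loop above (alternative, not a follow-up):
# df['Date'] = pd.to_datetime(df['Date'] + ' ' + df['Timestamp'], format='%d-%m-%y %H:%M:%S')
```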
2. Dealing with missing values
```
mean=df['CO'].mean()
df['CO'].replace(np.nan,mean,inplace=True)
mean=df['SO2'].mean()
df['SO2'].replace(np.nan,mean,inplace=True)
mean=df['NO2'].mean()
df['NO2'].replace(np.nan,mean,inplace=True)
mean=df['O3'].mean()
df['O3'].replace(np.nan,mean,inplace=True)
mean=df['PM10'].mean()
df['PM10'].replace(np.nan,mean,inplace=True)
mean=df['PM25'].mean()
df['PM25'].replace(np.nan,mean,inplace=True)
```
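The same mean imputation can be written once for all pollutant columns; a minimal equivalent sketch, assuming these are the relevant columns:
```
# Equivalent mean imputation in a single loop
for col in ['CO', 'SO2', 'NO2', 'O3', 'PM10', 'PM25']:
    df[col] = df[col].fillna(df[col].mean())
```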
## For NO2
```
NO2=df
print(type(NO2))
```
3. Visualising Data
1. Line Plot
```
plt.figure(figsize=(40,8))
plt.plot(df['Date'],df['NO2'],label='NO2')
plt.legend()
```
2. Box Plot
```
NO2['NO2'].plot(kind='box')
```
4. Dealing with Outliers
```
Q1 = NO2['NO2'].quantile(0.25)
Q3 = NO2['NO2'].quantile(0.75)
IQR = Q3 - Q1
lower=Q1-1.5*IQR
upper=Q3+1.5*IQR
NO2=NO2[(NO2['NO2'] >= lower) & (NO2['NO2'] <= upper)]
print(NO2)
plt.figure(figsize=(40,8))
plt.plot(NO2['Date'],NO2['NO2'],label='NO2')
plt.legend()
NO2.reset_index(drop=True,inplace=True)
print(NO2)
```
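Since the same 1.5×IQR rule is applied again to O3 and SO2 below, it can be factored into a small helper. This is only a sketch; the function name is ours, not part of the original notebook:
```
def remove_iqr_outliers(frame, col, k=1.5):
    """Keep rows whose `col` value lies within [Q1 - k*IQR, Q3 + k*IQR]."""
    q1, q3 = frame[col].quantile(0.25), frame[col].quantile(0.75)
    iqr = q3 - q1
    mask = frame[col].between(q1 - k * iqr, q3 + k * iqr)
    return frame[mask].reset_index(drop=True)

# e.g. NO2 = remove_iqr_outliers(df, 'NO2')
```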
5. Train and Test Data Splitting
```
rows=NO2.shape[0]
train=NO2[0:rows-168]
test=NO2[rows-168:rows]
print(train)
print(test)
```
## Model Fitting
```
print(train.index)
x=[]
y=[]
for i in range(0,2536):
x.append([i])
print(x)
for i in train['NO2']:
y.append(i)
print(y)
from sklearn.svm import SVR  # this import was missing from the original cell

#svr_lin=SVR(kernel='linear',verbose=True)
#svr_poly=SVR(kernel='poly',C=1e3,degree=3)
svr_rbf=SVR(kernel='rbf',C=1e3,gamma=0.1)
svr_rbf.fit(x,y)
x_test=[]
for i in range(2536,2704):
x_test.append([i])
print(x_test)
yhat=svr_rbf.predict(x_test)
print(yhat)
```
## Model Evaluation
```
plt.figure(figsize=(16,8))
plt.plot(test['Date'],test['NO2'],label='test_NO2')
plt.plot(test['Date'],yhat,label='predicted_NO2')
plt.legend()
```
###### MAPE Calculation
```
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
mean_absolute_percentage_error(test['NO2'], yhat)
```
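If a recent scikit-learn is available (0.24 or newer), it ships an equivalent metric; note that it returns a fraction rather than a percentage:
```
# Alternative, assuming scikit-learn >= 0.24 is installed
from sklearn.metrics import mean_absolute_percentage_error as sk_mape
# sk_mape(test['NO2'], yhat) * 100
```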
## For O3
```
O3=df
print(type(O3))
plt.figure(figsize=(40,8))
plt.plot(O3['Date'],O3['O3'],label='O3')
plt.legend()
O3['O3'].plot(kind='box')
Q1 = O3['O3'].quantile(0.25)
Q3 = O3['O3'].quantile(0.75)
IQR = Q3 - Q1
lower=Q1-1.5*IQR
upper=Q3+1.5*IQR
O3=O3[(O3['O3'] >= lower) & (O3['O3'] <= upper)]
print(O3)
plt.figure(figsize=(40,8))
plt.plot(O3['Date'],O3['O3'],label='O3')
plt.legend()
O3.reset_index(drop=True,inplace=True)
print(O3)
rows=O3.shape[0]
train=O3[0:rows-168]
test=O3[rows-168:rows]
print(train)
print(test)
from statsmodels.tsa.holtwinters import ExponentialSmoothing  # this import was missing from the original cell

fit=ExponentialSmoothing(train['O3'],seasonal="additive",seasonal_periods=24).fit()
yhat=fit.predict(start=rows-168,end=rows-1)
plt.figure(figsize=(16,8))
plt.plot(test['Date'],test['O3'],label='test_O3')
plt.plot(test['Date'],yhat,label='predicted_O3')
plt.legend()
mean_absolute_percentage_error(test['O3'], yhat)
```
## For SO2
```
SO2=df
print(type(SO2))
plt.figure(figsize=(40,8))
plt.plot(SO2['Date'],SO2['SO2'],label='SO2')
plt.legend()
SO2['SO2'].plot(kind='box')
Q1 = SO2['SO2'].quantile(0.25)
Q3 = SO2['SO2'].quantile(0.75)
IQR = Q3 - Q1
lower=Q1-1.5*IQR
upper=Q3+1.5*IQR
SO2=SO2[(SO2['SO2'] >= lower) & (SO2['SO2'] <= upper)]
print(SO2)
plt.figure(figsize=(40,8))
plt.plot(SO2['Date'],SO2['SO2'],label='SO2')
plt.legend()
SO2.reset_index(drop=True,inplace=True)
print(SO2)
rows=SO2.shape[0]
train=SO2[0:rows-168]
test=SO2[rows-168:rows]
print(train)
print(test)
fit=ExponentialSmoothing(train['SO2'],seasonal="additive",seasonal_periods=24).fit()
yhat=fit.predict(start=rows-168,end=rows-1)
plt.figure(figsize=(16,8))
plt.plot(test['Date'],test['SO2'],label='test_SO2')
plt.plot(test['Date'],yhat,label='predicted_SO2')
plt.legend()
mean_absolute_percentage_error(test['SO2'], yhat)
```
|
github_jupyter
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from google.colab import drive
import seaborn as sns
import matplotlib.category
drive.mount('/content/gdrive')
path="/content/gdrive/My Drive/BE Air Quality Monitoring/Implementation/Holt Winter's/delhi.csv"
df=pd.read_csv(path)
print(df)
search=np.nan
for cols in df.columns:
if (search in df[cols]):
print('Found in '+cols)
print(df.corr(method='pearson'))
ax=sns.heatmap(df.corr(method='pearson'))
for ind in df.index:
string=df['Date'][ind]
string = string[:6]+"20"+string[6:]
string = string[6:]+"-"+string[3:5]+"-"+string[0:2]
dat=datetime.strptime(string,'%Y-%m-%d').date()
time=datetime.strptime(df['Timestamp'][ind],'%H:%M:%S').time()
timestamp = datetime.combine(dat,time)
df['Date'][ind]=timestamp
print(df['Date'])
print()
type(df['Date'][0])
mean=df['CO'].mean()
df['CO'].replace(np.nan,mean,inplace=True)
mean=df['SO2'].mean()
df['SO2'].replace(np.nan,mean,inplace=True)
mean=df['NO2'].mean()
df['NO2'].replace(np.nan,mean,inplace=True)
mean=df['O3'].mean()
df['O3'].replace(np.nan,mean,inplace=True)
mean=df['PM10'].mean()
df['PM10'].replace(np.nan,mean,inplace=True)
mean=df['PM25'].mean()
df['PM25'].replace(np.nan,mean,inplace=True)
NO2=df
print(type(NO2))
plt.figure(figsize=(40,8))
plt.plot(df['Date'],df['NO2'],label='NO2')
plt.legend()
NO2['NO2'].plot(kind='box')
Q1 = NO2['NO2'].quantile(0.25)
Q3 = NO2['NO2'].quantile(0.75)
IQR = Q3 - Q1
lower=Q1-1.5*IQR
upper=Q3+1.5*IQR
NO2=NO2[(NO2['NO2'] >= lower) & (NO2['NO2'] <= upper)]
print(NO2)
plt.figure(figsize=(40,8))
plt.plot(NO2['Date'],NO2['NO2'],label='NO2')
plt.legend()
NO2.reset_index(drop=True,inplace=True)
print(NO2)
rows=NO2.shape[0]
train=NO2[0:rows-168]
test=NO2[rows-168:rows]
print(train)
print(test)
print(train.index)
x=[]
y=[]
for i in range(0,2536):
x.append([i])
print(x)
for i in train['NO2']:
y.append(i)
print(y)
#svr_lin=SVR(kernel='linear',verbose=True)
#svr_poly=SVR(kernel='poly',C=1e3,degree=3)
svr_rbf=SVR(kernel='rbf',C=1e3,gamma=0.1)
svr_rbf.fit(x,y)
x_test=[]
for i in range(2536,2704):
x_test.append([i])
print(x_test)
yhat=svr_rbf.predict(x_test)
print(yhat)
plt.figure(figsize=(16,8))
plt.plot(test['Date'],test['NO2'],label='test_NO2')
plt.plot(test['Date'],yhat,label='predicted_NO2')
plt.legend()
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
mean_absolute_percentage_error(test['NO2'], yhat)
O3=df
print(type(O3))
plt.figure(figsize=(40,8))
plt.plot(O3['Date'],O3['O3'],label='O3')
plt.legend()
O3['O3'].plot(kind='box')
Q1 = O3['O3'].quantile(0.25)
Q3 = O3['O3'].quantile(0.75)
IQR = Q3 - Q1
lower=Q1-1.5*IQR
upper=Q3+1.5*IQR
O3=O3[(O3['O3'] >= lower) & (O3['O3'] <= upper)]
print(O3)
plt.figure(figsize=(40,8))
plt.plot(O3['Date'],O3['O3'],label='O3')
plt.legend()
O3.reset_index(drop=True,inplace=True)
print(O3)
rows=O3.shape[0]
train=O3[0:rows-168]
test=O3[rows-168:rows]
print(train)
print(test)
fit=ExponentialSmoothing(train['O3'],seasonal="additive",seasonal_periods=24).fit()
yhat=fit.predict(start=rows-168,end=rows-1)
plt.figure(figsize=(16,8))
plt.plot(test['Date'],test['O3'],label='test_O3')
plt.plot(test['Date'],yhat,label='predicted_O3')
plt.legend()
mean_absolute_percentage_error(test['O3'], yhat)
SO2=df
print(type(SO2))
plt.figure(figsize=(40,8))
plt.plot(SO2['Date'],SO2['SO2'],label='SO2')
plt.legend()
SO2['SO2'].plot(kind='box')
Q1 = SO2['SO2'].quantile(0.25)
Q3 = SO2['SO2'].quantile(0.75)
IQR = Q3 - Q1
lower=Q1-1.5*IQR
upper=Q3+1.5*IQR
SO2=SO2[(SO2['SO2'] >= lower) & (SO2['SO2'] <= upper)]
print(SO2)
plt.figure(figsize=(40,8))
plt.plot(SO2['Date'],SO2['SO2'],label='SO2')
plt.legend()
SO2.reset_index(drop=True,inplace=True)
print(SO2)
rows=SO2.shape[0]
train=SO2[0:rows-168]
test=SO2[rows-168:rows]
print(train)
print(test)
fit=ExponentialSmoothing(train['SO2'],seasonal="additive",seasonal_periods=24).fit()
yhat=fit.predict(start=rows-168,end=rows-1)
plt.figure(figsize=(16,8))
plt.plot(test['Date'],test['SO2'],label='test_SO2')
plt.plot(test['Date'],yhat,label='predicted_SO2')
plt.legend()
mean_absolute_percentage_error(test['SO2'], yhat)
| 0.308607 | 0.753172 |
<!--BOOK_INFORMATION-->
<img align="left" style="padding-right:10px;" src="fig/cover-small.jpg">
*This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).*
*The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).*
<!--NAVIGATION-->
< [Basic Python Semantics: Variables and Objects](03-Semantics-Variables.ipynb) | [Contents](Index.ipynb) | [Built-In Types: Simple Values](05-Built-in-Scalar-Types.ipynb) >
# Basic Python Semantics: Operators
In the previous section, we began to look at the semantics of Python variables and objects; here we'll dig into the semantics of the various *operators* included in the language.
By the end of this section, you'll have the basic tools to begin comparing and operating on data in Python.
## Arithmetic Operations
Python implements seven basic binary arithmetic operators, two of which can double as unary operators.
They are summarized in the following table:
| Operator | Name | Description |
|--------------|----------------|--------------------------------------------------------|
| ``a + b`` | Addition | Sum of ``a`` and ``b`` |
| ``a - b`` | Subtraction | Difference of ``a`` and ``b`` |
| ``a * b`` | Multiplication | Product of ``a`` and ``b`` |
| ``a / b`` | True division | Quotient of ``a`` and ``b`` |
| ``a // b`` | Floor division | Quotient of ``a`` and ``b``, removing fractional parts |
| ``a % b`` | Modulus | Integer remainder after division of ``a`` by ``b`` |
| ``a ** b`` | Exponentiation | ``a`` raised to the power of ``b`` |
| ``-a`` | Negation | The negative of ``a`` |
| ``+a`` | Unary plus | ``a`` unchanged (rarely used) |
These operators can be used and combined in intuitive ways, using standard parentheses to group operations.
For example:
```
divmod(9, 5)
# addition, subtraction, multiplication
(4 + 8) * (6.5 - 3)
```
Floor division is true division with fractional parts truncated:
```
# True division
print(11 / 2)
# Floor division
print(13.7 // 2)
```
The floor division operator was added in Python 3; you should be aware if working in Python 2 that the standard division operator (``/``) acts like floor division for integers and like true division for floating-point numbers.
Finally, I'll mention an eighth arithmetic operator that was added in Python 3.5: the ``a @ b`` operator, which is meant to indicate the *matrix product* of ``a`` and ``b``, for use in various linear algebra packages.
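For instance, with NumPy installed (a third-party package, not required by anything else in this section), ``@`` computes the matrix product of two arrays:
```
import numpy as np
A = np.array([[1, 2], [3, 4]])
B = np.array([[5, 6], [7, 8]])
A @ B   # matrix product, equivalent to np.matmul(A, B)
```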
## Bitwise Operations
In addition to the standard numerical operations, Python includes operators to perform bitwise logical operations on integers.
These are much less commonly used than the standard arithmetic operations, but it's useful to know that they exist.
The six bitwise operators are summarized in the following table:
| Operator | Name | Description |
|--------------|-----------------|---------------------------------------------|
| ``a & b`` | Bitwise AND | Bits defined in both ``a`` and ``b`` |
| <code>a &#124; b</code>| Bitwise OR | Bits defined in ``a`` or ``b`` or both |
| ``a ^ b`` | Bitwise XOR | Bits defined in ``a`` or ``b`` but not both |
| ``a << b`` | Bit shift left | Shift bits of ``a`` left by ``b`` units |
| ``a >> b`` | Bit shift right | Shift bits of ``a`` right by ``b`` units |
| ``~a`` | Bitwise NOT | Bitwise negation of ``a`` |
These bitwise operators only make sense in terms of the binary representation of numbers, which you can see using the built-in ``bin`` function:
```
bin(10)
```
The result is prefixed with ``'0b'``, which indicates a binary representation.
The rest of the digits indicate that the number 10 is expressed as the sum $1 \cdot 2^3 + 0 \cdot 2^2 + 1 \cdot 2^1 + 0 \cdot 2^0$.
Similarly, we can write:
```
bin(4)
```
Now, using bitwise OR, we can find the number which combines the bits of 4 and 10:
```
4 | 10
bin(4 | 10)
```
These bitwise operators are not as immediately useful as the standard arithmetic operators, but it's helpful to see them at least once to understand what class of operation they perform.
In particular, users from other languages are sometimes tempted to use XOR (i.e., ``a ^ b``) when they really mean exponentiation (i.e., ``a ** b``).
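For instance:
```
print(2 ** 3)   # exponentiation: 8
print(2 ^ 3)    # bitwise XOR of 0b10 and 0b11: 1
```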
## Assignment Operations
We've seen that variables can be assigned with the "``=``" operator, and the values stored for later use. For example:
```
a = 24
print(a)
```
We can use these variables in expressions with any of the operators mentioned earlier.
For example, to add 2 to ``a`` we write:
```
a + 2
```
We might want to update the variable ``a`` with this new value; in this case, we could combine the addition and the assignment and write ``a = a + 2``.
Because this type of combined operation and assignment is so common, Python includes built-in update operators for all of the arithmetic operations:
```
a += 2 # equivalent to a = a + 2
print(a)
```
There is an augmented assignment operator corresponding to each of the binary operators listed earlier; in brief, they are:
| | | | |
|-|-|-|-|
|``a += b``| ``a -= b``|``a *= b``| ``a /= b``|
|``a //= b``| ``a %= b``|``a **= b``|``a &= b``|
|<code>a &#124;= b</code>| ``a ^= b``|``a <<= b``| ``a >>= b``|
Each one is equivalent to the corresponding operation followed by assignment: that is, for any operator "``■``", the expression ``a ■= b`` is equivalent to ``a = a ■ b``, with a slight catch.
For mutable objects like lists, arrays, or DataFrames, these augmented assignment operations are actually subtly different than their more verbose counterparts: they modify the contents of the original object rather than creating a new object to store the result.
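A quick illustration with lists, where both names initially point to the same object:
```
a = b = [1, 2]
a += [3]      # in-place: the shared list is modified, so b changes too
print(b)      # [1, 2, 3]
a = a + [4]   # builds a new list and rebinds a; b is left alone
print(b)      # [1, 2, 3]
```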
## Comparison Operations
Another type of operation which can be very useful is comparison of different values.
For this, Python implements standard comparison operators, which return Boolean values ``True`` and ``False``.
The comparison operations are listed in the following table:
| Operation | Description || Operation | Description |
|---------------|-----------------------------------||---------------|--------------------------------------|
| ``a == b`` | ``a`` equal to ``b`` || ``a != b`` | ``a`` not equal to ``b`` |
| ``a < b`` | ``a`` less than ``b`` || ``a > b`` | ``a`` greater than ``b`` |
| ``a <= b`` | ``a`` less than or equal to ``b`` || ``a >= b`` | ``a`` greater than or equal to ``b`` |
These comparison operators can be combined with the arithmetic and bitwise operators to express a virtually limitless range of tests for the numbers.
For example, we can check if a number is odd by checking that the modulus with 2 returns 1:
```
limite = 10
mayus = [i.upper() for i in "hola mundo"]  # list comprehension
mayus
2 >= 2
# evens and odds below `limite`, also via list comprehensions
pares = [n for n in range(limite) if n % 2 == 0]
impares = [n for n in range(limite) if n % 2 == 1]
pares
impares
# 25 is odd
25 % 2 == 1
# 66 is odd
66 % 2 == 1
```
We can string-together multiple comparisons to check more complicated relationships:
```
# check if a is between 15 and 30
a = 15
15 <= a < 30 # (15 < a) and (a < 30)
```
And, just to make your head hurt a bit, take a look at this comparison:
```
-1 == ~0
```
Recall that ``~`` is the bit-flip operator, and evidently when you flip all the bits of zero you end up with -1.
If you're curious as to why this is, look up the *two's complement* integer encoding scheme, which is what Python uses to encode signed integers, and think about what happens when you start flipping all the bits of integers encoded this way.
## Boolean Operations
When working with Boolean values, Python provides operators to combine the values using the standard concepts of "and", "or", and "not".
Predictably, these operators are expressed using the words ``and``, ``or``, and ``not``:
```
x = 4
(x < 6) and (x > 2)
(x > 10) or (x % 2 == 0)
not (x < 6)
```
Boolean algebra aficionados might notice that the XOR operator is not included; this can of course be constructed in several ways from a compound statement of the other operators.
Otherwise, a clever trick you can use for XOR of Boolean values is the following:
```
# (x > 1) xor (x < 10)
(x > 1) != (x < 10)
```
These sorts of Boolean operations will become extremely useful when we begin discussing *control flow statements* such as conditionals and loops.
One sometimes confusing thing about the language is when to use Boolean operators (``and``, ``or``, ``not``), and when to use bitwise operations (``&``, ``|``, ``~``).
The answer lies in their names: Boolean operators should be used when you want to compute *Boolean values (i.e., truth or falsehood) of entire statements*.
Bitwise operations should be used when you want to *operate on individual bits or components of the objects in question*.
## Identity and Membership Operators
Like ``and``, ``or``, and ``not``, Python also contains prose-like operators to check for identity and membership.
They are the following:
| Operator | Description |
|---------------|---------------------------------------------------|
| ``a is b`` | True if ``a`` and ``b`` are identical objects |
| ``a is not b``| True if ``a`` and ``b`` are not identical objects |
| ``a in b`` | True if ``a`` is a member of ``b`` |
| ``a not in b``| True if ``a`` is not a member of ``b`` |
### Identity Operators: "``is``" and "``is not``"
The identity operators, "``is``" and "``is not``" check for *object identity*.
Object identity is different than equality, as we can see here:
```
a = [1, 2, 3]
b = [1, 2, 3]
a == b
a is b
a is not b
```
What do identical objects look like? Here is an example:
```
a = [1, 2, 3]
b = a
a is b
```
The difference between the two cases here is that in the first, ``a`` and ``b`` point to *different objects*, while in the second they point to the *same object*.
As we saw in the previous section, Python variables are pointers. The "``is``" operator checks whether the two variables are pointing to the same container (object), rather than referring to what the container contains.
With this in mind, in most cases that a beginner is tempted to use "``is``" what they really mean is ``==``.
### Membership operators
Membership operators check for membership within compound objects.
So, for example, we can write:
```
1 in [1, 2, 3]
2 not in [1, 2, 3]
usuarios_permitidos = ["A", "B"]
usuario = "C"
usuario not in usuarios_permitidos
```
These membership operations are an example of what makes Python so easy to use compared to lower-level languages such as C.
In C, membership would generally be determined by manually constructing a loop over the list and checking for equality of each value.
In Python, you just type what you want to know, in a manner reminiscent of straightforward English prose.
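For comparison, here is a rough sketch (in Python, for readability) of the manual loop that C-style code would need:
```
def contains(sequence, value):
    for item in sequence:
        if item == value:
            return True
    return False

contains([1, 2, 3], 2)   # True
```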
<!--NAVIGATION-->
< [Basic Python Semantics: Variables and Objects](03-Semantics-Variables.ipynb) | [Contents](Index.ipynb) | [Built-In Types: Simple Values](05-Built-in-Scalar-Types.ipynb) >
|
github_jupyter
|
divmod(9, 5)
# addition, subtraction, multiplication
(4 + 8) * (6.5 - 3)
# True division
print(11 / 2)
# Floor division
print(13.7 // 2)
2 >> 2
bin(4)
4 | 10
bin(4 | 10)
a = 24
print(a)
a + 2
a += 2 # equivalent to a = a + 2
print(a)
limite = 10
mayus = [i.upper() for i in "hola mundo"] # comprehension de listas
mayus
2 >= 2
pares
impares
# 25 is odd
25 % 2 == 1
# 66 is odd
66 % 2 == 1
# check if a is between 15 and 30
a = 15
15 <= a < 30 # (15 < a) and (a < 30)
-1 == ~0
x = 4
(x < 6) and (x > 2)
(x > 10) or (x % 2 == 0)
not (x < 6)
# (x > 1) xor (x < 10)
(x > 1) != (x < 10)
a = [1, 2, 3]
b = [1, 2, 3]
a == b
a is b
a is not b
a = [1, 2, 3]
b = a
a is b
1 in [1, 2, 3]
2 not in [1, 2, 3]
usuarios_permitidos = ["A", "B"]
usuario = "C"
usuario not in usuarios_permitidos
| 0.330579 | 0.993116 |
```
import tensorflow as tf
import deepchem as dc
import rdkit as rdkit
import rdkit.Chem as Chem
import rdkit.Chem.PandasTools as pt
import pandas as pd
import numpy as np
print('tf version:', tf.__version__, '\nGPU avilable?:', tf.config.list_physical_devices('GPU'))
print('rdkit version: ', rdkit.__version__)
print('DeepChem version', dc.__version__)
from layers.layers import GraphEncoderLayer
from tensorflow.keras import layers
data = pd.read_csv('./data/QM9_HAC_5_9_.csv')
reduced_data = list(data['SMILES'])
import random
random.shuffle(reduced_data)
mols = list(filter(lambda x: x is not None, [Chem.MolFromSmiles(x) for x in reduced_data]))
print('how many molecules obtained from smiles: ', len(mols))
import logging
from deepchem.utils.typing import RDKitAtom, RDKitBond, RDKitMol, List
from deepchem.feat.base_classes import MolecularFeaturizer
logger = logging.getLogger(__name__)
class GraphMatrix():
"""
This is a class used to store graph data for MolGAN neural networks.
Parameters
----------
adjacency_matrix: np.ndarray
Bond type (adjacency) matrix with shape [max_atom_count, max_atom_count]
node_features: np.ndarray
Vector of atom type indices with shape [max_atom_count]
Returns
-------
graph: GraphMatrix
A molecule graph with some features.
"""
def __init__(self,adjacency_matrix: np.ndarray, node_features: np.ndarray):
self.adjacency_matrix = adjacency_matrix
self.node_features = node_features
class MolGanFeaturizer(MolecularFeaturizer):
"""This class implements featurizer used with MolGAN de-novo molecular generation based on:
`MolGAN: An implicit generative model for small molecular graphs`<https://arxiv.org/abs/1805.11973>`_.
The default representation is in form of GraphMatrix object, being wrapper for two matrices containing atom and bond type information.
The class also provides reverse capabilities"""
def __init__(self,
max_atom_count:int = 9,
kekulize:bool = True,
bond_labels: List[RDKitBond] = None,
atom_labels: List[int] = None):
"""
Parameters
----------
max_atom_count: int, default 9
Maximum number of atoms used to create the adjacency matrix; molecules cannot have more atoms than this number (implicit hydrogens do not count).
kekulize: bool, default True
Whether molecules should be kekulized; doing so resolves a number of issues during defeaturization.
bond_labels: List[RDKitBond]
List containing types of bond used for generation of adjacency matrix
atom_labels: List[int]
List of atomic numbers used for generation of node features
"""
self.max_atom_count = max_atom_count
self.kekulize = kekulize
#bond labels
if bond_labels is None:
self.bond_labels = [
Chem.rdchem.BondType.ZERO
,Chem.rdchem.BondType.SINGLE
,Chem.rdchem.BondType.DOUBLE
,Chem.rdchem.BondType.TRIPLE
,Chem.rdchem.BondType.AROMATIC]
else:
self.bond_labels = bond_labels
#atom labels
if atom_labels is None:
self.atom_labels = [0, 6, 7, 8, 9] # 0 = padding/no atom, then C, N, O, F
else:
self.atom_labels = atom_labels
#create bond encoders and decoders
self.bond_encoder = {l: i for i, l in enumerate(self.bond_labels)}
self.bond_decoder = {i: l for i, l in enumerate(self.bond_labels)}
#create atom encoders and decoders
self.atom_encoder = {l: i for i, l in enumerate(self.atom_labels)}
self.atom_decoder = {i: l for i, l in enumerate(self.atom_labels)}
def _featurize(self, mol: RDKitMol) -> GraphMatrix:
"""Calculate adjacency matrix and nodes features for RDKitMol.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit mol object.
Returns
-------
graph: GraphMatrix
A molecule graph with some features.
"""
if self.kekulize:
Chem.Kekulize(mol)
A = np.zeros(shape=(self.max_atom_count, self.max_atom_count), dtype=np.float32)
bonds = mol.GetBonds()
begin, end = [b.GetBeginAtomIdx() for b in bonds], [b.GetEndAtomIdx() for b in bonds]
bond_type = [self.bond_encoder[b.GetBondType()] for b in bonds]
A[begin, end] = bond_type
A[end, begin] = bond_type
degree = np.sum(A[:mol.GetNumAtoms(), :mol.GetNumAtoms()], axis=-1)
X = np.array([self.atom_encoder[atom.GetAtomicNum()] for atom in mol.GetAtoms()] + [0] * (self.max_atom_count - mol.GetNumAtoms()), dtype=np.int32)
graph = GraphMatrix(A,X)
return graph if (degree > 0).all() else None
def _defeaturize(self, graph_matrix: GraphMatrix, sanitize:bool = True, cleanup=True) ->RDKitMol:
"""Recreate RDKitMol from GraphMatrix object. For working correctly same object needs to be used for featurization and defeaturization.
Parameters
----------
graph_matrix: GraphMatrix
GraphMatrix object.
sanitize: bool, default True
Should RDKit sanitization be included in the process.
cleanup: bool, default True
Splits salts and removes compounds with "*" atom types
Returns
-------
mol: RDKitMol object
RDKitMol object representing molecule.
"""
node_labels = graph_matrix.node_features
edge_labels = graph_matrix.adjacency_matrix
mol = Chem.RWMol()
for node_label in node_labels:
mol.AddAtom(Chem.Atom(self.atom_decoder[node_label]))
for start, end in zip(*np.nonzero(edge_labels)):
if start > end:
mol.AddBond(int(start), int(end), self.bond_decoder[edge_labels[start, end]])
if sanitize:
try:
Chem.SanitizeMol(mol)
except Exception:
mol = None
if cleanup:
try:
smiles = Chem.MolToSmiles(mol)
smiles = max(smiles.split('.'), key=len)
if "*" not in smiles:
mol = Chem.MolFromSmiles(smiles)
else:
mol = None
except Exception:
mol = None
return mol
def defeaturize(self, graphs, log_every_n=1000) -> np.ndarray:
"""Calculates molecules from correspoing GraphMatrix objects.
Parameters
----------
graphs: GraphMatrix / iterable
GraphMatrix object or corresponding iterable
log_every_n: int, default 1000
Logging messages reported every `log_every_n` samples.
Returns
-------
features: np.ndarray
A numpy array containing RDKitMol objects.
"""
# Special case handling of single molecule
if isinstance(graphs, GraphMatrix):
graphs = [graphs]
else:
# Convert iterables to list
graphs = list(graphs)
molecules = []
for i, gr in enumerate(graphs):
if i % log_every_n == 0:
logger.info("Featurizing datapoint %i" % i)
try:
molecules.append(self._defeaturize(gr))
except Exception as e:
logger.warning("Failed to defeaturize datapoint %d, %s. Appending empty array", i,gr)
logger.warning("Exception message: {}".format(e))
molecules.append(np.array([]))
molecules = np.asarray(molecules)
return molecules
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from deepchem.models import WGAN
from layers.layers import GraphEncoderLayer
class MolGAN(WGAN):
"""Model for automatic generation of compounds based on GAN architecture described by Nicola De Cao et al.
`MolGAN: An implicit generative model for small molecular graphs`<https://arxiv.org/abs/1805.11973>`_.
It uses an adjacency matrix and node features as inputs; both need to be converted to a one-hot representation before use.
Examples
--------
gan = MolGAN(learning_rate=ExponentialDecay(0.001, 0.9, 5000))
dataset = dc.data.NumpyDataset([x.adjacency_matrix for x in limited],[x.node_features for x in limited])
def iterbatches(epochs):
for i in range(epochs):
for batch in dataset.iterbatches(batch_size=gan.batch_size, pad_batches=True):
adjacency_tensor = tf.one_hot(batch[0], gan.edges)
node_tesor = tf.one_hot(batch[1], gan.nodes)
yield {gan.data_inputs[0]: adjacency_tensor, gan.data_inputs[1]:node_tesor}
gan.fit_gan(iterbatches(10), generator_steps=0.2, checkpoint_interval=5000)
"""
def __init__(self
,edges: int = 5
,vertices:int = 9
,nodes: int = 5
,embedding_dim: int = 10
,dropout_rate: float = 0.
,name:str =''
, **kwargs):
"""
Parameters
----------
edges: int, default 5
Number of bond types includes BondType.Zero
vertices: int, default 9
Max number of atoms in adjacency and node features matrices
nodes: int, default 5
Number of atom types in node features matrix
embedding_dim: int, default 10
Size of noise input
dropout_rate: float, default = 0.
Rate of dropout used across whole model
name: str, default ''
Name of the model
"""
self.edges = edges
self.vertices = vertices
self.nodes =nodes
self.embedding_dim = embedding_dim
self.dropout_rate = dropout_rate
super(MolGAN, self).__init__(name=name, **kwargs)
def get_noise_input_shape(self):
return (self.embedding_dim,)
def get_data_input_shapes(self):
return [(self.vertices, self.vertices, self.edges),(self.vertices, self.nodes)]
def create_generator(self):
input_layer = layers.Input(shape=(self.embedding_dim,))
x = layers.Dense(128, activation='tanh')(input_layer)
x = layers.Dropout(self.dropout_rate)(x)
x = layers.Dense(256, activation='tanh')(x)
x = layers.Dropout(self.dropout_rate)(x)
x = layers.Dense(512, activation='tanh')(x)
x = layers.Dropout(self.dropout_rate)(x)
#EDGES LOGITS
edges_logits = layers.Dense(units=self.edges*self.vertices*self.vertices, activation=None)(x)
edges_logits = layers.Reshape((self.edges, self.vertices, self.vertices))(edges_logits)
matrix_transpose = layers.Permute((1,3,2))(edges_logits)
edges_logits = (edges_logits + matrix_transpose)/2
edges_logits = layers.Permute((2,3,1))(edges_logits)
edges_logits = layers.Dropout(self.dropout_rate)(edges_logits)
#used during training of the model
edges_softmax = tf.nn.softmax(edges_logits)
#NODES LOGITS
nodes_logits = layers.Dense(units=(self.vertices * self.nodes), activation=None)(x)
nodes_logits = layers.Reshape((self.vertices, self.nodes))(nodes_logits)
nodes_logits = layers.Dropout(self.dropout_rate)(nodes_logits)
#used during training of the model
nodes_softmax = tf.nn.softmax(nodes_logits)
#used for compound generation; consider returning just the logits and applying
#this sampling in a separate layer when molecules need to be generated
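# The two blocks below use the Gumbel-max trick: adding -log(-log(U)) noise
# (U ~ Uniform(0, 1)) to the logits and taking the argmax draws a sample from the
# categorical distribution defined by softmax(logits), yielding discrete bond and atom types.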
e_gumbel_logits = edges_logits - tf.math.log(- tf.math.log(tf.random.uniform(tf.shape(edges_logits), dtype=edges_logits.dtype)))
e_gumbel_argmax = tf.one_hot(tf.argmax(e_gumbel_logits, axis=-1), depth=e_gumbel_logits.shape[-1], dtype=e_gumbel_logits.dtype)
e_argmax = tf.argmax(e_gumbel_argmax, axis=-1)
#used for compound generation
n_gumbel_logits = nodes_logits - tf.math.log(- tf.math.log(tf.random.uniform(tf.shape(nodes_logits), dtype=nodes_logits.dtype)))
n_gumbel_argmax = tf.one_hot(tf.argmax(n_gumbel_logits, axis=-1), depth=n_gumbel_logits.shape[-1], dtype=n_gumbel_logits.dtype)
n_argmax = tf.argmax(n_gumbel_argmax, axis=-1)
#final model
return keras.Model(inputs = input_layer, outputs=[edges_softmax, nodes_softmax,e_argmax, n_argmax])
def create_discriminator(self):
adjacency_tensor= layers.Input(shape=(self.vertices, self.vertices, self.edges))
node_tensor = layers.Input(shape=(self.vertices, self.nodes))
graph = GraphEncoderLayer(units = [(128,64),128], dropout_rate= self.dropout_rate, edges=self.edges)([adjacency_tensor,node_tensor])
dense = layers.Dense(units=128, activation='tanh')(graph)
dense = layers.Dropout(self.dropout_rate)(dense)
dense = layers.Dense(units=64, activation='tanh')(dense)
dense = layers.Dropout(self.dropout_rate)(dense)
output = layers.Dense(units=1)(dense)
return keras.Model(inputs=[adjacency_tensor,node_tensor], outputs=[output])
def predict_gan_generator(self,
batch_size=1,
noise_input=None,
generator_index=0):
"""Use the GAN to generate a batch of samples.
Parameters
----------
batch_size: int
the number of samples to generate. If either noise_input or
conditional_inputs is specified, this argument is ignored since the batch
size is then determined by the size of that argument.
noise_input: array
the value to use for the generator's noise input. If None (the default),
get_noise_batch() is called to generate a random input, so each call will
produce a new set of samples.
generator_index: int
the index of the generator (between 0 and n_generators-1) to use for
generating the samples.
Returns
-------
An array (if the generator has only one output) or list of arrays (if it has
multiple outputs) containing the generated samples.
"""
if noise_input is not None:
batch_size = len(noise_input)
if noise_input is None:
noise_input = self.get_noise_batch(batch_size)
inputs = noise_input
_, _, adjacency_matrix, nodes_features = self.generators[generator_index](inputs, training=False)
graphs = [GraphMatrix(i,j) for i,j in zip(adjacency_matrix.numpy(),nodes_features.numpy())]
return graphs
#create featurizer
feat = MolGanFeaturizer()
#featurize molecules
limited = feat.featurize(mols)
#Remove empty objects
limited = list(filter(lambda x: x is not None, limited))
#create dataset
dataset = dc.data.NumpyDataset([x.adjacency_matrix for x in limited],[x.node_features for x in limited])
from deepchem.models.optimizers import ExponentialDecay
#create model
gan = MolGAN(learning_rate=ExponentialDecay(0.001, 0.9, 5000))
#prepare data for training
def iterbatches(epochs):
for i in range(epochs):
for batch in dataset.iterbatches(batch_size=gan.batch_size, pad_batches=True):
adjacency_tensor = tf.one_hot(batch[0], gan.edges)
node_tesor = tf.one_hot(batch[1], gan.nodes)
yield {gan.data_inputs[0]: adjacency_tensor, gan.data_inputs[1]:node_tesor}
#train model
gan.fit_gan(iterbatches(10), generator_steps=0.2, checkpoint_interval=5000)
#generate a predicted sample
g = gan.predict_gan_generator(1000)
#convert graphs to RDKit molecules
nmols = feat.defeaturize(g)
print("{} molecules generated".format(len(nmols)))
#remove invalid molecules
nmols = list(filter(lambda x: x is not None, nmols))
print("{} valid molecules".format(len(nmols)))
#get unique molecules
nmols = [Chem.MolFromSmiles(x) for x in list(set([Chem.MolToSmiles(z) for z in nmols]))]
print("{} unique molecules".format(len(nmols)))
#display unique molecules
Chem.Draw.MolsToGridImage(nmols)
```
|
github_jupyter
|
import tensorflow as tf
import deepchem as dc
import rdkit as rdkit
import rdkit.Chem as Chem
import rdkit.Chem.PandasTools as pt
import pandas as pd
import numpy as np
print('tf version:', tf.__version__, '\nGPU avilable?:', tf.config.list_physical_devices('GPU'))
print('rdkit version: ', rdkit.__version__)
print('DeepChem version', dc.__version__)
from layers.layers import GraphEncoderLayer
from tensorflow.keras import layers
data = pd.read_csv('./data/QM9_HAC_5_9_.csv')
reduced_data = list(data['SMILES'])
import random
random.shuffle(reduced_data)
mols = list(filter(lambda x: x is not None, [Chem.MolFromSmiles(x) for x in reduced_data]))
print('how many molecules obtained from smiles: ', len(mols))
import logging
from deepchem.utils.typing import RDKitAtom, RDKitBond, RDKitMol, List
from deepchem.feat.base_classes import MolecularFeaturizer
logger = logging.getLogger(__name__)
class GraphMatrix():
"""
This is class used to store data for MolGAN neural networks.
Parameters
----------
node_features: np.ndarray
Node feature matrix with shape [num_nodes, num_node_features]
edge_features: np.ndarray,
Edge feature matrix with shape [num_nodes, num_nodes]
Returns
-------
graph: GraphMatrix
A molecule graph with some features.
"""
def __init__(self,adjacency_matrix: np.ndarray, node_features: np.ndarray):
self.adjacency_matrix = adjacency_matrix
self.node_features = node_features
class MolGanFeaturizer(MolecularFeaturizer):
"""This class implements featurizer used with MolGAN de-novo molecular generation based on:
`MolGAN: An implicit generative model for small molecular graphs`<https://arxiv.org/abs/1805.11973>`_.
The default representation is in form of GraphMatrix object, being wrapper for two matrices containing atom and bond type information.
The class also provides reverse capabilities"""
def __init__(self,
max_atom_count:int = 9,
kekulize:bool = True,
bond_labels: List[RDKitBond] = None,
atom_labels: List[int] = None):
"""
Parameters
----------
max_atom_count: int, default 9
Maximum number of atoms used for creation of adjacency matrix, molecules cannot have more atoms than this number; implicit hydrogens do not count.
kekulize: bool, default True
Should molecules be kekulized; solves number of issues with defeaturization when used.
bond_labels: List[RDKitBond]
List containing types of bond used for generation of adjacency matrix
atom_labels: List[int]
List of atomic numbers used for generation of node features
"""
self.max_atom_count = max_atom_count
self.kekulize = kekulize
#bond labels
if bond_labels is None:
self.bond_labels = [
Chem.rdchem.BondType.ZERO
,Chem.rdchem.BondType.SINGLE
,Chem.rdchem.BondType.DOUBLE
,Chem.rdchem.BondType.TRIPLE
,Chem.rdchem.BondType.AROMATIC]
else:
self.bond_labels = bond_labels
#atom labels
if atom_labels is None:
self.atom_labels = [0, 6, 7, 8, 9] #C,N,O,F
else:
self.atom_labels = atom_labels
#create bond encoders and decoders
self.bond_encoder = {l: i for i, l in enumerate(self.bond_labels)}
self.bond_decoder = {i: l for i, l in enumerate(self.bond_labels)}
#create atom encoders and decoders
self.atom_encoder = {l: i for i, l in enumerate(self.atom_labels)}
self.atom_decoder = {i: l for i, l in enumerate(self.atom_labels)}
def _featurize(self, mol: RDKitMol) -> GraphMatrix:
"""Calculate adjacency matrix and nodes features for RDKitMol.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit mol object.
Returns
-------
graph: GraphMatrix
A molecule graph with some features.
"""
if self.kekulize:
Chem.Kekulize(mol)
A = np.zeros(shape=(self.max_atom_count, self.max_atom_count), dtype=np.float32)
bonds = mol.GetBonds()
begin, end = [b.GetBeginAtomIdx() for b in bonds], [b.GetEndAtomIdx() for b in bonds]
bond_type = [self.bond_encoder[b.GetBondType()] for b in bonds]
A[begin, end] = bond_type
A[end, begin] = bond_type
degree = np.sum(A[:mol.GetNumAtoms(), :mol.GetNumAtoms()], axis=-1)
X = np.array([self.atom_encoder[atom.GetAtomicNum()] for atom in mol.GetAtoms()] + [0] * (self.max_atom_count - mol.GetNumAtoms()), dtype=np.int32)
graph = GraphMatrix(A,X)
return graph if (degree > 0).all() else None
def _defeaturize(self, graph_matrix: GraphMatrix, sanitize:bool = True, cleanup=True) ->RDKitMol:
"""Recreate RDKitMol from GraphMatrix object. For working correctly same object needs to be used for featurization and defeaturization.
Parameters
----------
graph_matrix: GraphMatrix
GraphMatrix object.
sanitize: bool, default True
Should RDKit sanitization be included in the process.
cleanup: bool, default True
Splits salts and removes compounds with "*" atom types
Returns
-------
mol: RDKitMol object
RDKitMol object representing molecule.
"""
node_labels = graph_matrix.node_features
edge_labels = graph_matrix.adjacency_matrix
mol = Chem.RWMol()
for node_label in node_labels:
mol.AddAtom(Chem.Atom(self.atom_decoder[node_label]))
for start, end in zip(*np.nonzero(edge_labels)):
if start > end:
mol.AddBond(int(start), int(end), self.bond_decoder[edge_labels[start, end]])
if sanitize:
try:
Chem.SanitizeMol(mol)
except Exception:
mol = None
if cleanup:
try:
smiles = Chem.MolToSmiles(mol)
smiles = max(smiles.split('.'), key=len)
if "*" not in smiles:
mol = Chem.MolFromSmiles(smiles)
else:
mol = None
except Exception:
mol = None
return mol
def defeaturize(self, graphs, log_every_n=1000) -> np.ndarray:
"""Calculates molecules from correspoing GraphMatrix objects.
Parameters
----------
graphs: GraphMatrix / iterable
GraphMatrix object or corresponding iterable
log_every_n: int, default 1000
Logging messages reported every `log_every_n` samples.
Returns
-------
features: np.ndarray
A numpy array containing RDKitMol objext.
"""
# Special case handling of single molecule
if isinstance(graphs, GraphMatrix):
graphs = [graphs]
else:
# Convert iterables to list
graphs = list(graphs)
molecules = []
for i, gr in enumerate(graphs):
if i % log_every_n == 0:
logger.info("Featurizing datapoint %i" % i)
try:
molecules.append(self._defeaturize(gr))
except Exception as e:
logger.warning("Failed to defeaturize datapoint %d, %s. Appending empty array", i,gr)
logger.warning("Exception message: {}".format(e))
molecules.append(np.array([]))
molecules = np.asarray(molecules)
return molecules
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from deepchem.models import WGAN
from layers.layers import GraphEncoderLayer
class MolGAN(WGAN):
"""Model for automatic generation of compounds based on GAN architecture described by Nicola De Cao et al.
`MolGAN: An implicit generative model for small molecular graphs`<https://arxiv.org/abs/1805.11973>`_.
It uses adjacency matrix and node features as inputs, both need to be converted to one hot representation before use.
Examples
--------
gan = MolGAN(learning_rate=ExponentialDecay(0.001, 0.9, 5000))
dataset = dc.data.NumpyDataset([x.adjacency_matrix for x in limited],[x.node_features for x in limited])
def iterbatches(epochs):
for i in range(epochs):
for batch in dataset.iterbatches(batch_size=gan.batch_size, pad_batches=True):
adjacency_tensor = tf.one_hot(batch[0], gan.edges)
node_tesor = tf.one_hot(batch[1], gan.nodes)
yield {gan.data_inputs[0]: adjacency_tensor, gan.data_inputs[1]:node_tesor}
gan.fit_gan(iterbatches(10), generator_steps=0.2, checkpoint_interval=5000)
"""
def __init__(self
,edges: int = 5
,vertices:int = 9
,nodes: int = 5
,embedding_dim: int = 10
,dropout_rate: float = 0.
,name:str =''
, **kwargs):
"""
Parameters
----------
edges: int, default 5
Number of bond types includes BondType.Zero
vertices: int, default 9
Max number of atoms in adjacency and node features matrices
nodes: int, default 5
Number of atom types in node features matrix
embedding_dim: int, default 10
Size of noise input
dropout_rate: float, default = 0.
Rate of dropout used across whole model
name: str, default ''
Name of the model
"""
self.edges = edges
self.vertices = vertices
self.nodes =nodes
self.embedding_dim = embedding_dim
self.dropout_rate = dropout_rate
super(MolGAN, self).__init__(name=name, **kwargs)
def get_noise_input_shape(self):
return (self.embedding_dim,)
def get_data_input_shapes(self):
return [(self.vertices, self.vertices, self.edges),(self.vertices, self.nodes)]
def create_generator(self):
input_layer = layers.Input(shape=(self.embedding_dim,))
x = layers.Dense(128, activation='tanh')(input_layer)
x = layers.Dropout(self.dropout_rate)(x)
x = layers.Dense(256, activation='tanh')(x)
x = layers.Dropout(self.dropout_rate)(x)
x = layers.Dense(512, activation='tanh')(x)
x = layers.Dropout(self.dropout_rate)(x)
#EDGES LOGITS
edges_logits = layers.Dense(units=self.edges*self.vertices*self.vertices, activation=None)(x)
edges_logits = layers.Reshape((self.edges, self.vertices, self.vertices))(edges_logits)
matrix_transpose = layers.Permute((1,3,2))(edges_logits)
edges_logits = (edges_logits + matrix_transpose)/2
edges_logits = layers.Permute((2,3,1))(edges_logits)
edges_logits = layers.Dropout(self.dropout_rate)(edges_logits)
#used during training of the model
edges_softmax = tf.nn.softmax(edges_logits)
#NODES LOGITS
nodes_logits = layers.Dense(units=(self.vertices * self.nodes), activation=None)(x)
nodes_logits = layers.Reshape((self.vertices, self.nodes))(nodes_logits)
nodes_logits = layers.Dropout(self.dropout_rate)(nodes_logits)
#used during training of the model
nodes_softmax = tf.nn.softmax(nodes_logits)
#used to generate molecules, consider returning just logits and then use additonal layer when molecules needs to generated
#used for compound generation, consider removing this from this section and just return un
e_gumbel_logits = edges_logits - tf.math.log(- tf.math.log(tf.random.uniform(tf.shape(edges_logits), dtype=edges_logits.dtype)))
e_gumbel_argmax = tf.one_hot(tf.argmax(e_gumbel_logits, axis=-1), depth=e_gumbel_logits.shape[-1], dtype=e_gumbel_logits.dtype)
e_argmax = tf.argmax(e_gumbel_argmax, axis=-1)
#used for compound generation
n_gumbel_logits = nodes_logits - tf.math.log(- tf.math.log(tf.random.uniform(tf.shape(nodes_logits), dtype=nodes_logits.dtype)))
n_gumbel_argmax = tf.one_hot(tf.argmax(n_gumbel_logits, axis=-1), depth=n_gumbel_logits.shape[-1], dtype=n_gumbel_logits.dtype)
n_argmax = tf.argmax(n_gumbel_argmax, axis=-1)
#final model
return keras.Model(inputs = input_layer, outputs=[edges_softmax, nodes_softmax,e_argmax, n_argmax])
def create_discriminator(self):
adjacency_tensor= layers.Input(shape=(self.vertices, self.vertices, self.edges))
node_tensor = layers.Input(shape=(self.vertices, self.nodes))
graph = GraphEncoderLayer(units = [(128,64),128], dropout_rate= self.dropout_rate, edges=self.edges)([adjacency_tensor,node_tensor])
dense = layers.Dense(units=128, activation='tanh')(graph)
dense = layers.Dropout(self.dropout_rate)(dense)
dense = layers.Dense(units=64, activation='tanh')(dense)
dense = layers.Dropout(self.dropout_rate)(dense)
output = layers.Dense(units=1)(dense)
return keras.Model(inputs=[adjacency_tensor,node_tensor], outputs=[output])
def predict_gan_generator(self,
batch_size=1,
noise_input=None,
generator_index=0):
"""Use the GAN to generate a batch of samples.
Parameters
----------
batch_size: int
the number of samples to generate. If either noise_input or
conditional_inputs is specified, this argument is ignored since the batch
size is then determined by the size of that argument.
noise_input: array
the value to use for the generator's noise input. If None (the default),
get_noise_batch() is called to generate a random input, so each call will
produce a new set of samples.
generator_index: int
the index of the generator (between 0 and n_generators-1) to use for
generating the samples.
Returns
-------
An array (if the generator has only one output) or list of arrays (if it has
multiple outputs) containing the generated samples.
"""
if noise_input is not None:
batch_size = len(noise_input)
if noise_input is None:
noise_input = self.get_noise_batch(batch_size)
inputs = noise_input
_,_,adjacency_matrix, nodes_features = gan.generators[0](inputs, training=False)
graphs = [GraphMatrix(i,j) for i,j in zip(adjacency_matrix.numpy(),nodes_features.numpy())]
return graphs
#create featurizer
feat = MolGanFeaturizer()
#featurize molecules
limited = feat.featurize(mols)
#Remove empty objects
limited = list(filter(lambda x: x is not None, limited))
#create dataset
dataset = dc.data.NumpyDataset([x.adjacency_matrix for x in limited],[x.node_features for x in limited])
from deepchem.models.optimizers import ExponentialDecay
#create model
gan = MolGAN(learning_rate=ExponentialDecay(0.001, 0.9, 5000))
#prepare data for training
def iterbatches(epochs):
for i in range(epochs):
for batch in dataset.iterbatches(batch_size=gan.batch_size, pad_batches=True):
adjacency_tensor = tf.one_hot(batch[0], gan.edges)
node_tesor = tf.one_hot(batch[1], gan.nodes)
yield {gan.data_inputs[0]: adjacency_tensor, gan.data_inputs[1]:node_tesor}
#train model
gan.fit_gan(iterbatches(10), generator_steps=0.2, checkpoint_interval=5000)
#generat prediced sample
g = gan.predict_gan_generator(1000)
#convert graphs to RDKitmolecules
nmols = feat.defeaturize(g)
print("{} molecules generated".format(len(nmols)))
#remove invalid moles
nmols = list(filter(lambda x: x is not None, nmols))
print ("{} valid molecules".format(len(nmols)))
#get unique molecules
nmols = [Chem.MolFromSmiles(x) for x in list(set([Chem.MolToSmiles(z) for z in nmols]))]
print("{} unique molecules".format(len(nmols)))
#display unique molecules
Chem.Draw.MolsToGridImage(nmols)
| 0.786049 | 0.52476 |
# MSDS 620 Week 2 Lab
## Natalia Weakly
## Merging, Joining and Null Values
### 1/27/2019
### Task 2
```
import pandas as pd
import numpy as np
#load the data into a panda dataframe
#Air quality dataset downloaded from http://archive.ics.uci.edu/ml/machine-learning-databases/00360/
df = pd.read_csv('AirQualityUCI.csv', sep=';', decimal=",")
#look at the first few rows to make sure the data was loaded correctly
df.head()
#Look at the last few rows
df.tail()
#use info() function to find the number of missing values in each column
df.info()
```
The data was loaded into a dataframe with 9471 observations of 17 variables. However, this dataset contains a large number of missing values. The last two unnamed columns do not have any non-null values. In addition, the dataframe contains several empty rows in the end. According to the metadata for this dataset, it should contain 9358 observations of 15 variables. These empty rows and columns are likely a result of a technical error introduced while merging sensor data in one data file. So, we can just delete the empty columns and rows.
```
#Drop the last two columns
df.drop(['Unnamed: 15', 'Unnamed: 16'], axis=1, inplace=True)
#Check results
df.head()
#look for the missing values again
df.info()
#make sure that rows [9357:9471] are empty
df[9357:9471]
#drop empty rows
df_dropna = df.dropna()
df_dropna.info()
```
The df_dropna dataframe now contains 9357 non-null observations of 15 variables. However, according to the dataset description, missing values were recorded as '-200'. It is likely that malfunctioning sensors are to blame for these missing measurements. These values, while not technically NaN, would still skew the results of the analysis. So, it is better to find and address them.
```
#replace -200.0 with NaN
df_withnulls = df_dropna.replace(-200.0, np.NaN)
#How many nulls were introduced?
df_withnulls.info()
```
As the output above shows, the NMHC(GT) column contains only 914 non-null values; 8,443 (roughly 90%) of its values are missing. Imputing these values might introduce bias in the dataset. So, in this case, it might be better just to drop this column.
```
#drop NMHC(GT) column
df_withnulls.drop('NMHC(GT)', axis=1, inplace=True)
#Check for remaining null values
df_withnulls.info()
```
All remaining columns, except for "Date" and "Time", have missing values. Since these measurements represent time series data from atmospheric sensors, it is likely that each data point is related to the points before and after it. So, we can use a variety of imputation methods: filling with the column mean (or median, or most frequent value), filling forward or backward, and interpolating. For practice purposes, I will use different methods for different columns.
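As a minimal illustration of these options on a toy Series (this snippet is only for demonstration and is not part of the air quality workflow):
```
import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, np.nan, 4.0])
s.fillna(method='ffill')   # forward fill:  1.0, 1.0, 1.0, 4.0
s.fillna(method='bfill')   # backward fill: 1.0, 4.0, 4.0, 4.0
s.interpolate()            # linear interpolation: 1.0, 2.0, 3.0, 4.0
s.fillna(s.mean())         # mean imputation: 1.0, 2.5, 2.5, 4.0
```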
```
#Fill forward the CO(GT) column - replace missing values with the last known value
df_withnulls.iloc[0:9356, 2].fillna(method='ffill', inplace=True)
#Check results
df_withnulls.info()
#PT08.S1(CO) column - fill backwards - use the next valid observation to replace a missing value
df_withnulls.iloc[0:9356, 3].fillna(method='bfill', inplace=True)
#Use interpolation to fill missing values - 'C6H6(GT)' column
#method "pad" - fill in NaNs using existing values
df_withnulls.iloc[:,4].interpolate(method='pad', inplace=True)
#Check results
df_withnulls.info()
#copy remaining columns
df_toimpute=df_withnulls.iloc[:, 5:14].values
df_toimpute.shape
#Use SimpleImputer from sklearn
from sklearn.impute import SimpleImputer
#create an imputer using a mean value for a column
imputer=SimpleImputer(missing_values=np.nan, strategy='mean')
#fit it to the data
imputer.fit(df_toimpute)
#transform the data and convert it back to a panda data frame
df_nonulls =pd.DataFrame(imputer.transform(df_toimpute))
#check results
df_nonulls.info()
#add column names to the transformed data frame
df_nonulls.columns = ['PT08.S2(NMHC)', 'NOx(GT)', 'PT08.S3(NOx)', 'NO2(GT)', 'PT08.S4(NO2)', 'PT08.S5(O3)', 'T', 'RH', 'AH' ]
#Copy initial Date, Time columns and columns to which we applied filling forward, backward and interpolation
df_final = df_withnulls[['Date', 'Time', 'CO(GT)', 'PT08.S1(CO)', 'C6H6(GT)']]
#concatenate the two data frames cleaned of null values into one dataframe
df_result=pd.concat([df_final, df_nonulls], axis=1)
#check the first five rows
df_result.head()
#check the last five rows
df_result.tail()
#Display detailed information about the structure of the data frame
#to make sure that there are no null values
df_result.info()
#Display summary statistics for the variables (columns)
#No '-200.0' or other default values present
df_result.describe()
```
|
github_jupyter
|
import pandas as pd
import numpy as np
#load the data into a panda dataframe
#Air quality dataset downloaded from http://archive.ics.uci.edu/ml/machine-learning-databases/00360/
df = pd.read_csv('AirQualityUCI.csv', sep=';', decimal=",")
#look at the first few rows to make sure the data was loaded correctly
df.head()
#Look at the last few rows
df.tail()
#use info() function to find the number of missing values in each column
df.info()
#Drop the last two columns
df.drop(['Unnamed: 15', 'Unnamed: 16'], axis=1, inplace=True)
#Check results
df.head()
#look for the missing values again
df.info()
#make sure that rows [9357:9471] are empty
df[9357:9471]
#drop empty rows
df_dropna = df.dropna()
df_dropna.info()
#replace -200.0 with NaN
df_withnulls = df_dropna.replace(-200.0, np.NaN)
#How many nulls were introduced?
df_withnulls.info()
#drop NMHC(GT) column
df_withnulls.drop('NMHC(GT)', axis=1, inplace=True)
#Check for remaining null values
df_withnulls.info()
#Fill forward CO(GT) column - replace values with the last known value
df_withnulls.iloc[0:9356, 2].fillna(method='ffill', inplace=True)
#Check results
df_withnulls.info()
#PT09.S1(CO) column - fill backwards - uses the newest values to replace a missing value
df_withnulls.iloc[0:9356, 3].fillna(method='bfill', inplace=True)
#Use interpolation to fill missing values - 'C6H6(GT)' column
#method "pad" - fill in NaNs using existing values
df_withnulls.iloc[:,4].interpolate(method='pad', inplace=True)
#Check results
df_withnulls.info()
#copy remaining columns
df_toimpute=df_withnulls.iloc[:, 5:14].values
df_toimpute.shape
#Use SimpleImputer from sklearn
from sklearn.impute import SimpleImputer
#create an imputer using a mean value for a column
imputer=SimpleImputer(missing_values=np.nan, strategy='mean')
#fit it to the data
imputer.fit(df_toimpute)
#transform the data and convert it back to a panda data frame
df_nonulls =pd.DataFrame(imputer.transform(df_toimpute))
#check results
df_nonulls.info()
#add column names to the transformed data frame
df_nonulls.columns = ['PT08.S2(NMHC)', 'NOx(GT)', 'PT08.S3(NOx)', 'NO2(GT)', 'PT08.S4(NO2)', 'PT08.S5(O3)', 'T', 'RH', 'AH' ]
#Copy initial Date, Time columns and columns to which we applied filling forward, backward and interpolation
df_final = df_withnulls[['Date', 'Time', 'CO(GT)', 'PT08.S1(CO)', 'C6H6(GT)']]
#concatenate the two data franes cleaned of null values in one dataframe
df_result=pd.concat([df_final, df_nonulls], axis=1)
#chech the first five rows
df_result.head()
#check the last five rows
df_result.tail()
#Display detailed information about the structure of the data frame
#to make sure that there are no null values
df_result.info()
#Display summary statistics for the variables (columns)
#No '-200.0' or other default values resent
df_result.describe()
| 0.572842 | 0.965283 |
---
_You are currently looking at **version 1.1** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-machine-learning/resources/bANLa) course resource._
---
## Assignment 4 - Understanding and Predicting Property Maintenance Fines
This assignment is based on a data challenge from the Michigan Data Science Team ([MDST](http://midas.umich.edu/mdst/)).
The Michigan Data Science Team ([MDST](http://midas.umich.edu/mdst/)) and the Michigan Student Symposium for Interdisciplinary Statistical Sciences ([MSSISS](https://sites.lsa.umich.edu/mssiss/)) have partnered with the City of Detroit to help solve one of the most pressing problems facing Detroit - blight. [Blight violations](http://www.detroitmi.gov/How-Do-I/Report/Blight-Complaint-FAQs) are issued by the city to individuals who allow their properties to remain in a deteriorated condition. Every year, the city of Detroit issues millions of dollars in fines to residents and every year, many of these fines remain unpaid. Enforcing unpaid blight fines is a costly and tedious process, so the city wants to know: how can we increase blight ticket compliance?
The first step in answering this question is understanding when and why a resident might fail to comply with a blight ticket. This is where predictive modeling comes in. For this assignment, your task is to predict whether a given blight ticket will be paid on time.
All data for this assignment has been provided to us through the [Detroit Open Data Portal](https://data.detroitmi.gov/). **Only the data already included in your Coursera directory can be used for training the model for this assignment.** Nonetheless, we encourage you to look into data from other Detroit datasets to help inform feature creation and model selection. We recommend taking a look at the following related datasets:
* [Building Permits](https://data.detroitmi.gov/Property-Parcels/Building-Permits/xw2a-a7tf)
* [Trades Permits](https://data.detroitmi.gov/Property-Parcels/Trades-Permits/635b-dsgv)
* [Improve Detroit: Submitted Issues](https://data.detroitmi.gov/Government/Improve-Detroit-Submitted-Issues/fwz3-w3yn)
* [DPD: Citizen Complaints](https://data.detroitmi.gov/Public-Safety/DPD-Citizen-Complaints-2016/kahe-efs3)
* [Parcel Map](https://data.detroitmi.gov/Property-Parcels/Parcel-Map/fxkw-udwf)
___
We provide you with two data files for use in training and validating your models: train.csv and test.csv. Each row in these two files corresponds to a single blight ticket, and includes information about when, why, and to whom each ticket was issued. The target variable is compliance, which is True if the ticket was paid early, on time, or within one month of the hearing date, False if the ticket was paid after the hearing date or not at all, and Null if the violator was found not responsible. Compliance, as well as a handful of other variables that will not be available at test-time, are only included in train.csv.
Note: All tickets where the violators were found not responsible are not considered during evaluation. They are included in the training set as an additional source of data for visualization, and to enable unsupervised and semi-supervised approaches. However, they are not included in the test set.
<br>
**File descriptions** (Use only this data for training your model!)
readonly/train.csv - the training set (all tickets issued 2004-2011)
readonly/test.csv - the test set (all tickets issued 2012-2016)
readonly/addresses.csv & readonly/latlons.csv - mapping from ticket id to addresses, and from addresses to lat/lon coordinates.
Note: misspelled addresses may be incorrectly geolocated.
<br>
**Data fields**
train.csv & test.csv
ticket_id - unique identifier for tickets
agency_name - Agency that issued the ticket
inspector_name - Name of inspector that issued the ticket
violator_name - Name of the person/organization that the ticket was issued to
violation_street_number, violation_street_name, violation_zip_code - Address where the violation occurred
mailing_address_str_number, mailing_address_str_name, city, state, zip_code, non_us_str_code, country - Mailing address of the violator
ticket_issued_date - Date and time the ticket was issued
hearing_date - Date and time the violator's hearing was scheduled
violation_code, violation_description - Type of violation
disposition - Judgment and judgment type
fine_amount - Violation fine amount, excluding fees
admin_fee - $20 fee assigned to responsible judgments
state_fee - $10 fee assigned to responsible judgments
late_fee - 10% fee assigned to responsible judgments
discount_amount - discount applied, if any
clean_up_cost - DPW clean-up or graffiti removal cost
judgment_amount - Sum of all fines and fees
grafitti_status - Flag for graffiti violations
train.csv only
payment_amount - Amount paid, if any
payment_date - Date payment was made, if it was received
payment_status - Current payment status as of Feb 1 2017
balance_due - Fines and fees still owed
collection_status - Flag for payments in collections
compliance [target variable for prediction]
Null = Not responsible
0 = Responsible, non-compliant
1 = Responsible, compliant
compliance_detail - More information on why each ticket was marked compliant or non-compliant
___
## Evaluation
Your predictions will be given as the probability that the corresponding blight ticket will be paid on time.
The evaluation metric for this assignment is the Area Under the ROC Curve (AUC).
Your grade will be based on the AUC score computed for your classifier. A model with an AUROC of 0.7 passes this assignment; over 0.75 will receive full points.
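For reference, the metric can be checked locally with scikit-learn (a minimal sketch on toy labels and scores, not the grader itself):
```
from sklearn.metrics import roc_auc_score

y_true = [0, 0, 1, 1]
y_scores = [0.1, 0.4, 0.35, 0.8]
print(roc_auc_score(y_true, y_scores))  # 0.75 for this toy example
```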
___
For this assignment, create a function that trains a model to predict blight ticket compliance in Detroit using `readonly/train.csv`. Using this model, return a series of length 61001 with the data being the probability that each corresponding ticket from `readonly/test.csv` will be paid, and the index being the ticket_id.
Example:
ticket_id
284932 0.531842
285362 0.401958
285361 0.105928
285338 0.018572
...
376499 0.208567
376500 0.818759
369851 0.018528
Name: compliance, dtype: float32
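One way to produce output in exactly this shape (a sketch with toy stand-in values; in practice the probabilities come from your trained model and the index from test.csv):
```
import numpy as np
import pandas as pd

# toy stand-ins for the model output and the test ticket ids
probs = np.array([0.531842, 0.401958, 0.105928], dtype='float32')
ticket_ids = [284932, 285362, 285361]

result = pd.Series(probs, index=pd.Index(ticket_ids, name='ticket_id'), name='compliance')
print(result)
```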
### Hints
* Make sure your code is working before submitting it to the autograder.
* Print out your result to see whether there is anything weird (e.g., all probabilities are the same).
* Generally the total runtime should be less than 10 mins. You should NOT use Neural Network related classifiers (e.g., MLPClassifier) in this question.
* Try to avoid global variables. If you have other functions besides blight_model, you should move those functions inside the scope of blight_model.
* Refer to the pinned threads in Week 4's discussion forum when there is something you cannot figure out.
```
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import roc_auc_score
train = pd.read_csv('train.csv', encoding = 'ISO-8859-1')
test = pd.read_csv('test.csv')
addresses = pd.read_csv('addresses.csv')
latlons = pd.read_csv('latlons.csv')
train = train[train['compliance'].notnull()]
train = pd.merge(train, pd.merge(addresses, latlons, on='address'), on='ticket_id')
test = pd.merge(test, pd.merge(addresses, latlons, on='address'), on='ticket_id')
lat_mean = train['lat'].mean()
train['lat'].fillna(lat_mean, inplace=True)
test['lat'].fillna(lat_mean, inplace=True)
lon_mean = train['lon'].mean()
train['lon'].fillna(lon_mean, inplace=True)
test['lon'].fillna(lon_mean, inplace=True)
violation_code_mean = train.groupby('violation_code').compliance.mean()
train['violation_code_enc'] = train['violation_code'].map(violation_code_mean)
test['violation_code_enc'] = test['violation_code'].map(violation_code_mean)
test['violation_code_enc'].fillna(test['violation_code_enc'].mean(), inplace=True)
disposition_mean = train.groupby('disposition').compliance.mean()
train['disposition_enc'] = train['disposition'].map(disposition_mean)
test['disposition_enc'] = test['disposition'].map(disposition_mean)
test['disposition_enc'].fillna(test['disposition_enc'].mean(), inplace=True)
train_drop_columns = [
'ticket_id',
'agency_name',
'inspector_name',
'violator_name',
'violation_street_number',
'violation_street_name',
'violation_zip_code',
'mailing_address_str_number',
'mailing_address_str_name',
'city',
'state',
'zip_code',
'non_us_str_code',
'country',
'ticket_issued_date',
'hearing_date',
'violation_code',
'violation_description',
'disposition',
'payment_amount',
'balance_due',
'payment_date',
'payment_status',
'collection_status',
'grafitti_status',
'compliance_detail',
'address'
]
train.drop(train_drop_columns, axis=1, inplace=True)
test_drop_columns = [
'agency_name',
'inspector_name',
'violator_name',
'violation_street_number',
'violation_street_name',
'violation_zip_code',
'mailing_address_str_number',
'mailing_address_str_name',
'city',
'state',
'zip_code',
'non_us_str_code',
'country',
'ticket_issued_date',
'hearing_date',
'violation_code',
'violation_description',
'disposition',
'grafitti_status',
'address'
]
test.drop(test_drop_columns, axis=1, inplace=True)
X = train.drop(['compliance'], axis=1)
y = train['compliance']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
rf = RandomForestRegressor(random_state=0)
grid_values = {'n_estimators': [30, 50, 70], 'max_depth': [5, 10, 15]}
grid_clf_auc = GridSearchCV(rf, param_grid=grid_values, scoring='roc_auc', cv=5)
grid_clf_auc.fit(X_train, y_train)
y_decision_fn_scores_auc = grid_clf_auc.predict(X_test)
print('Test set AUC: ', roc_auc_score(y_test, y_decision_fn_scores_auc))
print('Grid best parameter (max. AUC): ', grid_clf_auc.best_params_)
print('Grid best score (AUC): ', grid_clf_auc.best_score_)
import pandas as pd
import numpy as np
def blight_model():
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import roc_auc_score
# load data
train = pd.read_csv('train.csv', encoding = 'ISO-8859-1')
test = pd.read_csv('test.csv')
addresses = pd.read_csv('addresses.csv')
latlons = pd.read_csv('latlons.csv')
# Preprocessing
train = train[train['compliance'].notnull()]
train = pd.merge(train, pd.merge(addresses, latlons, on='address'), on='ticket_id')
test = pd.merge(test, pd.merge(addresses, latlons, on='address'), on='ticket_id')
lat_mean = train['lat'].mean()
train['lat'].fillna(lat_mean, inplace=True)
test['lat'].fillna(lat_mean, inplace=True)
lon_mean = train['lon'].mean()
train['lon'].fillna(lon_mean, inplace=True)
test['lon'].fillna(lon_mean, inplace=True)
# Mean encoding
violation_code_mean = train.groupby('violation_code').compliance.mean()
train['violation_code_enc'] = train['violation_code'].map(violation_code_mean)
test['violation_code_enc'] = test['violation_code'].map(violation_code_mean)
test['violation_code_enc'].fillna(test['violation_code_enc'].mean(), inplace=True)
disposition_mean = train.groupby('disposition').compliance.mean()
train['disposition_enc'] = train['disposition'].map(disposition_mean)
test['disposition_enc'] = test['disposition'].map(disposition_mean)
test['disposition_enc'].fillna(test['disposition_enc'].mean(), inplace=True)
# drop some unused columns
train_drop_columns = [
'ticket_id',
'agency_name',
'inspector_name',
'violator_name',
'violation_street_number',
'violation_street_name',
'violation_zip_code',
'mailing_address_str_number',
'mailing_address_str_name',
'city',
'state',
'zip_code',
'non_us_str_code',
'country',
'ticket_issued_date',
'hearing_date',
'violation_code',
'violation_description',
'disposition',
'payment_amount',
'balance_due',
'payment_date',
'payment_status',
'collection_status',
'grafitti_status',
'compliance_detail',
'address'
]
train.drop(train_drop_columns, axis=1, inplace=True)
test_drop_columns = [
'agency_name',
'inspector_name',
'violator_name',
'violation_street_number',
'violation_street_name',
'violation_zip_code',
'mailing_address_str_number',
'mailing_address_str_name',
'city',
'state',
'zip_code',
'non_us_str_code',
'country',
'ticket_issued_date',
'hearing_date',
'violation_code',
'violation_description',
'disposition',
'grafitti_status',
'address'
]
test.drop(test_drop_columns, axis=1, inplace=True)
# Train the model
X = train.drop(['compliance'], axis=1)
y = train['compliance']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Based on previous grid search
rf = RandomForestRegressor(random_state=0, n_estimators=70, max_depth=10)
rf.fit(X_train, y_train)
y_decision_fn_scores_auc = rf.predict(X_test)
print('Test set AUC: ', roc_auc_score(y_test, y_decision_fn_scores_auc))
# Create test output
test_prob = rf.predict(test.drop(['ticket_id'], axis=1))
return pd.DataFrame(test_prob, index=test['ticket_id'])
blight_model()
```
|
github_jupyter
|
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import roc_auc_score
train = pd.read_csv('train.csv', encoding = 'ISO-8859-1')
test = pd.read_csv('test.csv')
addresses = pd.read_csv('addresses.csv')
latlons = pd.read_csv('latlons.csv')
train = train[train['compliance'].notnull()]
train = pd.merge(train, pd.merge(addresses, latlons, on='address'), on='ticket_id')
test = pd.merge(test, pd.merge(addresses, latlons, on='address'), on='ticket_id')
lat_mean = train['lat'].mean()
train['lat'].fillna(lat_mean, inplace=True)
test['lat'].fillna(lat_mean, inplace=True)
lon_mean = train['lon'].mean()
train['lon'].fillna(lon_mean, inplace=True)
test['lon'].fillna(lon_mean, inplace=True)
violation_code_mean = train.groupby('violation_code').compliance.mean()
train['violation_code_enc'] = train['violation_code'].map(violation_code_mean)
test['violation_code_enc'] = test['violation_code'].map(violation_code_mean)
test['violation_code_enc'].fillna(test['violation_code_enc'].mean(), inplace=True)
disposition_mean = train.groupby('disposition').compliance.mean()
train['disposition_enc'] = train['disposition'].map(disposition_mean)
test['disposition_enc'] = test['disposition'].map(disposition_mean)
test['disposition_enc'].fillna(test['disposition_enc'].mean(), inplace=True)
train_drop_columns = [
'ticket_id',
'agency_name',
'inspector_name',
'violator_name',
'violation_street_number',
'violation_street_name',
'violation_zip_code',
'mailing_address_str_number',
'mailing_address_str_name',
'city',
'state',
'zip_code',
'non_us_str_code',
'country',
'ticket_issued_date',
'hearing_date',
'violation_code',
'violation_description',
'disposition',
'payment_amount',
'balance_due',
'payment_date',
'payment_status',
'collection_status',
'grafitti_status',
'compliance_detail',
'address'
]
train.drop(train_drop_columns, axis=1, inplace=True)
test_drop_columns = [
'agency_name',
'inspector_name',
'violator_name',
'violation_street_number',
'violation_street_name',
'violation_zip_code',
'mailing_address_str_number',
'mailing_address_str_name',
'city',
'state',
'zip_code',
'non_us_str_code',
'country',
'ticket_issued_date',
'hearing_date',
'violation_code',
'violation_description',
'disposition',
'grafitti_status',
'address'
]
test.drop(test_drop_columns, axis=1, inplace=True)
X = train.drop(['compliance'], axis=1)
y = train['compliance']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
rf = RandomForestRegressor(random_state=0)
grid_values = {'n_estimators': [30, 50, 70], 'max_depth': [5, 10, 15]}
grid_clf_auc = GridSearchCV(rf, param_grid=grid_values, scoring='roc_auc', cv=5)
grid_clf_auc.fit(X_train, y_train)
y_decision_fn_scores_auc = grid_clf_auc.predict(X_test)
print('Test set AUC: ', roc_auc_score(y_test, y_decision_fn_scores_auc))
print('Grid best parameter (max. AUC): ', grid_clf_auc.best_params_)
print('Grid best score (AUC): ', grid_clf_auc.best_score_)
import pandas as pd
import numpy as np
def blight_model():
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import roc_auc_score
# load data
train = pd.read_csv('train.csv', encoding = 'ISO-8859-1')
test = pd.read_csv('test.csv')
addresses = pd.read_csv('addresses.csv')
latlons = pd.read_csv('latlons.csv')
# Preprocessing
train = train[train['compliance'].notnull()]
train = pd.merge(train, pd.merge(addresses, latlons, on='address'), on='ticket_id')
test = pd.merge(test, pd.merge(addresses, latlons, on='address'), on='ticket_id')
lat_mean = train['lat'].mean()
train['lat'].fillna(lat_mean, inplace=True)
test['lat'].fillna(lat_mean, inplace=True)
lon_mean = train['lon'].mean()
train['lon'].fillna(lon_mean, inplace=True)
test['lon'].fillna(lon_mean, inplace=True)
# Mean encoding
violation_code_mean = train.groupby('violation_code').compliance.mean()
train['violation_code_enc'] = train['violation_code'].map(violation_code_mean)
test['violation_code_enc'] = test['violation_code'].map(violation_code_mean)
test['violation_code_enc'].fillna(test['violation_code_enc'].mean(), inplace=True)
disposition_mean = train.groupby('disposition').compliance.mean()
train['disposition_enc'] = train['disposition'].map(disposition_mean)
test['disposition_enc'] = test['disposition'].map(disposition_mean)
test['disposition_enc'].fillna(test['disposition_enc'].mean(), inplace=True)
# drop some unused columns
train_drop_columns = [
'ticket_id',
'agency_name',
'inspector_name',
'violator_name',
'violation_street_number',
'violation_street_name',
'violation_zip_code',
'mailing_address_str_number',
'mailing_address_str_name',
'city',
'state',
'zip_code',
'non_us_str_code',
'country',
'ticket_issued_date',
'hearing_date',
'violation_code',
'violation_description',
'disposition',
'payment_amount',
'balance_due',
'payment_date',
'payment_status',
'collection_status',
'grafitti_status',
'compliance_detail',
'address'
]
train.drop(train_drop_columns, axis=1, inplace=True)
test_drop_columns = [
'agency_name',
'inspector_name',
'violator_name',
'violation_street_number',
'violation_street_name',
'violation_zip_code',
'mailing_address_str_number',
'mailing_address_str_name',
'city',
'state',
'zip_code',
'non_us_str_code',
'country',
'ticket_issued_date',
'hearing_date',
'violation_code',
'violation_description',
'disposition',
'grafitti_status',
'address'
]
test.drop(test_drop_columns, axis=1, inplace=True)
# Train the model
X = train.drop(['compliance'], axis=1)
y = train['compliance']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Based on previous grid search
rf = RandomForestRegressor(random_state=0, n_estimators=70, max_depth=10)
rf.fit(X_train, y_train)
y_decision_fn_scores_auc = rf.predict(X_test)
print('Test set AUC: ', roc_auc_score(y_test, y_decision_fn_scores_auc))
# Create test output
test_prob = rf.predict(test.drop(['ticket_id'], axis=1))
return pd.DataFrame(test_prob, index=test['ticket_id'])
blight_model()
| 0.5144 | 0.942082 |
# Monty Python Integrator
### Written by Eric Griswold, Peter Willits, Daniel Douty and Yaman Ibrahim
#### Rather than working individually on different parts of the program, we all got together and worked on one jupyter notebook simultaneously.
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
def f(x):
return np.cos(x) #input any integrable function here!
```
### The following function finds the range of the given function on the given domain, and adds 0.5 to each side for aesthetic purposes.
```
c = 0
d = 0
def rangefinder(f, lowerbound, upperbound):
xvals = np.linspace(lowerbound, upperbound, 10000)
yvals = []
for i in xvals:
yvals.append(f(i))
ymin = min(yvals)
ymax = max(yvals)
global c
c = ymin - .5
global d
d = ymax + .5
return c, d
```
### This function creates random points for the Monte Carlo integral, classifies them by position, and returns the fraction of points that lie between the curve and the x-axis.
```
def notplotter(f, lowerbound, upperbound, n):
x = np.random.uniform(lowerbound, upperbound,n)
y = np.random.uniform(c,d,n)
under = np.where((f(x) > y) & (y > 0))[0]
over = np.where((f(x) < y) & (y < 0))[0]
outsideover = np.where((f(x) >= y) & (y < 0))[0]
outsideunder = np.where((f(x) <= y) & (y > 0))[0]
underlen = len(under)
overlen = len(over)
outsideunderlen = len(outsideunder)
outsideoverlen = len(outsideover)
frac = (underlen-overlen)/(underlen+overlen+outsideunderlen+outsideoverlen)
return frac
```
### This function does the same thing, but it also plots the points, along with the function and x-axis. We made two similar functions because we needed the simpler form of it above so we could efficiently use its output in calculations, while still keeping this version that outputs the plot.
```
def plotter(f, lowerbound, upperbound, n):
x = np.random.uniform(lowerbound, upperbound,n)
y = np.random.uniform(c,d,n)
line = np.linspace(lowerbound, upperbound, 10000)
under = np.where((f(x) > y) & (y > 0))[0]
over = np.where((f(x) < y) & (y < 0))[0]
outsideover = np.where((f(x) >= y) & (y < 0))[0]
outsideunder = np.where((f(x) <= y) & (y > 0))[0]
underlen = len(under)
overlen = len(over)
outsideunderlen = len(outsideunder)
outsideoverlen = len(outsideover)
fig = plt.figure(figsize=(7,7))
plt.xlim([lowerbound, upperbound])
plt.ylim([c,d])
plt.plot(x[under],y[under],',', color = "green")
plt.plot(x[over],y[over],',', color = "green")
plt.plot(x[outsideunder],y[outsideunder],',', color = "red")
plt.plot(x[outsideover],y[outsideover],',', color = "red")
plt.plot(line,f(line),',', color = 'black')
plt.plot(line, f(line)-f(line),',', color = 'black')
plt.show()
frac = (underlen-overlen)/(underlen+overlen+outsideunderlen+outsideoverlen)
return frac
```
### This is the basic integral calculation.
```
def integrator(f, lowerbound, upperbound, n):
rangefinder(f, lowerbound, upperbound)
frac = notplotter(f, lowerbound, upperbound, n)
area = (d-c)*(upperbound-lowerbound)
integral = area*frac
return integral
```
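As a quick sanity check (a usage sketch; the exact output wobbles from run to run because the sample points are random), the estimate should land near the true value of the integral of cos(x) on [0, π/2], which is 1:
```
integrator(np.cos, 0, np.pi/2, 100000)  # typically returns a value close to 1.0
```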
### This is the integral calculation using the function that graphs the given function and Monte Carlo points, which satisfies parts 1 and 3 of the assignment.
```
def integratorplotter(f, lowerbound, upperbound, n):
rangefinder(f, lowerbound, upperbound)
frac = plotter(f, lowerbound, upperbound, n)
area = (d-c)*(upperbound-lowerbound)
integral = area*frac
print ("F({}, {}) found to be {}".format(lowerbound, upperbound, integral))
return integral
def integratortolerance(f, lowerbound, upperbound, n):
rangefinder(f, lowerbound, upperbound)
frac = notplotter(f, lowerbound, upperbound, n)
area = (d-c)*(upperbound-lowerbound)
integral = area*frac
integralarray = np.zeros(100)
for i in range (100):
integralarray[i] = integrator(f, lowerbound, upperbound, n)
return integralarray
def toleranceplot(f, lowerbound, upperbound):
n = 100
integralarray = np.zeros(100)
for i in range (100):
narray = np.full(100, n)
for i in range (100):
integralarray[i] = integrator(f, lowerbound, upperbound, n)
plt.plot (narray, integralarray,'.', color = 'green', alpha=0.02)
n += 1000
plt.xlim([90, 100000])
    plt.show()
return
def tolerance(f, lowerbound, upperbound, n):
rangefinder(f, lowerbound, upperbound)
frac = notplotter(f, lowerbound, upperbound, n)
area = (d-c)*(upperbound-lowerbound)
integral = area*frac
n += 10000
frac2 = notplotter(f, lowerbound, upperbound, n)
area2 = (d-c)*(upperbound-lowerbound)
moreaccurateintegral = area2*frac2
tolerance = moreaccurateintegral-integral
return tolerance
def finalintegrator(f, lowerbound, upperbound, desired_tolerance):
n = 1000
counter = 1
t = abs(tolerance(f, lowerbound, upperbound, n))
while (t > desired_tolerance):
n += 1000
counter += 1
t = abs(tolerance(f, lowerbound, upperbound, n))
integratorplotter(f, lowerbound, upperbound, n)
return "specified tolerance reached in {} iterations, using {} points".format(counter, n)
```
### Specify the integration bounds (domain) and the desired tolerance here
```
finalintegrator(f, 0, 1.75, .0001) # (function, lowerbound, upperbound, desired tolerance)
```
### This takes a few minutes to run, but it graphs the convergence of the integral values calculated with the Monte Carlo method, with the number of points used increasing along the x-axis.
```
toleranceplot(np.cos, 0, 1.75)
```
```
%matplotlib inline
from time import time
import os.path as op
import gzip
import numpy as np
import tensorflow as tf
import tensorflow.contrib.eager as tfe
import matplotlib.pyplot as plt
```
## Computational environment
```
try:
tfe.enable_eager_execution()
except ValueError:
pass
def get_data_format():
return 'channels_first' if tfe.num_gpus() else 'channels_last'
def get_device():
return "gpu:0" if tfe.num_gpus() else "cpu:0"
print(get_device(), get_data_format())
```
## Data preparation
```
from keras.datasets import fashion_mnist
(images_train, labels_train), (images_test, labels_test) = fashion_mnist.load_data()
TARGET_NAMES = [
'T-shirt/top',
'Trouser',
'Pullover',
'Dress',
'Coat',
'Sandal',
'Shirt',
'Sneaker',
'Bag',
'Ankle boot',
]
images_train.shape
def get_batch_image_shape(image_size=(28, 28), data_format=None):
if data_format is None:
data_format = get_data_format()
if data_format == 'channels_first':
return (-1, 1) + image_size
elif data_format == 'channels_last':
return (-1,) + image_size + (1,)
else:
raise ValueError('invalid format: %r' % data_format)
get_batch_image_shape((28, 28))
images_train = images_train.reshape(get_batch_image_shape((28, 28)))
images_train.shape
images_test = images_test.reshape(get_batch_image_shape((28, 28)))
images_test.shape
images_train.dtype
labels_train.shape
def plot_sample_gallery(images, labels):
plt.figure(figsize=(10, 8))
if hasattr(images, 'numpy'):
images = images.numpy()
for i, image, label in zip(range(12), images, labels):
plt.subplot(3, 4, i + 1)
plt.imshow(image.reshape(28, 28), cmap=plt.cm.gray)
plt.title(TARGET_NAMES[label])
plt.axis('off')
plot_sample_gallery(images_train, labels_train)
def onehot_encode(num_classes, y):
return np.eye(num_classes)[y]
scaled_images_train = images_train.astype(np.float32) / 255
scaled_images_test = images_test.astype(np.float32) / 255
train_ds = tf.data.Dataset.from_tensor_slices(
(scaled_images_train, onehot_encode(10, labels_train)))
test_ds = tf.data.Dataset.from_tensor_slices(
(scaled_images_test, onehot_encode(10, labels_test)))
scaled_images_train.dtype
plot_sample_gallery(scaled_images_train, labels_train)
def single_image_data_augment(image):
image = tf.image.random_flip_left_right(image)
image = tf.image.random_brightness(image, max_delta=63 / 255.0)
image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
return image
def data_augment(input_tensor):
return tf.map_fn(single_image_data_augment, input_tensor)
```
Let's plot some data augmented samples. **Rerun the execution of this cell several times using `ctrl-enter`** to see the random changes in contrast and horizontal flips happen:
```
plot_sample_gallery(data_augment(images_train[:12]), labels_train)
# Subsample a small training set with labels
iterator = tfe.Iterator(train_ds.shuffle(60000).batch(1000))
small_train_images, small_train_labels = next(iterator)
small_val_images, small_val_labels = next(iterator)
small_train_ds = tf.data.Dataset.from_tensor_slices(
(small_train_images, small_train_labels))
```
## A first classification model
```
class Model(tfe.Network):
def __init__(self, data_format, dropout=0.5):
super(Model, self).__init__(name='fashion-mnist')
self._input_shape = get_batch_image_shape(
image_size=(28, 28), data_format=data_format)
self.conv1 = self.track_layer(
tf.layers.Conv2D(32, 5, data_format=data_format, activation=tf.nn.relu))
self.conv2 = self.track_layer(
tf.layers.Conv2D(64, 5, data_format=data_format, activation=tf.nn.relu))
self.fc1 = self.track_layer(tf.layers.Dense(1024, activation=tf.nn.relu))
self.fc2 = self.track_layer(tf.layers.Dense(10))
self.dropout = self.track_layer(tf.layers.Dropout(dropout))
self.max_pool2d = self.track_layer(
tf.layers.MaxPooling2D(
(2, 2), (2, 2), padding='SAME', data_format=data_format))
def call(self, inputs, training=False):
        # Compute the forward pass by plugging in the parameterized layers
# of the model along with parameter-free operations such as
# max pooling and dropout.
# The graph of the model is dynamically defined each time
# we execute this forward pass.
x = tf.reshape(inputs, self._input_shape)
x = self.conv1(x)
x = self.max_pool2d(x)
x = self.conv2(x)
x = self.max_pool2d(x)
x = tf.layers.flatten(x)
x = self.fc1(x)
if training:
x = self.dropout(x)
x = self.fc2(x)
return x
def loss_from_logits(logits, targets):
return tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(
labels=targets, logits=logits))
def loss(model, inputs, targets, training=False):
    # Forward the training flag so dropout is active when computing training gradients.
    return loss_from_logits(model.call(inputs, training=training), targets)
def accuracy_from_logits(logits, targets):
match = tf.equal(tf.argmax(logits, axis=1, output_type=tf.int32),
tf.argmax(targets, axis=1, output_type=tf.int32))
return tf.reduce_mean(tf.cast(match, dtype=tf.float32))
model = Model(get_data_format(), dropout=0.5)
model_ewa = Model(get_data_format(), dropout=0.5)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
grad = tfe.implicit_gradients(loss)
train_duration = 0
with tf.device(get_device()):
for e in range(101):
if e % 10 == 0:
tic = time()
train_loss = loss(model, small_train_images, small_train_labels).numpy()
val_logits = model.call(small_val_images)
val_loss = loss_from_logits(val_logits, small_val_labels).numpy()
val_acc = accuracy_from_logits(val_logits, small_val_labels).numpy()
eval_duration = time() - tic
print("Epoch %d: train loss %f, val loss %f, val acc %0.3f, train time %0.3fs, eval time %0.3fs" %
(e, train_loss, val_loss, val_acc, train_duration, eval_duration))
tic = time()
for (inputs, targets) in tfe.Iterator(small_train_ds.shuffle(1000).batch(50)):
optimizer.apply_gradients(grad(model, data_augment(inputs), targets, training=True))
train_duration = time() - tic
dataset_test_images, dataset_test_labels = next(tfe.Iterator(test_ds.batch(1000)))
print("Loss on test set: %0.4f" % loss(
model, dataset_test_images, dataset_test_labels).numpy())
labels_pred = model.predict(scaled_images_test).numpy().argmax(axis=1)
test_acc = np.mean(labels_pred == labels_test)
print("Accuracy on test set: %0.3f" % test_acc)
model_ewa.predict(scaled_images_test).numpy().argmax(axis=1)
c1w = model_ewa.conv1.trainable_variables[0]
model_ewa.conv1.trainable_variables[0]
```
```
import pandas as pd
import numpy as np
import seaborn as sns
import nltk
from nltk.corpus import stopwords
from nltk import PorterStemmer as stemmer
from bs4 import BeautifulSoup
import string
import re
import warnings
warnings.filterwarnings('ignore')
data = pd.read_csv("C:/Users/khist/AmazonProductReview/reviews/Roborock_reviews.csv")
data.head()
data.info()
data.dropna(inplace = True)
def strip_rating(r):
return str(r).split('.')[0]
data['Rating'] = data['Rating'].apply(strip_rating)
data.info()
data.head()
nltk.download('stopwords')
STOPWORDS=stopwords.words("english") #is, he, that, etc.
def Remove_Emojify(review):
return review.encode('ascii', 'ignore').decode('ascii')
def clean_text(text):
ps=stemmer()
text = Remove_Emojify(text) # remove Emojis
text_cleaned = "".join([x for x in text if x not in string.punctuation]) # remove punctuation
text_cleaned = re.sub(' +', ' ', text_cleaned) # remove extra white spaces
text_cleaned = text_cleaned.lower() # converting to lowercase
tokens = text_cleaned.split(" ")
tokens = [token for token in tokens if token not in STOPWORDS] # Taking only those words which are not stopwords
text_cleaned = " ".join([ps.stem(token) for token in tokens])
return text_cleaned
data['cleaned_review']=data['Product_review'].apply(lambda x:clean_text(x))
data.head()
df = data[['Rating','cleaned_review']]
df['Rating'] = df['Rating'].apply(lambda x: 'neg' if int(x) <= 3 else 'pos')
df.columns = ['label', 'text']
df = pd.concat([df, df.label.astype('str').str.get_dummies()], axis=1, sort=False)
df = df[['text', 'neg','pos']]
df.head()
import ktrain
from ktrain import text
trn, val, preproc = text.texts_from_df(df,
'text', # name of column containing review text
label_columns=['neg','pos'],
maxlen=75,
max_features=100000,
preprocess_mode='bert',
val_pct=0.1)
model = text.text_classifier('bert', train_data=trn, preproc=preproc)
learner = ktrain.get_learner(model, train_data=trn, batch_size=15)
#learner.lr_find(max_epochs=5)
#learner.lr_plot()
#learner.validate(val_data=val)
learner.fit_onecycle(lr = 2e-5, epochs = 5)
learner.validate(val_data=val)
predictor = ktrain.get_predictor(learner.model, preproc)
data = ['waste of money',
'beautiful as a gift',
'I use it all day, everyday',
'Worst product']
predictor.predict(data)
predictor.save('C:/Users/khist/Documents/GitHub/Amazon-Product-Review/my_predictor')
# save Predictor (i.e., model and Preprocessor instance) after partially training
predictor.save('C:/Users/khist/Documents/GitHub/Amazon-Product-Review/my_predictor')
# reload Predictor and extract model
model = ktrain.load_predictor('C:/Users/khist/Documents/GitHub/Amazon-Product-Review/my_predictor').model
preproc = ktrain.load_predictor('C:/Users/khist/Documents/GitHub/Amazon-Product-Review/my_predictor').preproc
predictor = ktrain.get_predictor(model, preproc)
predictor.predict(data)
import ktrain
from ktrain import text
```
# Data Loading and Processing
[Source notebook on Gitee](https://gitee.com/mindspore/docs/blob/master/tutorials/source_zh_cn/dataset.ipynb) · [Download the notebook](https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/master/quick_start/mindspore_dataset.ipynb)
MindSpore provides loading interfaces for a number of common datasets and for datasets in standard formats. Users can load data directly with the corresponding dataset classes in `mindspore.dataset`. These dataset classes also provide common data-processing interfaces that let users process data quickly.
## Loading a Dataset
The following example loads the CIFAR-10 dataset through the `Cifar10Dataset` interface and uses a sequential sampler to fetch the first 5 samples.
```
import mindspore.dataset as ds
DATA_DIR = "./datasets/cifar-10-batches-bin/train"
sampler = ds.SequentialSampler(num_samples=5)
dataset = ds.Cifar10Dataset(DATA_DIR, sampler=sampler)
```
## Iterating over a Dataset
Users can create a data iterator with `create_dict_iterator` and iterate over the data; the output below shows the shape and label of each image.
```
for data in dataset.create_dict_iterator():
print("Image shape: {}".format(data['image'].shape), ", Label: {}".format(data['label']))
```
## Custom Datasets
For datasets that MindSpore cannot yet load directly, you can build a custom dataset class and then load the data in your own way through the `GeneratorDataset` interface.
```
import numpy as np
np.random.seed(58)
class DatasetGenerator:
def __init__(self):
self.data = np.random.sample((5, 2))
self.label = np.random.sample((5, 1))
def __getitem__(self, index):
return self.data[index], self.label[index]
def __len__(self):
return len(self.data)
```
The class methods the user needs to define are as follows:
- **\_\_init\_\_**
    The `__init__` function is called when the dataset object is instantiated; users can perform data initialization and similar operations here.
```python
def __init__(self):
self.data = np.random.sample((5, 2))
self.label = np.random.sample((5, 1))
```
- **\_\_getitem\_\_**
    Define the dataset class's `__getitem__` function so that it supports random access and can fetch and return the sample at a given index value `index`.
```python
def __getitem__(self, index):
return self.data[index], self.label[index]
```
- **\_\_len\_\_**
    Define the dataset class's `__len__` function, which returns the number of samples in the dataset.
```python
def __len__(self):
return len(self.data)
```
After defining the dataset class, you can load and access the dataset samples in this user-defined way through the `GeneratorDataset` interface.
```
dataset_generator = DatasetGenerator()
dataset = ds.GeneratorDataset(dataset_generator, ["data", "label"], shuffle=False)
for data in dataset.create_dict_iterator():
print('{}'.format(data["data"]), '{}'.format(data["label"]))
```
## Data Processing and Augmentation
### Data Processing
The dataset interfaces provided by MindSpore come with common data-processing methods; users only need to call the corresponding function interfaces to process data quickly.
The following example first shuffles the dataset and then groups the samples into batches of two.
```
ds.config.set_seed(58)
# Shuffle the data order randomly
dataset = dataset.shuffle(buffer_size=10)
# Batch the dataset
dataset = dataset.batch(batch_size=2)
for data in dataset.create_dict_iterator():
print("data: {}".format(data["data"]))
print("label: {}".format(data["label"]))
```
Here,
`buffer_size`: the size of the buffer used for the shuffle operation on the dataset.
`batch_size`: the number of samples in each batch; here each batch contains 2 samples.
### Data Augmentation
Problems such as too little data or overly uniform sample scenarios hurt model training. Data-augmentation operations let users expand sample diversity and thereby improve the model's generalization ability.
The following example uses operators from the `mindspore.dataset.vision.c_transforms` module to augment the MNIST dataset.
Import the `c_transforms` module and load the MNIST dataset.
```
import matplotlib.pyplot as plt
from mindspore.dataset.vision import Inter
import mindspore.dataset.vision.c_transforms as c_vision
DATA_DIR = './datasets/MNIST_Data/train'
mnist_dataset = ds.MnistDataset(DATA_DIR, num_samples=6, shuffle=False)
# View an original image
mnist_it = mnist_dataset.create_dict_iterator()
data = next(mnist_it)
plt.imshow(data['image'].asnumpy().squeeze(), cmap=plt.cm.gray)
plt.title(data['label'].asnumpy(), fontsize=20)
plt.show()
```
Define the data-augmentation operators, apply `Resize` and `RandomCrop` to the dataset, and insert them into the data-processing pipeline through the `map` operation.
```
resize_op = c_vision.Resize(size=(200,200), interpolation=Inter.LINEAR)
crop_op = c_vision.RandomCrop(150)
transforms_list = [resize_op, crop_op]
mnist_dataset = mnist_dataset.map(operations=transforms_list, input_columns=["image"])
```
View the effect of the data augmentation.
```
mnist_dataset = mnist_dataset.create_dict_iterator()
data = next(mnist_dataset)
plt.imshow(data['image'].asnumpy().squeeze(), cmap=plt.cm.gray)
plt.title(data['label'].asnumpy(), fontsize=20)
plt.show()
```
For more details, see the [Data Augmentation](https://www.mindspore.cn/doc/programming_guide/zh-CN/master/augmentation.html) chapter of the programming guide.
FastText Implementation, filtering stopwords and punctuation
```
from gensim.models import FastText
import pandas as pd
import nltk
from google.colab import drive
drive.mount('/content/gdrive')
df = pd.read_pickle('/content/gdrive/My Drive/40k_bangla_newspaper_article.p')
STOP_WORDS = pd.read_json('/content/gdrive/My Drive/stopwords-bn.json',encoding='utf-8')
stops = [i for i in STOP_WORDS.values]
#selecting first 1000 news
#selected_news = df[:1000]
selected_news = df
#creating title list
#title = [t['title'] for t in selected_news]
#title[:10]
content = [con['content'] for con in selected_news]
c = 0
x = 200
def sentence_to_wordlist(sentence, filters="!\"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n?,।!.'0123456789০১২৩৪৫৬৭৮৯‘\u200c–“”…‘"):
translate_dict = dict((c, ' ') for c in filters)
translate_map = str.maketrans(translate_dict)
wordlist = sentence.translate(translate_map).split()
global c,x;
c+=1
if c>x:
print(x,end=' ')
x+=100
return list(filter(lambda x: x not in stops, wordlist))
#newsVec = [nltk.word_tokenize(text) for text in title]
filter_text = [sentence_to_wordlist(text) for text in content]
dataFrame = pd.DataFrame(filter_text)
X = dataFrame
```
### FastText Model
**window = 5**
**dimension = 32**
- Larger windows tend to produce more topical similarities
- Smaller windows tend to produce more functional and syntactic similarities
<a name="ref-1"/> [cite](#cite-goldberg2017neural)
- Using more data and higher-dimensionality word vectors will improve the accuracy
[cite](#cite-mikolov2013efficient)
```
model = FastText(filter_text, min_count = 5, size=32)
model.most_similar('দেশ')
```
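The cell above leaves the context window at gensim's default (5); as a minimal sketch using the same gensim API, the `window = 5` and `dimension = 32` settings from the notes can be passed explicitly:
```
# Same training call as above, with the hyperparameters spelled out explicitly.
explicit_model = FastText(filter_text, min_count=5, size=32, window=5)
```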
#### Before Filtering
<pre>
[('দেশমাতৃকার', 0.9997934103012085),
('কার', 0.9997575879096985),
('সুরকার', 0.9997130632400513),
('পাঠিকার', 0.9996902942657471),
('টাম্পাকোর', 0.9996019601821899),
('টাইব্রেকারেও', 0.9995642900466919),
('কেনাকাটার', 0.9995537996292114),
('টানাটানি', 0.999542236328125),
('টার্ন', 0.9995319843292236),
('ভালুকার', 0.9995165467262268)]
</pre>
#### After Filtering
<pre>
[('হাসপাতালেও', 0.9999659061431885),
('পরিবারটিকে', 0.9999624490737915),
('নেচেগেয়েউল্লাসে', 0.9999513030052185),
('শাহজাহানকে', 0.9999475479125977),
('এক্সওয়াইজেড', 0.9999469518661499),
('লক্ষ্যমাত্রা', 0.9999465942382812),
('পোস্টে', 0.9999465346336365),
('জনি', 0.9999462366104126),
('এক্ষুনি', 0.9999443888664246),
('কার্ডধারী', 0.9999440908432007)]
</pre>
<pre>
[('সালভাদরে', 0.9999572038650513),
('হাসানপুর', 0.9999555349349976),
('হাসপাতালেও', 0.9999511241912842),
('হানাহানি', 0.9999462366104126),
('মাস্টারকে', 0.9999418258666992),
('সাঁতরে', 0.9999407529830933),
('রাজাপুর', 0.9999330043792725),
('ইয়েমেনে', 0.9999305009841919),
('ওয়েস্টমিনস্টারে', 0.9999287724494934),
('হানিমুন', 0.9999287128448486)]
</pre>
### Saving Model
```
from sklearn.externals import joblib
filename = '/content/gdrive/My Drive/fastText_Bangla_content_full.sav'
joblib.dump(model, filename)
loaded_model = joblib.load(filename)
result = loaded_model.most_similar('সরকার')
result
```
### References
<!--
@article{goldberg2017neural,
title={Neural network methods for natural language processing},
author={Goldberg, Yoav},
journal={Synthesis Lectures on Human Language Technologies},
volume={10},
number={1},
pages={1--309},
year={2017},
publisher={Morgan \& Claypool Publishers}
}
@article{mikolov2013efficient,
title={Efficient estimation of word representations in vector space},
author={Mikolov, Tomas and Chen, Kai and Corrado, Greg and Dean, Jeffrey},
journal={arXiv preprint arXiv:1301.3781},
year={2013}
}
-->
This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# Solution Notebook
## Problem: Implement quick sort.
* [Constraints](#Constraints)
* [Test Cases](#Test-Cases)
* [Algorithm](#Algorithm)
* [Code](#Code)
* [Pythonic-Code](#Pythonic-Code)
* [Unit Test](#Unit-Test)
## Constraints
* Is a naive solution sufficient (i.e., not in-place)?
* Yes
* Are duplicates allowed?
* Yes
## Test Cases
* None -> None
* Empty input -> []
* One element -> [element]
* Two or more elements
## Algorithm
Wikipedia's animation:

* Set pivot to the middle element in the data
* For each element:
* If current element is the pivot, continue
* If the element is less than the pivot, add to left array
* Else, add to right array
* Recursively apply quicksort to the left array
* Recursively apply quicksort to the right array
* Merge the left array + pivot + right array
Complexity:
* Time: O(n log(n)) average and best case, O(n^2) worst case (e.g., when the chosen pivot is repeatedly the smallest or largest element)
* Space: O(n+m), n = number of elements, m = recursion depth
Most implementations are not stable.
## Code
```
from __future__ import division
def quick_sort(data):
if data is None or len(data) < 2:
return data
equal = []
left = []
right = []
pivot_index = len(data) // 2
pivot_value = data[pivot_index]
# Build the left and right partitions
for i in range(len(data)):
if data[i] == pivot_value:
equal.append(data[i])
elif data[i] < pivot_value:
left.append(data[i])
else:
right.append(data[i])
# Recursively apply quick_sort
left = quick_sort(left)
right = quick_sort(right)
return left + equal + right
```
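As a quick worked example (added for illustration): for the input [5, 1, 7, 2] the middle element 7 is the pivot, giving left = [5, 1, 2], equal = [7] and right = []; recursing on the left partition yields [1, 2, 5], so the merged result is [1, 2, 5, 7].
```
print(quick_sort([5, 1, 7, 2]))  # [1, 2, 5, 7]
```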
## Unit Test
```
%%writefile test_quick_sort.py
from nose.tools import assert_equal
class TestQuickSort(object):
def test_quick_sort(self, func):
print('None input')
data = None
sorted_data = func(data)
assert_equal(sorted_data, None)
print('Empty input')
data = []
sorted_data = func(data)
assert_equal(sorted_data, [])
print('One element')
data = [5]
sorted_data = func(data)
assert_equal(sorted_data, [5])
print('Two or more elements')
data = [5, 1, 7, 2, 6, -3, 5, 7, -1]
sorted_data = func(data)
assert_equal(sorted_data, sorted(data))
print('Success: test_quick_sort\n')
def main():
test = TestQuickSort()
test.test_quick_sort(quick_sort)
if __name__ == '__main__':
main()
%run -i test_quick_sort.py
```
# Introduction
This notebook implements the CRM queries and objectives for the property-rental data warehouse: it converts the categorical review ratings into numerical scores, defines customer-satisfaction and profit KPIs, ranks the top-performing properties, and checks the query plans for bottlenecks.
## Imports and Loads
```
# Load Libraries and Functions
import pandas as pd
import pandas.io.sql as sqlio
import psycopg2
import psycopg2.extras
import math
import dbconnection
```
## Create Numerical Reviews
To facilitate queries we will redo the Reviews Dimension using numerical variables.
#### Pre-Process
```
# Retrieve Current Reviews
conn = psycopg2.connect(host = dbconnection.server_host,database = dbconnection.dbname, user = dbconnection.dbusername,password = dbconnection.dbpassword,sslmode=dbconnection.sslmode)
table_name = 'review'
df_reviews = query_table(conn, table_name)
df_reviews.head()
# Map Categorical Reviews to Numerical Values
mapping = {
'rating': {
'Bellow average': 0,
'Average': 1,
'Good': 2,
'Very good': 3,
'Excelent': 4
},
'accuracy': {
'Description is not accurate': 0,
'Accurate description': 1
},
'cleanliness': {
'Not clean': 0,
'Clean': 1
},
'communication': {
'Bad communication': 0,
'Good communication': 1
},
'location': {
'Bad location': 0,
'Good location': 1
}
}
try:
df_reviews.replace(mapping, inplace=True)
except TypeError:
pass
df_reviews.head()
# Add Sum Column
df_reviews['score'] = df_reviews.sum(axis=1)
df_reviews.head()
```
#### Inject Database
```
# Insert Table
insert_sql = """
DROP TABLE IF EXISTS CS;
CREATE TABLE CS (
REVIEW_ID SERIAL PRIMARY KEY NOT NULL,
RATING INT NOT NULL,
ACCURACY INT NOT NULL,
CLEANLINESS INT NOT NULL,
COMMUNICATION INT NOT NULL,
LOCATION INT NOT NULL,
SCORE INT NOT NULL
);
"""
conn = psycopg2.connect(host = dbconnection.server_host,database = dbconnection.dbname, user = dbconnection.dbusername,password = dbconnection.dbpassword,sslmode=dbconnection.sslmode)
run_sql_command(insert_sql, conn)
# Insert DATA
conn = psycopg2.connect(host = dbconnection.server_host,database = dbconnection.dbname, user = dbconnection.dbusername,password = dbconnection.dbpassword,sslmode=dbconnection.sslmode)
insert_data(df_reviews, 'CS', conn)
# Insert Constraints
insert_c = """
ALTER TABLE listings
DROP CONSTRAINT IF EXISTS constraint_fkey;
ALTER TABLE listings
ADD CONSTRAINT cn FOREIGN KEY (review_id) REFERENCES CS (review_id);
"""
conn = psycopg2.connect(host = dbconnection.server_host,database = dbconnection.dbname, user = dbconnection.dbusername,password = dbconnection.dbpassword,sslmode=dbconnection.sslmode)
success = run_sql_command(insert_c, conn)
if success is True: print('Done')
```
## KPI's
#### Create Metrics View
```
# Normalized Customer Satisfaction Metric
norm_cs_sql = """
CREATE OR REPLACE FUNCTION normalized_cs()
RETURNS TABLE
(
id INT,
normalized_cs NUMERIC
)
AS $$
BEGIN
RETURN QUERY
SELECT
x.id, 1.00*(sample-min_metric)/range_metric
FROM
(
SELECT
cs.id,
metric AS sample,
MIN(metric) OVER () AS min_metric,
MAX(metric) OVER () - MIN(metric) OVER () AS range_metric
FROM
(SELECT property.property_id as id, cs.score as metric
FROM cs
INNER JOIN listings
ON cs.review_id = listings.review_id
INNER JOIN property
ON listings.property_id = property.property_id
) as cs
) x ;
END;
$$ LANGUAGE plpgsql;
"""
conn = psycopg2.connect(host = dbconnection.server_host,database = dbconnection.dbname, user = dbconnection.dbusername,password = dbconnection.dbpassword,sslmode=dbconnection.sslmode)
success = run_sql_command(norm_cs_sql, conn)
# Normalized Profit Metric
norm_profit_sql = """
CREATE OR REPLACE FUNCTION normalized_profit()
RETURNS TABLE
(
id INT,
normalized_profit NUMERIC
)
AS $$
BEGIN
RETURN QUERY
SELECT
x.id, 1.00*(sample-min_metric)/range_metric as normalized_profit
FROM
(
SELECT
cs.id,
metric AS sample,
MIN(metric) OVER () AS min_metric,
MAX(metric) OVER () - MIN(metric) OVER () AS range_metric
FROM
(SELECT p.id, AVG(rented_days * avg_price_per_night)/30 as metric
FROM (
SELECT CAST(count(*) as NUMERIC) as rented_days, booking.property_id as id, AVG(booking.price_per_night) as avg_price_per_night
FROM booking
INNER JOIN date
ON booking.date_id = date.date_id
GROUP BY property_id, month, year
) as p
GROUP BY p.id
) as cs
) x ;
END;
$$ LANGUAGE plpgsql;
"""
conn = psycopg2.connect(host = dbconnection.server_host,database = dbconnection.dbname, user = dbconnection.dbusername,password = dbconnection.dbpassword,sslmode=dbconnection.sslmode)
success = run_sql_command(norm_profit_sql, conn)
# Create View
metrics_sql = """
DROP MATERIALIZED VIEW IF EXISTS metrics;
CREATE MATERIALIZED VIEW IF NOT EXISTS metrics AS
SELECT normalized_cs.id, avg(normalized_cs) as cs, avg(normalized_profit) as profit, avg(normalized_cs + normalized_profit) as score
FROM normalized_cs(), normalized_profit()
WHERE normalized_cs.id = normalized_profit.id
GROUP BY normalized_cs.id;
"""
conn = psycopg2.connect(host = dbconnection.server_host,database = dbconnection.dbname, user = dbconnection.dbusername,password = dbconnection.dbpassword,sslmode=dbconnection.sslmode)
success = run_sql_command(metrics_sql, conn)
# Test
conn = psycopg2.connect(host = dbconnection.server_host,database = dbconnection.dbname, user = dbconnection.dbusername,password = dbconnection.dbpassword,sslmode=dbconnection.sslmode)
test = query_table(conn, 'metrics')
test.head()
# get KPI's
conn = psycopg2.connect(host = dbconnection.server_host,database = dbconnection.dbname, user = dbconnection.dbusername,password = dbconnection.dbpassword,sslmode=dbconnection.sslmode)
cs = query_kpis(conn, method='cs')
conn = psycopg2.connect(host = dbconnection.server_host,database = dbconnection.dbname, user = dbconnection.dbusername,password = dbconnection.dbpassword,sslmode=dbconnection.sslmode)
profit = query_kpis(conn, method='profit')
conn = psycopg2.connect(host = dbconnection.server_host,database = dbconnection.dbname, user = dbconnection.dbusername,password = dbconnection.dbpassword,sslmode=dbconnection.sslmode)
score = query_kpis(conn, method='score')
# Print
print('Customer Satisfaction {}%'.format(cs))
print('Monthly Profit Per Property {}%'.format(profit))
print("Company's Performance {}%".format(score))
```
These KPIs represent the potential of the company and show how much of it remains untapped.
The metrics are normalized (min–max scaling; see the formula below), which means that 100% represents the best-case scenario in which all the properties are maximizing their potential.
* Customer Satisfaction: in a best-case scenario, all properties would have maximum ratings.
* Monthly Profit Per Property: in a best-case scenario, all properties would be as profitable as the most profitable property.
* Company’s Performance is a combination of the previous two and shows how much of their potential the company is currently tapping into.
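For reference, both metrics use plain min–max scaling over all properties (generic notation, matching the `1.00*(sample-min_metric)/range_metric` expression in the SQL above):

$$\text{normalized}(x_i) = \frac{x_i - \min_j x_j}{\max_j x_j - \min_j x_j}$$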
## TOP 10 Ranking Properties
```
# Function to get top N ranking properties
topn_sql = """
CREATE OR REPLACE FUNCTION topN(N int)
RETURNS TABLE
(
id INT,
score NUMERIC,
property_type VARCHAR,
room_type VARCHAR,
accommodates VARCHAR,
bathrooms VARCHAR,
bedrooms VARCHAR,
beds VARCHAR,
bed_type VARCHAR
)
AS $$
BEGIN
RETURN QUERY
SELECT metrics.id, metrics.score/2, property.property_type,
property.room_type, property.accommodates, property.bathrooms, property.bedrooms,
property.beds, property.bed_type
FROM metrics
INNER JOIN property
ON metrics.id = property.property_id
ORDER BY metrics.score desc
LIMIT N;
END;
$$ LANGUAGE plpgsql;
"""
conn = psycopg2.connect(host = dbconnection.server_host,database = dbconnection.dbname, user = dbconnection.dbusername,password = dbconnection.dbpassword,sslmode=dbconnection.sslmode)
success = run_sql_command(topn_sql, conn)
# Retrieve Properties
conn = psycopg2.connect(host = dbconnection.server_host,database = dbconnection.dbname, user = dbconnection.dbusername,password = dbconnection.dbpassword,sslmode=dbconnection.sslmode)
top10 = query_table(conn, 'topN(10)')
top10
```
Properties in the top 10 are mostly big properties that accommodate many guests; some interesting characteristics are:
* All are private properties
* Accommodate mostly 7 or more guests
* Have 4+ Beds
* All beds are real beds and not couches
* Scores are at maximum 50%, so properties are not near their full potential
## Bottlenecks and Optimization
#### Normalized Customer Satisfaction

The planner performs 3 full scans:
* The first one is arguably optional, since we are merging tables `listings` and `property` on `property_id`. This column has an automatic index, but because these two tables have a one-to-one relationship, a full scan is more efficient.
* The second full scan loads the `cs` table; since we want to retrieve all of its rows, this is the most efficient approach.
* The last full scan is also the most efficient approach, since we are merging `listings` into `cs` and all values of `cs` are needed.
All these scans are already optimal because each of these tables has to be loaded completely; the planner then uses hash joins to combine them.
This query is optimized.
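The plans in these screenshots come from PostgreSQL's `EXPLAIN ANALYZE`. As a minimal sketch of how to reproduce them (re-using the `dbconnection` settings assumed throughout this notebook, and EXPLAINing the inner customer-satisfaction join directly rather than the wrapping function):
```
# Print the execution plan for the customer-satisfaction join.
plan_sql = """
EXPLAIN ANALYZE
SELECT property.property_id, cs.score
FROM cs
INNER JOIN listings ON cs.review_id = listings.review_id
INNER JOIN property ON listings.property_id = property.property_id;
"""
conn = psycopg2.connect(host = dbconnection.server_host,database = dbconnection.dbname, user = dbconnection.dbusername,password = dbconnection.dbpassword,sslmode=dbconnection.sslmode)
with conn.cursor() as cur:
    cur.execute(plan_sql)
    for (line,) in cur.fetchall():
        print(line)
conn.close()
```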
#### Normalized Profit

This is also optimized: the only full scans performed are on tables that need to be loaded completely into memory. The aggregations are also necessary and cannot be avoided.
#### Metrics View

This is a simple query that collects the previous results and builds a materialized view. It is difficult to optimize because it calls functions, which makes it impossible to set an index for the merge, so two sorts and a merge join are necessary.
#### TOP10 Properties

This query is definitely not a bottleneck, but it can be slightly optimized by adding an index on the score column to remove the need for sorting.
```
index_sql = """
CREATE INDEX score ON metrics(score desc);
"""
conn = psycopg2.connect(host = dbconnection.server_host,database = dbconnection.dbname, user = dbconnection.dbusername,password = dbconnection.dbpassword,sslmode=dbconnection.sslmode)
success = run_sql_command(index_sql, conn)
if success is True: print('Index Created')
```

#### Conclusions
There were no significant bottlenecks: all queries run smoothly, and the whole notebook runs almost instantly. The only modification that could further improve performance would be to encode the review information as numeric variables at the source, but that could prejudice further analyses, such as data mining.
# Day 6
Batch size 256, learning rate 1e-3, normalised inputs, weighted training, rotated Cartesian features, post-activation batch normalisation (BN).
### Import modules
```
%matplotlib inline
from __future__ import division
import sys
import os
os.environ['MKL_THREADING_LAYER']='GNU'
sys.path.append('../')
from Modules.Basics import *
from Modules.Class_Basics import *
```
## Options
```
classTrainFeatures = ['DER_mass_MMC', 'DER_mass_transverse_met_lep', 'DER_mass_vis', 'DER_pt_h', 'DER_deltaeta_jet_jet', 'DER_mass_jet_jet', 'DER_prodeta_jet_jet', 'DER_deltar_tau_lep', 'DER_pt_tot', 'DER_sum_pt', 'DER_pt_ratio_lep_tau', 'DER_met_phi_centrality', 'DER_lep_eta_centrality', 'PRI_met_pt', 'PRI_met_sumet', 'PRI_jet_num', 'PRI_jet_all_pt', 'PRI_tau_px', 'PRI_tau_py', 'PRI_tau_pz', 'PRI_lep_px', 'PRI_lep_pz', 'PRI_jet_leading_px', 'PRI_jet_leading_py', 'PRI_jet_leading_pz', 'PRI_jet_subleading_px', 'PRI_jet_subleading_py', 'PRI_jet_subleading_pz', 'PRI_met_px', 'PRI_met_py']
inputPipe, outputPipe = getPreProcPipes(normIn=True)
classModel = 'modelSwish'
varSet = "basic_rot_features"
nSplits = 10
ensembleSize = 10
ensembleMode = 'loss'
maxEpochs = 200
compileArgs = {'loss':'binary_crossentropy', 'optimizer':'adam', 'bn':'post'}
trainParams = {'epochs' : 1, 'batch_size' : 256, 'verbose' : 0}
modelParams = {'version':classModel, 'nIn':len(classTrainFeatures), 'compileArgs':compileArgs}
print "\nTraining on", len(classTrainFeatures), "features:", [var for var in classTrainFeatures]
```
## Import data
```
trainData = h5py.File(dirLoc + 'train.hdf5', "r+")
valData = h5py.File(dirLoc + 'val.hdf5', "r+")
```
## Determine LR
```
lrFinder = batchLRFindClassifier(trainData, nSplits, getClassifier, modelParams, trainParams, lrBounds=[1e-5,1e-1], trainOnWeights=True, verbose=0)
compileArgs['lr'] = 1e-3
```
## Train classifier
```
results, histories = batchTrainClassifier(trainData, nSplits, getClassifier, modelParams, trainParams, patience=60, cosAnnealMult=2, trainOnWeights=True, maxEpochs=maxEpochs, verbose=1)
```
## Construct ensemble
```
with open('train_weights/resultsFile.pkl', 'r') as fin:
results = pickle.load(fin)
ensemble, weights = assembleEnsemble(results, ensembleSize, ensembleMode, compileArgs)
```
## Response on development data
```
batchEnsemblePredict(ensemble, weights, trainData, ensembleSize=10, verbose=1)
print 'Training ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', trainData), getFeature('pred', trainData)),
roc_auc_score(getFeature('targets', trainData), getFeature('pred', trainData), sample_weight=getFeature('weights', trainData)))
```
## Response on val data
```
batchEnsemblePredict(ensemble, weights, valData, ensembleSize=10, verbose=1)
print 'Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData), getFeature('pred', valData)),
roc_auc_score(getFeature('targets', valData), getFeature('pred', valData), sample_weight=getFeature('weights', valData)))
```
## Evaluation
### Import in dataframe
```
valframe = convertToDF(valData)
sigVal = (valframe.gen_target == 1)
bkgVal = (valframe.gen_target == 0)
```
### MVA distributions
```
getClassPredPlot([valframe[bkgVal], valframe[sigVal]], weightName='gen_weight')
valframe['ams'] = amsScan(valframe)[0]
bests = foldAMSScan(valframe, 10)
scoreTest(ensemble, weights)
saveTest(0.9634438912585132, 'Day_6')
!kaggle competitions submit -c higgs-boson -f ../Data/Day_6_test.csv -m"Day_6 post bn"
```
|
github_jupyter
|
%matplotlib inline
from __future__ import division
import sys
import os
os.environ['MKL_THREADING_LAYER']='GNU'
sys.path.append('../')
from Modules.Basics import *
from Modules.Class_Basics import *
classTrainFeatures = ['DER_mass_MMC', 'DER_mass_transverse_met_lep', 'DER_mass_vis', 'DER_pt_h', 'DER_deltaeta_jet_jet', 'DER_mass_jet_jet', 'DER_prodeta_jet_jet', 'DER_deltar_tau_lep', 'DER_pt_tot', 'DER_sum_pt', 'DER_pt_ratio_lep_tau', 'DER_met_phi_centrality', 'DER_lep_eta_centrality', 'PRI_met_pt', 'PRI_met_sumet', 'PRI_jet_num', 'PRI_jet_all_pt', 'PRI_tau_px', 'PRI_tau_py', 'PRI_tau_pz', 'PRI_lep_px', 'PRI_lep_pz', 'PRI_jet_leading_px', 'PRI_jet_leading_py', 'PRI_jet_leading_pz', 'PRI_jet_subleading_px', 'PRI_jet_subleading_py', 'PRI_jet_subleading_pz', 'PRI_met_px', 'PRI_met_py']
inputPipe, outputPipe = getPreProcPipes(normIn=True)
classModel = 'modelSwish'
varSet = "basic_rot_features"
nSplits = 10
ensembleSize = 10
ensembleMode = 'loss'
maxEpochs = 200
compileArgs = {'loss':'binary_crossentropy', 'optimizer':'adam', 'bn':'post'}
trainParams = {'epochs' : 1, 'batch_size' : 256, 'verbose' : 0}
modelParams = {'version':classModel, 'nIn':len(classTrainFeatures), 'compileArgs':compileArgs}
print "\nTraining on", len(classTrainFeatures), "features:", [var for var in classTrainFeatures]
trainData = h5py.File(dirLoc + 'train.hdf5', "r+")
valData = h5py.File(dirLoc + 'val.hdf5', "r+")
lrFinder = batchLRFindClassifier(trainData, nSplits, getClassifier, modelParams, trainParams, lrBounds=[1e-5,1e-1], trainOnWeights=True, verbose=0)
compileArgs['lr'] = 1e-3
results, histories = batchTrainClassifier(trainData, nSplits, getClassifier, modelParams, trainParams, patience=60, cosAnnealMult=2, trainOnWeights=True, maxEpochs=maxEpochs, verbose=1)
with open('train_weights/resultsFile.pkl', 'r') as fin:
results = pickle.load(fin)
ensemble, weights = assembleEnsemble(results, ensembleSize, ensembleMode, compileArgs)
batchEnsemblePredict(ensemble, weights, trainData, ensembleSize=10, verbose=1)
print 'Training ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', trainData), getFeature('pred', trainData)),
roc_auc_score(getFeature('targets', trainData), getFeature('pred', trainData), sample_weight=getFeature('weights', trainData)))
batchEnsemblePredict(ensemble, weights, valData, ensembleSize=10, verbose=1)
print 'Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData), getFeature('pred', valData)),
roc_auc_score(getFeature('targets', valData), getFeature('pred', valData), sample_weight=getFeature('weights', valData)))
valframe = convertToDF(valData)
sigVal = (valframe.gen_target == 1)
bkgVal = (valframe.gen_target == 0)
getClassPredPlot([valframe[bkgVal], valframe[sigVal]], weightName='gen_weight')
valframe['ams'] = amsScan(valframe)[0]
bests = foldAMSScan(valframe, 10)
scoreTest(ensemble, weights)
saveTest(0.9634438912585132, 'Day_6')
!kaggle competitions submit -c higgs-boson -f ../Data/Day_6_test.csv -m"Day_6 post bn"
| 0.40592 | 0.642376 |
```
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
import re
import time
import collections
import os
def build_dataset(words, n_words, atleast=1):
count = [['GO', 0], ['PAD', 1], ['EOS', 2], ['UNK', 3]]
counter = collections.Counter(words).most_common(n_words)
counter = [i for i in counter if i[1] >= atleast]
count.extend(counter)
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0:
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
lines = open('movie_lines.txt', encoding='utf-8', errors='ignore').read().split('\n')
conv_lines = open('movie_conversations.txt', encoding='utf-8', errors='ignore').read().split('\n')
id2line = {}
for line in lines:
_line = line.split(' +++$+++ ')
if len(_line) == 5:
id2line[_line[0]] = _line[4]
convs = [ ]
for line in conv_lines[:-1]:
_line = line.split(' +++$+++ ')[-1][1:-1].replace("'","").replace(" ","")
convs.append(_line.split(','))
questions = []
answers = []
for conv in convs:
for i in range(len(conv)-1):
questions.append(id2line[conv[i]])
answers.append(id2line[conv[i+1]])
def clean_text(text):
text = text.lower()
text = re.sub(r"i'm", "i am", text)
text = re.sub(r"he's", "he is", text)
text = re.sub(r"she's", "she is", text)
text = re.sub(r"it's", "it is", text)
text = re.sub(r"that's", "that is", text)
text = re.sub(r"what's", "that is", text)
text = re.sub(r"where's", "where is", text)
text = re.sub(r"how's", "how is", text)
text = re.sub(r"\'ll", " will", text)
text = re.sub(r"\'ve", " have", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"\'d", " would", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"won't", "will not", text)
text = re.sub(r"can't", "cannot", text)
text = re.sub(r"n't", " not", text)
text = re.sub(r"n'", "ng", text)
text = re.sub(r"'bout", "about", text)
text = re.sub(r"'til", "until", text)
text = re.sub(r"[-()\"#/@;:<>{}`+=~|.!?,]", "", text)
return ' '.join([i.strip() for i in filter(None, text.split())])
clean_questions = []
for question in questions:
clean_questions.append(clean_text(question))
clean_answers = []
for answer in answers:
clean_answers.append(clean_text(answer))
min_line_length = 2
max_line_length = 5
short_questions_temp = []
short_answers_temp = []
i = 0
for question in clean_questions:
if len(question.split()) >= min_line_length and len(question.split()) <= max_line_length:
short_questions_temp.append(question)
short_answers_temp.append(clean_answers[i])
i += 1
short_questions = []
short_answers = []
i = 0
for answer in short_answers_temp:
if len(answer.split()) >= min_line_length and len(answer.split()) <= max_line_length:
short_answers.append(answer)
short_questions.append(short_questions_temp[i])
i += 1
question_test = short_questions[500:550]
answer_test = short_answers[500:550]
short_questions = short_questions[:500]
short_answers = short_answers[:500]
concat_from = ' '.join(short_questions+question_test).split()
vocabulary_size_from = len(list(set(concat_from)))
data_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(concat_from, vocabulary_size_from)
print('vocab from size: %d'%(vocabulary_size_from))
print('Most common words', count_from[4:10])
print('Sample data', data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]])
print('filtered vocab size:',len(dictionary_from))
print("% of vocab used: {}%".format(round(len(dictionary_from)/vocabulary_size_from,4)*100))
concat_to = ' '.join(short_answers+answer_test).split()
vocabulary_size_to = len(list(set(concat_to)))
data_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to)
print('vocab from size: %d'%(vocabulary_size_to))
print('Most common words', count_to[4:10])
print('Sample data', data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]])
print('filtered vocab size:',len(dictionary_to))
print("% of vocab used: {}%".format(round(len(dictionary_to)/vocabulary_size_to,4)*100))
GO = dictionary_from['GO']
PAD = dictionary_from['PAD']
EOS = dictionary_from['EOS']
UNK = dictionary_from['UNK']
for i in range(len(short_answers)):
short_answers[i] += ' EOS'
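# The Chatbot class below builds a sequence-to-sequence graph:
# - encoder: word embeddings pass through `num_layers` stacked bidirectional LSTMs,
#   and the final forward/backward states are concatenated and reused as the
#   decoder's initial state;
# - decoder: a multi-layer LSTM with a dense projection onto the target vocabulary,
#   trained with teacher forcing (TrainingHelper) and decoded greedily at inference
#   time (GreedyEmbeddingHelper);
# - loss: sequence_loss masked on the true target lengths, minimized with Adam.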
class Chatbot:
def __init__(self, size_layer, num_layers, embedded_size,
from_dict_size, to_dict_size, learning_rate,
batch_size, dropout = 0.5, beam_width = 15):
def lstm_cell(size, reuse=False):
return tf.nn.rnn_cell.LSTMCell(size, initializer=tf.orthogonal_initializer(),
reuse=reuse)
self.X = tf.placeholder(tf.int32, [None, None])
self.Y = tf.placeholder(tf.int32, [None, None])
self.X_seq_len = tf.placeholder(tf.int32, [None])
self.Y_seq_len = tf.placeholder(tf.int32, [None])
# encoder
encoder_embeddings = tf.Variable(tf.random_uniform([from_dict_size, embedded_size], -1, 1))
encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw = lstm_cell(size_layer // 2),
cell_bw = lstm_cell(size_layer // 2),
inputs = encoder_embedded,
sequence_length = self.X_seq_len,
dtype = tf.float32,
scope = 'bidirectional_rnn_%d'%(n))
encoder_embedded = tf.concat((out_fw, out_bw), 2)
bi_state_c = tf.concat((state_fw.c, state_bw.c), -1)
bi_state_h = tf.concat((state_fw.h, state_bw.h), -1)
bi_lstm_state = tf.nn.rnn_cell.LSTMStateTuple(c=bi_state_c, h=bi_state_h)
self.encoder_state = tuple([bi_lstm_state] * num_layers)
self.encoder_state = tuple(self.encoder_state[-1] for _ in range(num_layers))
main = tf.strided_slice(self.Y, [0, 0], [batch_size, -1], [1, 1])
decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)
# decoder
decoder_embeddings = tf.Variable(tf.random_uniform([to_dict_size, embedded_size], -1, 1))
decoder_cells = tf.nn.rnn_cell.MultiRNNCell([lstm_cell(size_layer) for _ in range(num_layers)])
dense_layer = tf.layers.Dense(to_dict_size)
training_helper = tf.contrib.seq2seq.TrainingHelper(
inputs = tf.nn.embedding_lookup(decoder_embeddings, decoder_input),
sequence_length = self.Y_seq_len,
time_major = False)
training_decoder = tf.contrib.seq2seq.BasicDecoder(
cell = decoder_cells,
helper = training_helper,
initial_state = self.encoder_state,
output_layer = dense_layer)
training_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
decoder = training_decoder,
impute_finished = True,
maximum_iterations = tf.reduce_max(self.Y_seq_len))
predicting_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
embedding = decoder_embeddings,
start_tokens = tf.tile(tf.constant([GO], dtype=tf.int32), [batch_size]),
end_token = EOS)
predicting_decoder = tf.contrib.seq2seq.BasicDecoder(
cell = decoder_cells,
helper = predicting_helper,
initial_state = self.encoder_state,
output_layer = dense_layer)
predicting_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
decoder = predicting_decoder,
impute_finished = True,
maximum_iterations = 2 * tf.reduce_max(self.X_seq_len))
self.training_logits = training_decoder_output.rnn_output
self.predicting_ids = predicting_decoder_output.sample_id
masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)
self.cost = tf.contrib.seq2seq.sequence_loss(logits = self.training_logits,
targets = self.Y,
weights = masks)
self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)
size_layer = 256
num_layers = 2
embedded_size = 128
learning_rate = 0.001
batch_size = 16
epoch = 20
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Chatbot(size_layer, num_layers, embedded_size, len(dictionary_from),
len(dictionary_to), learning_rate,batch_size)
sess.run(tf.global_variables_initializer())
def str_idx(corpus, dic):
X = []
for i in corpus:
ints = []
for k in i.split():
try:
ints.append(dic[k])
except Exception as e:
print(e)
ints.append(UNK)
X.append(ints)
return X
X = str_idx(short_questions, dictionary_from)
Y = str_idx(short_answers, dictionary_to)
X_test = str_idx(question_test, dictionary_from)
Y_test = str_idx(answer_test, dictionary_to)
def pad_sentence_batch(sentence_batch, pad_int):
padded_seqs = []
seq_lens = []
max_sentence_len = max([len(sentence) for sentence in sentence_batch])
for sentence in sentence_batch:
padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))
seq_lens.append(len(sentence))
return padded_seqs, seq_lens
def check_accuracy(logits, Y):
acc = 0
for i in range(logits.shape[0]):
internal_acc = 0
count = 0
for k in range(len(Y[i])):
try:
if Y[i][k] == logits[i][k]:
internal_acc += 1
count += 1
if Y[i][k] == EOS:
break
except:
break
acc += (internal_acc / count)
return acc / logits.shape[0]
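# Training loop: iterate over mini-batches of padded question/answer pairs,
# minimize the masked sequence loss, and report the average loss and the
# greedy-decoding token accuracy (measured up to the first EOS) for each epoch.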
for i in range(epoch):
total_loss, total_accuracy = 0, 0
for k in range(0, (len(short_questions) // batch_size) * batch_size, batch_size):
batch_x, seq_x = pad_sentence_batch(X[k: k+batch_size], PAD)
batch_y, seq_y = pad_sentence_batch(Y[k: k+batch_size], PAD)
predicted, loss, _ = sess.run([model.predicting_ids, model.cost, model.optimizer],
feed_dict={model.X:batch_x,
model.Y:batch_y,
model.X_seq_len:seq_x,
model.Y_seq_len:seq_y})
total_loss += loss
total_accuracy += check_accuracy(predicted,batch_y)
total_loss /= (len(short_questions) // batch_size)
total_accuracy /= (len(short_questions) // batch_size)
print('epoch: %d, avg loss: %f, avg accuracy: %f'%(i+1, total_loss, total_accuracy))
for i in range(len(batch_x)):
print('row %d'%(i+1))
print('QUESTION:',' '.join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0,1,2,3]]))
print('REAL ANSWER:',' '.join([rev_dictionary_to[n] for n in batch_y[i] if n not in[0,1,2,3]]))
print('PREDICTED ANSWER:',' '.join([rev_dictionary_to[n] for n in predicted[i] if n not in[0,1,2,3]]),'\n')
batch_x, seq_x = pad_sentence_batch(X_test[:batch_size], PAD)
batch_y, seq_y = pad_sentence_batch(Y_test[:batch_size], PAD)
predicted = sess.run(model.predicting_ids, feed_dict={model.X:batch_x,model.X_seq_len:seq_x})
for i in range(len(batch_x)):
print('row %d'%(i+1))
print('QUESTION:',' '.join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0,1,2,3]]))
print('REAL ANSWER:',' '.join([rev_dictionary_to[n] for n in batch_y[i] if n not in[0,1,2,3]]))
print('PREDICTED ANSWER:',' '.join([rev_dictionary_to[n] for n in predicted[i] if n not in[0,1,2,3]]),'\n')
```
|
github_jupyter
|
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
import re
import time
import collections
import os
def build_dataset(words, n_words, atleast=1):
count = [['GO', 0], ['PAD', 1], ['EOS', 2], ['UNK', 3]]
counter = collections.Counter(words).most_common(n_words)
counter = [i for i in counter if i[1] >= atleast]
count.extend(counter)
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0:
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
lines = open('movie_lines.txt', encoding='utf-8', errors='ignore').read().split('\n')
conv_lines = open('movie_conversations.txt', encoding='utf-8', errors='ignore').read().split('\n')
id2line = {}
for line in lines:
_line = line.split(' +++$+++ ')
if len(_line) == 5:
id2line[_line[0]] = _line[4]
convs = [ ]
for line in conv_lines[:-1]:
_line = line.split(' +++$+++ ')[-1][1:-1].replace("'","").replace(" ","")
convs.append(_line.split(','))
questions = []
answers = []
for conv in convs:
for i in range(len(conv)-1):
questions.append(id2line[conv[i]])
answers.append(id2line[conv[i+1]])
def clean_text(text):
text = text.lower()
text = re.sub(r"i'm", "i am", text)
text = re.sub(r"he's", "he is", text)
text = re.sub(r"she's", "she is", text)
text = re.sub(r"it's", "it is", text)
text = re.sub(r"that's", "that is", text)
text = re.sub(r"what's", "that is", text)
text = re.sub(r"where's", "where is", text)
text = re.sub(r"how's", "how is", text)
text = re.sub(r"\'ll", " will", text)
text = re.sub(r"\'ve", " have", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"\'d", " would", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"won't", "will not", text)
text = re.sub(r"can't", "cannot", text)
text = re.sub(r"n't", " not", text)
text = re.sub(r"n'", "ng", text)
text = re.sub(r"'bout", "about", text)
text = re.sub(r"'til", "until", text)
text = re.sub(r"[-()\"#/@;:<>{}`+=~|.!?,]", "", text)
return ' '.join([i.strip() for i in filter(None, text.split())])
clean_questions = []
for question in questions:
clean_questions.append(clean_text(question))
clean_answers = []
for answer in answers:
clean_answers.append(clean_text(answer))
min_line_length = 2
max_line_length = 5
short_questions_temp = []
short_answers_temp = []
i = 0
for question in clean_questions:
if len(question.split()) >= min_line_length and len(question.split()) <= max_line_length:
short_questions_temp.append(question)
short_answers_temp.append(clean_answers[i])
i += 1
short_questions = []
short_answers = []
i = 0
for answer in short_answers_temp:
if len(answer.split()) >= min_line_length and len(answer.split()) <= max_line_length:
short_answers.append(answer)
short_questions.append(short_questions_temp[i])
i += 1
question_test = short_questions[500:550]
answer_test = short_answers[500:550]
short_questions = short_questions[:500]
short_answers = short_answers[:500]
concat_from = ' '.join(short_questions+question_test).split()
vocabulary_size_from = len(list(set(concat_from)))
data_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(concat_from, vocabulary_size_from)
print('vocab from size: %d'%(vocabulary_size_from))
print('Most common words', count_from[4:10])
print('Sample data', data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]])
print('filtered vocab size:',len(dictionary_from))
print("% of vocab used: {}%".format(round(len(dictionary_from)/vocabulary_size_from,4)*100))
concat_to = ' '.join(short_answers+answer_test).split()
vocabulary_size_to = len(list(set(concat_to)))
data_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to)
print('vocab from size: %d'%(vocabulary_size_to))
print('Most common words', count_to[4:10])
print('Sample data', data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]])
print('filtered vocab size:',len(dictionary_to))
print("% of vocab used: {}%".format(round(len(dictionary_to)/vocabulary_size_to,4)*100))
GO = dictionary_from['GO']
PAD = dictionary_from['PAD']
EOS = dictionary_from['EOS']
UNK = dictionary_from['UNK']
for i in range(len(short_answers)):
short_answers[i] += ' EOS'
class Chatbot:
def __init__(self, size_layer, num_layers, embedded_size,
from_dict_size, to_dict_size, learning_rate,
batch_size, dropout = 0.5, beam_width = 15):
def lstm_cell(size, reuse=False):
return tf.nn.rnn_cell.LSTMCell(size, initializer=tf.orthogonal_initializer(),
reuse=reuse)
self.X = tf.placeholder(tf.int32, [None, None])
self.Y = tf.placeholder(tf.int32, [None, None])
self.X_seq_len = tf.placeholder(tf.int32, [None])
self.Y_seq_len = tf.placeholder(tf.int32, [None])
# encoder
encoder_embeddings = tf.Variable(tf.random_uniform([from_dict_size, embedded_size], -1, 1))
encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw = lstm_cell(size_layer // 2),
cell_bw = lstm_cell(size_layer // 2),
inputs = encoder_embedded,
sequence_length = self.X_seq_len,
dtype = tf.float32,
scope = 'bidirectional_rnn_%d'%(n))
encoder_embedded = tf.concat((out_fw, out_bw), 2)
bi_state_c = tf.concat((state_fw.c, state_bw.c), -1)
bi_state_h = tf.concat((state_fw.h, state_bw.h), -1)
bi_lstm_state = tf.nn.rnn_cell.LSTMStateTuple(c=bi_state_c, h=bi_state_h)
self.encoder_state = tuple([bi_lstm_state] * num_layers)
self.encoder_state = tuple(self.encoder_state[-1] for _ in range(num_layers))
main = tf.strided_slice(self.Y, [0, 0], [batch_size, -1], [1, 1])
decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)
# decoder
decoder_embeddings = tf.Variable(tf.random_uniform([to_dict_size, embedded_size], -1, 1))
decoder_cells = tf.nn.rnn_cell.MultiRNNCell([lstm_cell(size_layer) for _ in range(num_layers)])
dense_layer = tf.layers.Dense(to_dict_size)
training_helper = tf.contrib.seq2seq.TrainingHelper(
inputs = tf.nn.embedding_lookup(decoder_embeddings, decoder_input),
sequence_length = self.Y_seq_len,
time_major = False)
training_decoder = tf.contrib.seq2seq.BasicDecoder(
cell = decoder_cells,
helper = training_helper,
initial_state = self.encoder_state,
output_layer = dense_layer)
training_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
decoder = training_decoder,
impute_finished = True,
maximum_iterations = tf.reduce_max(self.Y_seq_len))
predicting_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
embedding = decoder_embeddings,
start_tokens = tf.tile(tf.constant([GO], dtype=tf.int32), [batch_size]),
end_token = EOS)
predicting_decoder = tf.contrib.seq2seq.BasicDecoder(
cell = decoder_cells,
helper = predicting_helper,
initial_state = self.encoder_state,
output_layer = dense_layer)
predicting_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
decoder = predicting_decoder,
impute_finished = True,
maximum_iterations = 2 * tf.reduce_max(self.X_seq_len))
self.training_logits = training_decoder_output.rnn_output
self.predicting_ids = predicting_decoder_output.sample_id
masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)
self.cost = tf.contrib.seq2seq.sequence_loss(logits = self.training_logits,
targets = self.Y,
weights = masks)
self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)
size_layer = 256
num_layers = 2
embedded_size = 128
learning_rate = 0.001
batch_size = 16
epoch = 20
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Chatbot(size_layer, num_layers, embedded_size, len(dictionary_from),
len(dictionary_to), learning_rate,batch_size)
sess.run(tf.global_variables_initializer())
def str_idx(corpus, dic):
X = []
for i in corpus:
ints = []
for k in i.split():
try:
ints.append(dic[k])
except Exception as e:
print(e)
ints.append(UNK)
X.append(ints)
return X
X = str_idx(short_questions, dictionary_from)
Y = str_idx(short_answers, dictionary_to)
X_test = str_idx(question_test, dictionary_from)
Y_test = str_idx(answer_test, dictionary_to)
def pad_sentence_batch(sentence_batch, pad_int):
padded_seqs = []
seq_lens = []
max_sentence_len = max([len(sentence) for sentence in sentence_batch])
for sentence in sentence_batch:
padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))
seq_lens.append(len(sentence))
return padded_seqs, seq_lens
def check_accuracy(logits, Y):
acc = 0
for i in range(logits.shape[0]):
internal_acc = 0
count = 0
for k in range(len(Y[i])):
try:
if Y[i][k] == logits[i][k]:
internal_acc += 1
count += 1
if Y[i][k] == EOS:
break
except:
break
acc += (internal_acc / count)
return acc / logits.shape[0]
for i in range(epoch):
total_loss, total_accuracy = 0, 0
for k in range(0, (len(short_questions) // batch_size) * batch_size, batch_size):
batch_x, seq_x = pad_sentence_batch(X[k: k+batch_size], PAD)
batch_y, seq_y = pad_sentence_batch(Y[k: k+batch_size], PAD)
predicted, loss, _ = sess.run([model.predicting_ids, model.cost, model.optimizer],
feed_dict={model.X:batch_x,
model.Y:batch_y,
model.X_seq_len:seq_x,
model.Y_seq_len:seq_y})
total_loss += loss
total_accuracy += check_accuracy(predicted,batch_y)
total_loss /= (len(short_questions) // batch_size)
total_accuracy /= (len(short_questions) // batch_size)
print('epoch: %d, avg loss: %f, avg accuracy: %f'%(i+1, total_loss, total_accuracy))
for i in range(len(batch_x)):
print('row %d'%(i+1))
print('QUESTION:',' '.join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0,1,2,3]]))
print('REAL ANSWER:',' '.join([rev_dictionary_to[n] for n in batch_y[i] if n not in[0,1,2,3]]))
print('PREDICTED ANSWER:',' '.join([rev_dictionary_to[n] for n in predicted[i] if n not in[0,1,2,3]]),'\n')
batch_x, seq_x = pad_sentence_batch(X_test[:batch_size], PAD)
batch_y, seq_y = pad_sentence_batch(Y_test[:batch_size], PAD)
predicted = sess.run(model.predicting_ids, feed_dict={model.X:batch_x,model.X_seq_len:seq_x})
for i in range(len(batch_x)):
print('row %d'%(i+1))
print('QUESTION:',' '.join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0,1,2,3]]))
print('REAL ANSWER:',' '.join([rev_dictionary_to[n] for n in batch_y[i] if n not in[0,1,2,3]]))
print('PREDICTED ANSWER:',' '.join([rev_dictionary_to[n] for n in predicted[i] if n not in[0,1,2,3]]),'\n')
| 0.311322 | 0.394784 |
# Kili Tutorial: Importing predictions
In this tutorial, we will show how to import predictions (pre-annotations) into Kili in order to help annotators and accelerate the whole annotation process. The goal of this tutorial is to illustrate some basic components and concepts of Kili in a simple way, but also to dive into the actual process of iteratively developing real applications in Kili.
Additionally:
For an overview of Kili, visit kili-technology.com. You can also check out the Kili documentation at https://kili-technology.github.io/kili-docs. In this example, our project classifies whether an image contains a Porsche or a Tesla, and we want to import model predictions for it.
The tutorial is divided into four parts:
1. Understanding the different types of labels
2. Understanding the data model of a label
3. Pushing predictions to Kili
4. Visualizing predictions in Kili
## 1. Understanding the different types of labels
A label is the annotation, or the combination of all annotations, created on an asset: for example, all the houses identified on a satellite image, or all the text information annotated on a document.
There are three categories of labels:
- **default**: an ordinary label, made by an annotator
- **prediction**: a pre-annotation, made by a model
- **review**: a check, carried out by a reviewer
When you export data (see [How to export labels](https://github.com/kili-technology/kili-playground/blob/master/recipes/export_labels.ipynb)), you can find out which category a label belongs to by looking at the field `labelType`. It can take the following values: `PREDICTION`, `DEFAULT`, `REVIEW`.
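As a minimal sketch (reusing the `playground` client and `project_id` that are set up in the next section, and assuming each asset exposes its labels as shown there), you could count how many labels of each category a project contains by reading this field:
```
from collections import Counter

# Tally DEFAULT / PREDICTION / REVIEW labels across all assets of the project
label_type_counts = Counter()
for asset in playground.get_assets(project_id=project_id):
    for label in (asset['labels'] or []):
        label_type_counts[label['labelType']] += 1
print(label_type_counts)
```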
## 2. Understanding the data model of a label
Predictions are pushed into Kili in the form of Python dictionaries. The format of the dictionary to be pushed depends on the type of data (text, image, audio), on the machine learning task(s) the project is about (e.g. single or multiple classification, transcription, named entity recognition, object detection, etc.) and on their order. In short, it depends on the JSON format that describes the interface of your annotation project.
The following cell shows you how to view this JSON. You need to update the credentials `email`, `password` and `project_id` before running it.
```
!pip install kili
from kili.authentication import KiliAuth
from kili.playground import Playground
email = 'YOUR EMAIL'
password = 'YOUR PASSWORD'
project_id = 'YOUR PROJECT ID'
kauth = KiliAuth(email=email, password=password)
playground = Playground(kauth)
project = playground.get_project(project_id=project_id)
assert 'jsonInterface' in project
print(project['jsonInterface'])
```
In the same way, we will visualize the data model of a label. To do this, make sure you have labeled at least one asset in Kili.
The following cell retrieves and prints the first label it encounters.
```
assets = playground.get_assets(project_id=project_id)
for asset in assets:
labels = asset['labels']
if not labels or len(labels) == 0:
continue
label = labels[0]
print(label)
break
```
Taking the example of a text classification task, here are the fields you will see:
- `createdAt`: Date of creation of the Label.
- `id`: Unique identifier of the Label in the Kili database.
- `jsonResponse`: List of label annotations. See details below.
- `JOB_ID_1`: First annotation task, as defined in the interface builder.
- `categories`: Category the Asset (text) belongs to.
- `name`: Name of the category.
- `confidence`: Category Confidence Index. The value is between 0 and 100 for a PREDICTION type Label (produced by a model). The value is always 100 for a Label created by a human.
- `labelType`: Type of label, used to identify whether the label is a prediction (made by a model), an ordinary label (made by an annotator) or a review (carried out by a reviewer). It can take the following values: PREDICTION, DEFAULT, REVIEW.
- `secondsToLabel`: Time spent creating this Label.
- `totalSecondsToLabel`: Time spent creating Labels for this Asset, all Labels combined.
For other types of tasks, the fields are the same except for the `jsonResponse`, which more or less follows the data model of the Google APIs.
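To make this concrete, here is a minimal sketch of the `jsonResponse` you would build for a single classification job; `JOB_ID_1` matches the example above, while the category name and confidence value are purely illustrative.
```
json_response_example = {
    'JOB_ID_1': {
        'categories': [
            {
                'name': 'CATEGORY_A',  # hypothetical category defined in your interface
                'confidence': 80       # between 0 and 100 for a PREDICTION label
            }
        ]
    }
}
```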
## 3. Pushing predictions to Kili
To make a prediction in Kili, you need 4 pieces of information:
- A project ID
- An external asset ID
- A model name (arbitrary)
- A jsonResponse, in the same format as the labels `jsonResponse` (see above)
This cell imports a prediction to the asset whose label we just saw.
```
externalId = asset['externalId']
model_name = 'v0.0.1'
json_response = label['jsonResponse']
playground.create_predictions(
project_id=project_id,
external_id_array=[externalId],
model_name_array=[model_name],
json_response_array=[json_response])
```
## 4. Visualizing predictions in Kili
To verify that the prediction in question was indeed pushed in Kili, you can go to https://cloud.kili-technology.com/label/projects/[PROJECT_ID]/dataset/labels?currentPage=1&pageSize=50. You should get a new `PREDICTION` line like this:

## Summary
In this tutorial, we accomplished the following:
We introduced the concept of Kili interface settings and the 3 different kinds of labels (`DEFAULT`, `PREDICTION`, `REVIEW`). We demonstrated how to retrieve a `DEFAULT` label and how to create a `PREDICTION` label. If you enjoyed this tutorial, check out the other Recipes for more tutorials that you may find interesting, including further demonstrations of how to use Kili.
You can also visit the Kili website or Kili documentation for more info!
|
github_jupyter
|
!pip install kili
from kili.authentication import KiliAuth
from kili.playground import Playground
email = 'YOUR EMAIL'
password = 'YOUR PASSWORD'
project_id = 'YOUR PROJECT ID'
kauth = KiliAuth(email=email, password=password)
playground = Playground(kauth)
project = playground.get_project(project_id=project_id)
assert 'jsonInterface' in project
print(project['jsonInterface'])
assets = playground.get_assets(project_id=project_id)
for asset in assets:
labels = asset['labels']
if not labels or len(labels) == 0:
continue
label = labels[0]
print(label)
break
externalId = asset['externalId']
model_name = 'v0.0.1'
json_response = label['jsonResponse']
playground.create_predictions(
project_id=project_id,
external_id_array=[externalId],
model_name_array=[model_name],
json_response_array=[json_response])
| 0.306735 | 0.985936 |
# Logistic Regression
Today we are going to learn about Logistic Regression as our first example of Classification. In order to get the best out of this class, I recommend reviewing the following topics:
- Linear Regression
- Cost Functions
- Regularization
- Cross Validation and Hyperparameter Tuning
The path we will take will be:
1. Define the classification problem statement
2. Introduce Logistic Regression from a perspective of solving this with Linear Regression
3. Introduce and Apply the Cross Entropy Cost function
4. Apply cross validation and hyperparameter tuning to tune regularization into Logistic Regression
Recall Classification is the branch of ML where we try to predict a **category** for a given input, instead of a real value. Let's get to an example!
```
# Normal setup and import cells with unimportant parts
import numpy as np
from sklearn import datasets
from sklearn.linear_model import LinearRegression, LogisticRegression
from scipy.special import expit
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.svm import l1_min_c
font = {'weight' : 'bold',
'size' : 22}
matplotlib.rc('font', **font)
SPECIES_MAP = {'Setosa': 0, "Versicolour": 1, "Virginica": 2}
# Helper methods to create our plots. This part is a lot of boilerplate code on using matplotlib that is not core to the lesson in question.
def add_legend(color_map=SPECIES_MAP, cm=plt.cm.coolwarm, **kwargs):
markers = [plt.Line2D([0,0],[0,0],color=cm(color*100), marker='o', linestyle='') for color in color_map.values()]
plt.legend(markers, color_map.keys(), numpoints=1, **kwargs)
def plot_with_coloured_labels_2d(x_data, y_data):
plt.figure(1, figsize=(20, 15))
plt.scatter(x_data[:, 0], x_data[:, 1], c=plt.cm.coolwarm(y_data*100), s=300)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.title("Species in the Iris Dataset by sepal length and width")
plt.plot([4.5], [3.0], marker='o', markersize=30, color="red")
plt.text(4.6, 2.9, s="Which species do we assign?", fontdict={"size": 15, "color": "red"})
add_legend()
plt.show()
def plot_with_coloured_labels_1d(x_data, y_data, threshold=False, color_map=SPECIES_MAP):
only_setosa = np.vectorize(lambda x: 0 if x == 0 else 1)(y_data)
plt.figure(1, figsize=(10, 5))
plt.scatter(x_data[:, 0], y=only_setosa, c=plt.cm.coolwarm(only_setosa*100), s=300)
plt.xlabel('Sepal length')
plt.title("Species in the Iris Dataset by sepal length")
plt.yticks([0,1,2])
markers = [plt.Line2D([0,0],[0,0],color=plt.cm.coolwarm(color*100), marker='o', linestyle='') for color in color_map.values() if color != 2]
plt.legend(markers, ['Setosa', 'Not Setosa'], numpoints=1)
plt.plot(4.5, 0, marker='o', markersize=30, color="red")
if threshold:
plt.plot([threshold_linear_reg, threshold_linear_reg], [0,2], color="violet")
plt.show()
def plot_with_coloured_labels_1d_and_reg(x_data, y_data, regression_model, threshold=False, color_map=SPECIES_MAP):
only_setosa = np.vectorize(lambda x: 0 if x == 0 else 1)(y_data)
plt.figure(1, figsize=(10, 10))
plt.scatter(x_data[:, 0], y=only_setosa, c=plt.cm.coolwarm(only_setosa*100), s=300)
plt.xlabel('Sepal length')
plt.title("Species in the Iris Dataset by sepal length")
markers = [plt.Line2D([0,0],[0,0],color=plt.cm.coolwarm(color*100), marker='o', linestyle='') for color in color_map.values() if color != 2]
plt.legend(markers, ['Setosa', 'Not Setosa'], numpoints=1)
plt.plot(4.5, 0, marker='o', markersize=30, color="red")
y_reg_pred = regression_model.predict(x_data[:, 0].reshape(-1, 1))
data = np.column_stack([x_data[:, 0], y_reg_pred])
plt.plot(data[:, 0], data[:, 1], color="yellow", label="Regression Line")
if threshold:
plt.plot([threshold_linear_reg, threshold_linear_reg], [0,2], color="violet")
plt.show()
def plot_cost_function_deduction(x_data, y_data, color_map=SPECIES_MAP):
only_setosa = np.vectorize(lambda x: 0 if x == 0 else 1)(y_data)
plt.figure(1, figsize=(20, 10))
plt.scatter(x_data[:, 0], y=only_setosa, c=plt.cm.coolwarm(only_setosa*100), s=300)
plt.xlabel('Sepal length')
plt.title("Species in the Iris Dataset by sepal length")
plt.yticks([0,1,2])
markers = [plt.Line2D([0,0],[0,0],color=plt.cm.coolwarm(color*100), marker='o', linestyle='') for color in color_map.values() if color != 2]
plt.legend(markers, ['Setosa', 'Not Setosa'], numpoints=1, loc=4)
plt.arrow(x=7, y=1, dx=0, dy=-0.1, length_includes_head=True, head_width=0.05, head_length=0.03)
plt.text(6, 0.82, s="This value should be penalized heavily\n if predicted Setosa (y=0)")
plt.arrow(x=4.2, y=0, dx=0, dy=0.1, length_includes_head=True, head_width=0.05, head_length=0.03)
plt.text(4.2, 0.11, s="This value should not be penalized\n if predicted Setosa (y=0)")
plt.show()
def plot_setosa_cost():
x = np.linspace(4,7.99)
y = -np.log((8-x)/4)
plt.plot(x,y)
plt.title("Cost function to predict Setosa correctly")
plt.show()
def plot_not_setosa_cost():
x = np.linspace(4.01,8)
y = -np.log((x-4)/4)
plt.plot(x,y)
plt.title("Cost function to predict Not Setosa correctly")
plt.show()
def plot_cost_function():
x = np.linspace(4.01,7.99)
y_0 = -np.log((8-x)/4)
y_1 = -np.log((x-4)/4)
plt.figure(1, figsize=(20, 10))
plt.plot(x,y_0, label="Cost function if model predicts Setosa (y=0)")
plt.plot(x, y_1, label="Cost function if model predicts Not Setosa (y=1)")
plt.title("Cost function for classification")
plt.legend(prop={'size': 14})
plt.show()
def plot_logistic():
x = np.linspace(-5, 5)
y = expit(x)
plt.plot(x, y)
plt.plot([0, 0], [0,0.5], color="violet", linestyle='dashed')
plt.plot([-5, 0], [0.5,0.5], color="violet", linestyle='dashed')
plt.title("Logistic Function")
plt.show()
def plot_model(x_data, logistic_reg_model):
plt.figure(1, figsize=(20, 15))
y_data = logistic_reg_model.predict(x_data)
plt.scatter(x_data[:, 0], x_data[:, 1], c=plt.cm.coolwarm(y_data*100), s=300)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.title("Classification of Iris Dataset by sepal length and width")
add_legend()
plt.show()
def plot_accuracies(inverse_proportion_outliers, accuracies, regularized_accuracies):
plt.figure(1, (10,10))
plt.plot(100/inverse_proportion_outliers, accuracies, label="Non-regularized model", color="b")
plt.plot(100/inverse_proportion_outliers, regularized_accuracies, label="Regularized model", color="r")
plt.xlabel("Proportion of Outliers")
plt.ylabel("Accuracy")
plt.legend()
plt.title("Evolution of accuracy with % of outliers")
plt.show()
def plot_gridsearch_cv(param_grid, results, scoring, adjustable_param="C"):
plt.figure(figsize=(20, 15))
plt.title("GridSearchCV evaluating using multiple scorers simultaneously",fontsize=16)
plt.xlabel(f"Inverse of regularization strength: {adjustable_param}")
plt.ylabel("Score")
plt.grid()
ax = plt.axes()
ax.set_xlim(0, param_grid[adjustable_param].max())
ax.set_ylim(0.0, 1)
# Get the regular numpy array from the MaskedArray
X_axis = np.array(results[f'param_{adjustable_param}'].data, dtype=float)
for scorer, color in zip(list(scoring.keys()), ['g', 'k', 'b']):
for sample, style in (('train', '--'), ('test', '-')):
sample_score_mean = -results['mean_%s_%s' % (sample, scorer)] if scoring[scorer]=='neg_log_loss' else results['mean_%s_%s' % (sample, scorer)]
sample_score_std = results['std_%s_%s' % (sample, scorer)]
ax.fill_between(X_axis, sample_score_mean - sample_score_std,
sample_score_mean + sample_score_std,
alpha=0.1 if sample == 'test' else 0, color=color)
ax.plot(X_axis, sample_score_mean, style, color=color,
alpha=1 if sample == 'test' else 0.7,
label="%s (%s)" % (scorer, sample))
best_index = np.nonzero(results['rank_test_%s' % scorer] == 1)[0][0]
best_score = -results['mean_test_%s' % scorer][best_index] if scoring[scorer]=='neg_log_loss' else results['mean_test_%s' % scorer][best_index]
# Plot a dotted vertical line at the best score for that scorer marked by x
ax.plot([X_axis[best_index], ] * 2, [0, best_score],
linestyle='-.', color=color, marker='x', markeredgewidth=3, ms=8)
# Annotate the best score for that scorer
ax.annotate("%0.2f" % best_score,
(X_axis[best_index], best_score + 0.005))
plt.legend(loc="best")
plt.grid('off')
plt.show()
def get_heatmap(matrix, labels):
plt.figure(1, (10,10))
sns.heatmap(matrix, annot=True, cmap="RdYlGn", xticklabels=labels, yticklabels=labels)
plt.title("Normalized confusion matrix of each classes predictions", fontdict={"size":16})
plt.show()
```
The problem becomes: given the following dataset of flowers (the famous *Iris* dataset), if we spot a new Iris flower at the red dot, which species should we assign? It would be clever to have a model that outputs Setosa, since that is the species of the surrounding flowers.
```
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features - sepal length and sepal width.
Y = iris.target
plot_with_coloured_labels_2d(X,Y)
```
### How would **you** think of a solution to this problem?
## Simplify the problem
Now that the problem is stated, let's think of a solution to a simplified version. Let's only use the Sepal length for now and predict only one class: Setosa!
```
plot_with_coloured_labels_1d(X,Y)
```
We need some value of Sepal Length such that anything below it is classified as Setosa, and anything above it is not. One way to do this with what we already know is Linear Regression: if it returns a value greater than, say, 0.5, then it is not Setosa. Let's try it!
```
# Exercise 1: Build a Linear Regression model to fit the first feature of the dataset X (Sepal Length)
reg = None # Change None with fitted model (1/2 lines)
# Solution 1 :
#reg = LinearRegression().fit(X[:, 0].reshape(-1, 1), np.vectorize(lambda x: 0 if x == 0 else 1)(Y))
if reg is not None:
plot_with_coloured_labels_1d_and_reg(X, Y, regression_model=reg)
```
To find such a threshold value, we only need to solve the equation: \begin{equation*} A*X + b = 0.5 \end{equation*}
```
if reg is not None:
# Get parameters of Model
    slope = reg.coef_[0]  # coef_ has shape (1,); take the scalar slope
intercept = reg.intercept_
# Solve X such that AX + b = 0.5
threshold_linear_reg = np.roots([slope, intercept - 0.5])[0]
print(f"The threshold by linear regression would be {threshold_linear_reg}")
plot_with_coloured_labels_1d_and_reg(X,Y, regression_model=reg, threshold=threshold_linear_reg)
```
It looks like this alternative makes some sense; however, **what problems do you think we could have?**
## Another approach
One issue is that, because of how Linear Regression works, the model will try to reduce the distance between every point (including the extreme ones) and the line. Therefore, we are not trying to maximize the number of correct classifications within the training set! What if we try to maximize that measure instead? What would that cost function look like?
```
plot_cost_function_deduction(X,Y)
```
So, a cost function that pushes the model to predict Setosa correctly should have the following form:
```
plot_setosa_cost()
```
This is because flowers with a short sepal length get a small cost, while those with a high sepal length imply a high cost.
As we saw previously with Linear Regression, this makes the optimizer choose a model that assigns high sepal lengths to classes other than *Setosa*. Now we need the complementary cost: one that penalizes predicting *Not Setosa* for flowers with a short sepal length, which should really be Setosa. It should have the following form:
```
plot_not_setosa_cost()
```
This way, if we combine these two by adding them, we end up with a cost function that optimizes what we want!
```
plot_cost_function()
```
This cost function is called **Log Loss / Cross entropy / Log Likelihood** and will be the cost function of our new classification model.
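In its general form, for a binary label $y \in \{0, 1\}$ and a predicted probability $\hat{y}$ over $m$ examples, it reads: \begin{equation*} J = -\frac{1}{m} \sum_{i=1}^{m} \left[ y_i \log(\hat{y}_i) + (1 - y_i) \log(1 - \hat{y}_i) \right] \end{equation*} so for each example only one of the two curves above contributes, depending on its true class.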
Now we only need to think of the best model!
## Finding the Model
Another issue Linear Regression had was that it doesn't "know" about these classes; it only tries to predict a value as a function of the features! It would be great to have a way of using the class information and, from that, output a **probability** of belonging to a class! How do you think we can do that?
From math, there is a great trick to do this using a function called the **logistic function**, which outputs values between 0 and 1:
```
plot_logistic()
```
Logistic regression is a model that composes the logistic function with a linear regression to output the **probability** of being of class y=1 for a given input *x*: \begin{equation*} y = logistic(A*x + b) \end{equation*}
Let's fit a logistic regression model.
```
logreg = LogisticRegression() # Just like Linear Regression, it is on the same linear_model package!
only_setosas = np.vectorize(lambda x: 0 if x == 0 else 1)(Y) # We collapse values of Y 1 and 2 to 1 so we predict only 1 class
logreg.fit(X[:,0].reshape(-1, 1), only_setosas)
X_test = np.array([5, 8])
y_pred = logreg.predict(X_test.reshape(-1, 1))
predictions = [{0: "Setosa", 1: "Not Setosa"}.get(value) for value in y_pred]
print(f"The class predicted for Sepal length {X_test} cm is {predictions}")
```
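Because the model works with probabilities, we can also inspect them directly. The short sketch below reuses the `logreg` model and `X_test` values from the previous cell, together with scikit-learn's standard `predict_proba` method:
```
probabilities = logreg.predict_proba(X_test.reshape(-1, 1))
for length, probs in zip(X_test, probabilities):
    # Column 0 is class 0 (Setosa), column 1 is class 1 (Not Setosa)
    print(f"Sepal length {length} cm -> P(Setosa) = {probs[0]:.2f}, P(Not Setosa) = {probs[1]:.2f}")
```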
Excellent! Let's do a new exercise: create a Logistic Regression model to predict any of the three classes from Sepal length, and then plot how the predictions behave.
```
logistic_model = LogisticRegression()
logistic_model.fit(X[:, 0].reshape(-1,1), Y)
test_sepal_length = np.linspace(3, 8)
predicted_classes = logistic_model.predict(test_sepal_length.reshape(-1, 1))
plt.scatter(test_sepal_length, predicted_classes, c=plt.cm.coolwarm(predicted_classes*100))
add_legend(loc=4, prop={"size": 12})
plt.show()
```
## Putting everything together
Now that we have a new model and the cost function it uses, let's plot how the new model classifies and get a metric of how good it is.
```
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
# Exercise 2: Build a Logistic Regression model on the Train set and evaluate accuracy on test set. [Hint: inspect accuracy_score]
logreg = None
y_pred = None
# Solution E2:
# logreg = LogisticRegression().fit(X_train, y_train)
# y_pred = logreg.predict(X_test)
if y_pred is not None:
print('Train/Test split results:\n')
print(logreg.__class__.__name__+" accuracy is %2.3f" % accuracy_score(y_test, y_pred))
plot_model(X, logistic_reg_model=logreg)
```
Luckily for us, scikit-learn already has a convenient *classification_report* method to obtain all these metrics, including accuracy, and the precision, recall and F1 score of each class. We can also plot the confusion matrix to see, for each class, which ones the model confuses most easily.
```
if y_pred is not None:
target_names = ['Setosa', 'Versicolour', 'Virginica']
print(classification_report(y_test, y_pred, target_names=target_names))
resulted_confusion_matrix = confusion_matrix(y_true=y_test, y_pred=y_pred, labels=[0,1,2], normalize='true')
get_heatmap(matrix=resulted_confusion_matrix, labels=target_names)
```
### How would you interpret these results?
Recall that we define:
Accuracy: How many examples were correctly classified?
Precision: Out of the examples predicted as positive, how many were truly positive?
Recall: Out of the truly positive examples, how many were predicted as positive?
F1 Score: Harmonic mean of Precision and Recall, so we can analyze a single metric.
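In terms of true positives (TP), true negatives (TN), false positives (FP) and false negatives (FN), these become: \begin{equation*} Accuracy = \frac{TP + TN}{TP + TN + FP + FN}, \quad Precision = \frac{TP}{TP + FP}, \quad Recall = \frac{TP}{TP + FN}, \quad F_1 = 2 \cdot \frac{Precision \cdot Recall}{Precision + Recall} \end{equation*}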
## Logistic Regression with Regularization
Usually, when working in Machine Learning, we need our models not only to be accurate but also *robust*, meaning they are not strongly affected by outliers. Let's see how outliers may affect our model!
```
proportions = np.linspace(20, 10, num=10)
accuracies = []
regularized_accuracies = []
for proportion_of_outliers in proportions:
# Build the train and test sets with outliers
X_outliers = np.random.rand(int(len(X)/proportion_of_outliers), 2)*3 + 6
y_outliers = np.random.randint(0, 2, size=int(len(X)/proportion_of_outliers))
new_X = np.vstack([X, X_outliers])
new_Y = np.append(Y, y_outliers)
X_train, X_test, y_train, y_test = train_test_split(new_X, new_Y, test_size=0.2, random_state=42)
# Get the accuracy for a normal LG model
model = LogisticRegression(random_state=42)
model.fit(X_train, y_train)
y_pred_normal=model.predict(X_test)
accuracies.append(accuracy_score(y_true=y_test, y_pred=y_pred_normal))
# Get the accuracy for a regularized LG model
regularized_model = LogisticRegression(C=0.6, random_state=42)
regularized_model.fit(X_train, y_train)
y_pred_regularized=regularized_model.predict(X_test)
regularized_accuracies.append(accuracy_score(y_true=y_test, y_pred=y_pred_regularized))
plot_accuracies(proportions, accuracies=accuracies, regularized_accuracies=regularized_accuracies)
```
Regularization usually helps to make our model more robust; however, if applied too strongly, it can go the other way. Can you imagine why? Let's first look at what `C` actually controls, and then see how the accuracy evolves as regularization gets stronger (C closer to 0).
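In scikit-learn, `C` is the inverse of the regularization strength, so a smaller `C` shrinks the learned coefficients more aggressively. Here is a minimal sketch of that effect; the values of `C` are arbitrary and we simply refit on the full dataset `X`, `Y`:
```
for c in [0.01, 1, 100]:
    # Stronger regularization (small C) pushes the coefficients towards zero
    clf = LogisticRegression(C=c, max_iter=1000, random_state=42).fit(X, Y)
    print(f"C = {c:<6} -> coefficients:\n{clf.coef_.round(3)}")
```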
```
model = LogisticRegression(penalty='l1',
solver='liblinear',
tol=1e-6,
max_iter=int(1e6),
warm_start=True,
intercept_scaling=10000.,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
cs = np.linspace(l1_min_c(X_train, y_train, loss='log'), 1, 100) # Get regularization values from the minimum possible up to 1
accuracies = []
for c in cs:
# Calculate accuracy
model.set_params(C=c)
model.fit(X_train, y_train)
y_pred=model.predict(X_test)
accuracies.append(accuracy_score(y_true=y_test, y_pred=y_pred))
plt.figure(1, (10, 10))
plt.plot(cs, accuracies)
plt.xlabel("Inverse of regularization (C)")
plt.ylabel("Accuracy")
plt.title("Accuracy as regularization decreases")
```
### Which do you think is the sweet spot in terms of regularization?
## Optimizing the model with CV and Hyperparameter Tuning
However, we may ask ourselves: can we do better? The default parameters are almost never the best ones! How can we find the **best** parameters? Let's perform hyperparameter tuning on this model to get a better one!
```
param_grid = {'C': np.arange(l1_min_c(X, Y, loss='log'), 3, 0.1)} # We will try to tune the regularization parameter C.
scoring = {'Accuracy': 'accuracy', 'Log_loss': 'neg_log_loss'} # And report for plotting the accuracy and cost
gs = GridSearchCV(LogisticRegression(), # Use Logistic Regression
return_train_score=True,
param_grid=param_grid, scoring=scoring,
                  cv=5, # 5-fold cross-validation (each validation fold holds 20% of the data)
refit='Accuracy') # Finally output a trained model with the best accuracy
gs.fit(X, Y)
results = gs.cv_results_
print('='*20)
print("best params: " + str(gs.best_estimator_))
print("best params: " + str(gs.best_params_))
print('best score:', gs.best_score_)
print('='*20)
plot_gridsearch_cv(param_grid=param_grid, results=results, scoring=scoring)
# Exercise 3: Perform Cross Validation and Hyperparameter Tuning to select the best l1_ratio, setting the solver to 'saga', penalty to 'elasticnet', warm_start to False and max_iter to 10000
scoring = {'Accuracy': 'accuracy', 'Log_loss': 'neg_log_loss'}
param_grid = None
gs = None
# Solution E3:
# param_grid = {'l1_ratio': np.arange(l1_min_c(X, Y, loss='log'), 1, 0.1)}
# gs = GridSearchCV(LogisticRegression(penalty='elasticnet', solver='saga', max_iter=10000, warm_start=False), return_train_score=True,
# param_grid=param_grid, scoring=scoring, cv=5, refit='Accuracy')
# gs.fit(X, Y)
if gs is not None:
results = gs.cv_results_
print('='*20)
print("best params: " + str(gs.best_estimator_))
print("best params: " + str(gs.best_params_))
print('best score:', gs.best_score_)
print('='*20)
plot_gridsearch_cv(param_grid=param_grid, results=results, scoring=scoring, adjustable_param="l1_ratio")
```
## Questions to think about later:
- All the examples we used in this notebook are **linearly separable**, how could we use Logistic Regression in non-linearly separable settings?
- If we used a dataset with many more features, how could we test which ones are more useful for a Logistic Regression Model?
- The fact that extreme outliers have an extreme cost for any class eventually affects the chosen decision boundary, since the optimization algorithm will try to minimize the overall cost. This creates a biased and less robust model, since we are no longer at the optimum for the bulk of the data. How do you think we can fix that in the Cross Entropy cost function?
## Key Takeaways and Tips
- Logistic Regression is a classic classification model that predicts probabilities of classes given an input
- It uses the Cross Entropy cost function to maximize correct labels
- Tuning the regularization parameter is very beneficial to make our model more robust
- GridSearchCV has a great interface to do so in a couple of lines
|
github_jupyter
|
# Normal setup and import cells with unimportant parts
import numpy as np
from sklearn import datasets
from sklearn.linear_model import LinearRegression, LogisticRegression
from scipy.special import expit
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.svm import l1_min_c
font = {'weight' : 'bold',
'size' : 22}
matplotlib.rc('font', **font)
SPECIES_MAP = {'Setosa': 0, "Versicolour": 1, "Virginica": 2}
# Helper methods to create our plots. This part is a lot of boilerplate code on using matplotlib that is not core to the lesson in question.
def add_legend(color_map=SPECIES_MAP, cm=plt.cm.coolwarm, **kwargs):
markers = [plt.Line2D([0,0],[0,0],color=cm(color*100), marker='o', linestyle='') for color in color_map.values()]
plt.legend(markers, color_map.keys(), numpoints=1, **kwargs)
def plot_with_coloured_labels_2d(x_data, y_data):
plt.figure(1, figsize=(20, 15))
plt.scatter(x_data[:, 0], x_data[:, 1], c=plt.cm.coolwarm(y_data*100), s=300)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.title("Species in the Iris Dataset by sepal length and width")
plt.plot([4.5], [3.0], marker='o', markersize=30, color="red")
plt.text(4.6, 2.9, s="Which species do we assign?", fontdict={"size": 15, "color": "red"})
add_legend()
plt.show()
def plot_with_coloured_labels_1d(x_data, y_data, threshold=False, color_map=SPECIES_MAP):
only_setosa = np.vectorize(lambda x: 0 if x == 0 else 1)(y_data)
plt.figure(1, figsize=(10, 5))
plt.scatter(x_data[:, 0], y=only_setosa, c=plt.cm.coolwarm(only_setosa*100), s=300)
plt.xlabel('Sepal length')
plt.title("Species in the Iris Dataset by sepal length")
plt.yticks([0,1,2])
markers = [plt.Line2D([0,0],[0,0],color=plt.cm.coolwarm(color*100), marker='o', linestyle='') for color in color_map.values() if color != 2]
plt.legend(markers, ['Setosa', 'Not Setosa'], numpoints=1)
plt.plot(4.5, 0, marker='o', markersize=30, color="red")
if threshold:
plt.plot([threshold_linear_reg, threshold_linear_reg], [0,2], color="violet")
plt.show()
def plot_with_coloured_labels_1d_and_reg(x_data, y_data, regression_model, threshold=False, color_map=SPECIES_MAP):
only_setosa = np.vectorize(lambda x: 0 if x == 0 else 1)(y_data)
plt.figure(1, figsize=(10, 10))
plt.scatter(x_data[:, 0], y=only_setosa, c=plt.cm.coolwarm(only_setosa*100), s=300)
plt.xlabel('Sepal length')
plt.title("Species in the Iris Dataset by sepal length")
markers = [plt.Line2D([0,0],[0,0],color=plt.cm.coolwarm(color*100), marker='o', linestyle='') for color in color_map.values() if color != 2]
plt.legend(markers, ['Setosa', 'Not Setosa'], numpoints=1)
plt.plot(4.5, 0, marker='o', markersize=30, color="red")
y_reg_pred = regression_model.predict(x_data[:, 0].reshape(-1, 1))
data = np.column_stack([x_data[:, 0], y_reg_pred])
plt.plot(data[:, 0], data[:, 1], color="yellow", label="Regression Line")
if threshold:
plt.plot([threshold_linear_reg, threshold_linear_reg], [0,2], color="violet")
plt.show()
def plot_cost_function_deduction(x_data, y_data, color_map=SPECIES_MAP):
only_setosa = np.vectorize(lambda x: 0 if x == 0 else 1)(y_data)
plt.figure(1, figsize=(20, 10))
plt.scatter(x_data[:, 0], y=only_setosa, c=plt.cm.coolwarm(only_setosa*100), s=300)
plt.xlabel('Sepal length')
plt.title("Species in the Iris Dataset by sepal length")
plt.yticks([0,1,2])
markers = [plt.Line2D([0,0],[0,0],color=plt.cm.coolwarm(color*100), marker='o', linestyle='') for color in color_map.values() if color != 2]
plt.legend(markers, ['Setosa', 'Not Setosa'], numpoints=1, loc=4)
plt.arrow(x=7, y=1, dx=0, dy=-0.1, length_includes_head=True, head_width=0.05, head_length=0.03)
plt.text(6, 0.82, s="This value should be penalized heavily\n if predicted Setosa (y=0)")
plt.arrow(x=4.2, y=0, dx=0, dy=0.1, length_includes_head=True, head_width=0.05, head_length=0.03)
plt.text(4.2, 0.11, s="This value should not be penalized\n if predicted Setosa (y=0)")
plt.show()
def plot_setosa_cost():
x = np.linspace(4,7.99)
y = -np.log((8-x)/4)
plt.plot(x,y)
plt.title("Cost function to predict Setosa correctly")
plt.show()
def plot_not_setosa_cost():
x = np.linspace(4.01,8)
y = -np.log((x-4)/4)
plt.plot(x,y)
plt.title("Cost function to predict Not Setosa correctly")
plt.show()
def plot_cost_function():
x = np.linspace(4.01,7.99)
y_0 = -np.log((8-x)/4)
y_1 = -np.log((x-4)/4)
plt.figure(1, figsize=(20, 10))
plt.plot(x,y_0, label="Cost function if model predicts Setosa (y=0)")
plt.plot(x, y_1, label="Cost function if model predicts Not Setosa (y=1)")
plt.title("Cost function for classification")
plt.legend(prop={'size': 14})
plt.show()
def plot_logistic():
x = np.linspace(-5, 5)
y = expit(x)
plt.plot(x, y)
plt.plot([0, 0], [0,0.5], color="violet", linestyle='dashed')
plt.plot([-5, 0], [0.5,0.5], color="violet", linestyle='dashed')
plt.title("Logistic Function")
plt.show()
def plot_model(x_data, logistic_reg_model):
plt.figure(1, figsize=(20, 15))
y_data = logistic_reg_model.predict(x_data)
plt.scatter(x_data[:, 0], x_data[:, 1], c=plt.cm.coolwarm(y_data*100), s=300)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.title("Classification of Iris Dataset by sepal length and width")
add_legend()
plt.show()
def plot_accuracies(inverse_proportion_outliers, accuracies, regularized_accuracies):
plt.figure(1, (10,10))
plt.plot(100/inverse_proportion_outliers, accuracies, label="Non-regularized model", color="b")
plt.plot(100/inverse_proportion_outliers, regularized_accuracies, label="Regularized model", color="r")
plt.xlabel("Proportion of Outliers")
plt.ylabel("Accuracy")
plt.legend()
plt.title("Evolution of accuracy with % of outliers")
plt.show()
def plot_gridsearch_cv(param_grid, results, scoring, adjustable_param="C"):
plt.figure(figsize=(20, 15))
plt.title("GridSearchCV evaluating using multiple scorers simultaneously",fontsize=16)
plt.xlabel(f"Inverse of regularization strength: {adjustable_param}")
plt.ylabel("Score")
plt.grid()
ax = plt.axes()
ax.set_xlim(0, param_grid[adjustable_param].max())
ax.set_ylim(0.0, 1)
# Get the regular numpy array from the MaskedArray
X_axis = np.array(results[f'param_{adjustable_param}'].data, dtype=float)
for scorer, color in zip(list(scoring.keys()), ['g', 'k', 'b']):
for sample, style in (('train', '--'), ('test', '-')):
sample_score_mean = -results['mean_%s_%s' % (sample, scorer)] if scoring[scorer]=='neg_log_loss' else results['mean_%s_%s' % (sample, scorer)]
sample_score_std = results['std_%s_%s' % (sample, scorer)]
ax.fill_between(X_axis, sample_score_mean - sample_score_std,
sample_score_mean + sample_score_std,
alpha=0.1 if sample == 'test' else 0, color=color)
ax.plot(X_axis, sample_score_mean, style, color=color,
alpha=1 if sample == 'test' else 0.7,
label="%s (%s)" % (scorer, sample))
best_index = np.nonzero(results['rank_test_%s' % scorer] == 1)[0][0]
best_score = -results['mean_test_%s' % scorer][best_index] if scoring[scorer]=='neg_log_loss' else results['mean_test_%s' % scorer][best_index]
# Plot a dotted vertical line at the best score for that scorer marked by x
ax.plot([X_axis[best_index], ] * 2, [0, best_score],
linestyle='-.', color=color, marker='x', markeredgewidth=3, ms=8)
# Annotate the best score for that scorer
ax.annotate("%0.2f" % best_score,
(X_axis[best_index], best_score + 0.005))
plt.legend(loc="best")
    plt.grid(False)
plt.show()
def get_heatmap(matrix, labels):
plt.figure(1, (10,10))
sns.heatmap(matrix, annot=True, cmap="RdYlGn", xticklabels=labels, yticklabels=labels)
plt.title("Normalized confusion matrix of each classes predictions", fontdict={"size":16})
plt.show()
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features - sepal length and sepal width.
Y = iris.target
plot_with_coloured_labels_2d(X,Y)
plot_with_coloured_labels_1d(X,Y)
# Exercise 1: Build a Linear Regression model to fit the first feature of the dataset X (Sepal Length)
reg = None # Replace None with a fitted model (1-2 lines)
# Solution 1 :
#reg = LinearRegression().fit(X[:, 0].reshape(-1, 1), np.vectorize(lambda x: 0 if x == 0 else 1)(Y))
if reg is not None:
plot_with_coloured_labels_1d_and_reg(X, Y, regression_model=reg)
if reg is not None:
# Get parameters of Model
slope = reg.coef_
intercept = reg.intercept_
# Solve X such that AX + b = 0.5
threshold_linear_reg = np.roots([slope, intercept - 0.5])[0]
print(f"The threshold by linear regression would be {threshold_linear_reg}")
plot_with_coloured_labels_1d_and_reg(X,Y, regression_model=reg, threshold=threshold_linear_reg)
plot_cost_function_deduction(X,Y)
plot_setosa_cost()
plot_not_setosa_cost()
plot_cost_function()
plot_logistic()
logreg = LogisticRegression() # Just like Linear Regression, it is on the same linear_model package!
only_setosas = np.vectorize(lambda x: 0 if x == 0 else 1)(Y) # We collapse values of Y 1 and 2 to 1 so we predict only 1 class
logreg.fit(X[:,0].reshape(-1, 1), only_setosas)
X_test = np.array([5, 8])
y_pred = logreg.predict(X_test.reshape(-1, 1))
predictions = [{0: "Setosa", 1: "Not Setosa"}.get(value) for value in y_pred]
print(f"The class predicted for Sepal length {X_test} cm is {predictions}")
logistic_model = LogisticRegression()
logistic_model.fit(X[:, 0].reshape(-1,1), Y)
test_sepal_length = np.linspace(3, 8)
predicted_classes = logistic_model.predict(test_sepal_length.reshape(-1, 1))
plt.scatter(test_sepal_length, predicted_classes, c=plt.cm.coolwarm(predicted_classes*100))
add_legend(loc=4, prop={"size": 12})
plt.show()
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
# Exercise 2: Build a Logistic Regression model on the Train set and evaluate accuracy on test set. [Hint: inspect accuracy_score]
logreg = None
y_pred = None
# Solution E2:
# logreg = LogisticRegression().fit(X_train, y_train)
# y_pred = logreg.predict(X_test)
if y_pred is not None:
print('Train/Test split results:\n')
print(logreg.__class__.__name__+" accuracy is %2.3f" % accuracy_score(y_test, y_pred))
plot_model(X, logistic_reg_model=logreg)
if y_pred is not None:
target_names = ['Setosa', 'Versicolour', 'Virginica']
print(classification_report(y_test, y_pred, target_names=target_names))
resulted_confusion_matrix = confusion_matrix(y_true=y_test, y_pred=y_pred, labels=[0,1,2], normalize='true')
get_heatmap(matrix=resulted_confusion_matrix, labels=target_names)
proportions = np.linspace(20, 10, num=10)
accuracies = []
regularized_accuracies = []
for proportion_of_outliers in proportions:
# Build the train and test sets with outliers
X_outliers = np.random.rand(int(len(X)/proportion_of_outliers), 2)*3 + 6
y_outliers = np.random.randint(0, 2, size=int(len(X)/proportion_of_outliers))
new_X = np.vstack([X, X_outliers])
new_Y = np.append(Y, y_outliers)
X_train, X_test, y_train, y_test = train_test_split(new_X, new_Y, test_size=0.2, random_state=42)
# Get the accuracy for a normal LG model
model = LogisticRegression(random_state=42)
model.fit(X_train, y_train)
y_pred_normal=model.predict(X_test)
accuracies.append(accuracy_score(y_true=y_test, y_pred=y_pred_normal))
# Get the accuracy for a regularized LG model
regularized_model = LogisticRegression(C=0.6, random_state=42)
regularized_model.fit(X_train, y_train)
y_pred_regularized=regularized_model.predict(X_test)
regularized_accuracies.append(accuracy_score(y_true=y_test, y_pred=y_pred_regularized))
plot_accuracies(proportions, accuracies=accuracies, regularized_accuracies=regularized_accuracies)
model = LogisticRegression(penalty='l1',
solver='liblinear',
tol=1e-6,
max_iter=int(1e6),
warm_start=True,
intercept_scaling=10000.,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
cs = np.linspace(l1_min_c(X_train, y_train, loss='log'), 1, 100) # Get regularization values from the minimum possible up to 1
accuracies = []
for c in cs:
# Calculate accuracy
model.set_params(C=c)
model.fit(X_train, y_train)
y_pred=model.predict(X_test)
accuracies.append(accuracy_score(y_true=y_test, y_pred=y_pred))
plt.figure(1, (10, 10))
plt.plot(cs, accuracies)
plt.xlabel("Inverse of regularization (C)")
plt.ylabel("Accuracy")
plt.title("Accuracy as regularization decreases")
param_grid = {'C': np.arange(l1_min_c(X, Y, loss='log'), 3, 0.1)} # We will try to tune the regularization parameter C.
scoring = {'Accuracy': 'accuracy', 'Log_loss': 'neg_log_loss'} # And report for plotting the accuracy and cost
gs = GridSearchCV(LogisticRegression(), # Use Logistic Regression
return_train_score=True,
param_grid=param_grid, scoring=scoring,
cv=5, # Perform train test splits of 20%
refit='Accuracy') # Finally output a trained model with the best accuracy
gs.fit(X, Y)
results = gs.cv_results_
print('='*20)
print("best params: " + str(gs.best_estimator_))
print("best params: " + str(gs.best_params_))
print('best score:', gs.best_score_)
print('='*20)
plot_gridsearch_cv(param_grid=param_grid, results=results, scoring=scoring)
# Exercise 3: Perform Cross Validation and Hyperparameter Tuning to select the best l1_ratio, setting the solver as 'saga', penalty as 'elasticnet', warm_start as False and max_iter as 10000
scoring = {'Accuracy': 'accuracy', 'Log_loss': 'neg_log_loss'}
param_grid = None
gs = None
# Solution E3:
# param_grid = {'l1_ratio': np.arange(l1_min_c(X, Y, loss='log'), 1, 0.1)}
# gs = GridSearchCV(LogisticRegression(penalty='elasticnet', solver='saga', max_iter=10000, warm_start=False), return_train_score=True,
# param_grid=param_grid, scoring=scoring, cv=5, refit='Accuracy')
# gs.fit(X, Y)
if gs is not None:
results = gs.cv_results_
print('='*20)
print("best params: " + str(gs.best_estimator_))
print("best params: " + str(gs.best_params_))
print('best score:', gs.best_score_)
print('='*20)
plot_gridsearch_cv(param_grid=param_grid, results=results, scoring=scoring, adjustable_param="l1_ratio")
| 0.783492 | 0.954605 |
```
import gym
import random
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from collections import deque
print("Gym:", gym.__version__)
print("Tensorflow:", tf.__version__)
env_name = 'CartPole-v0'
env = gym.make(env_name)
print("Observation space:", env.observation_space)
print("Action space:", env.action_space)
class QNetwork():
def __init__(self, state_dim, action_size, tau=0.01):
tf.reset_default_graph()
self.state_in = tf.placeholder(tf.float32, shape=[None, *state_dim])
self.action_in = tf.placeholder(tf.int32, shape=[None])
self.q_target_in = tf.placeholder(tf.float32, shape=[None])
action_one_hot = tf.one_hot(self.action_in, depth=action_size)
self.q_state_local = self.build_model(action_size, "local")
self.q_state_target = self.build_model(action_size, "target")
self.q_state_action = tf.reduce_sum(tf.multiply(self.q_state_local, action_one_hot), axis=1)
self.loss = tf.reduce_mean(tf.square(self.q_state_action - self.q_target_in))
self.optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(self.loss)
self.local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="local")
self.target_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="target")
self.updater = tf.group([tf.assign(t, t+tau*(l-t)) for t, l in zip(self.target_vars, self.local_vars)])
def build_model(self, action_size, scope):
with tf.variable_scope(scope):
hidden1 = tf.layers.dense(self.state_in, 100, activation=tf.nn.relu)
q_state = tf.layers.dense(hidden1, action_size, activation=None)
return q_state
def update_model(self, session, state, action, q_target):
feed = {self.state_in: state, self.action_in: action, self.q_target_in: q_target}
session.run([self.optimizer, self.updater], feed_dict=feed)
def get_q_state(self, session, state, use_target=False):
q_state_op = self.q_state_target if use_target else self.q_state_local
q_state = session.run(q_state_op, feed_dict={self.state_in: state})
return q_state
class ReplayBuffer():
def __init__(self, maxlen):
self.buffer = deque(maxlen=maxlen)
def add(self, experience):
self.buffer.append(experience)
def sample(self, batch_size):
sample_size = min(len(self.buffer), batch_size)
samples = random.choices(self.buffer, k=sample_size)
return map(list, zip(*samples))
class DDQNAgent():
def __init__(self, env):
self.state_dim = env.observation_space.shape
self.action_size = env.action_space.n
self.q_network = QNetwork(self.state_dim, self.action_size)
self.replay_buffer = ReplayBuffer(maxlen=10000)
self.gamma = 0.97
self.eps = 1.0
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
def get_action(self, state):
q_state = self.q_network.get_q_state(self.sess, [state])
action_greedy = np.argmax(q_state)
action_random = np.random.randint(self.action_size)
action = action_random if random.random() < self.eps else action_greedy
return action
def train(self, state, action, next_state, reward, done, use_DDQN=True):
self.replay_buffer.add((state, action, next_state, reward, done))
states, actions, next_states, rewards, dones = self.replay_buffer.sample(50)
next_actions = np.argmax(self.q_network.get_q_state(self.sess, next_states, use_target=False), axis=1)
q_next_states = self.q_network.get_q_state(self.sess, next_states, use_target=use_DDQN)
q_next_states[dones] = np.zeros([self.action_size])
q_next_states_next_actions = q_next_states[np.arange(next_actions.shape[0]), next_actions]
q_targets = rewards + self.gamma * q_next_states_next_actions
self.q_network.update_model(self.sess, states, actions, q_targets)
if done:
self.eps = max(0.1, 0.99*self.eps)
def __del__(self):
self.sess.close()
num_runs = 6
run_rewards = []
for n in range(num_runs):
print("Run {}".format(n))
ep_rewards = []
agent = None
agent = DDQNAgent(env)
num_episodes = 200
for ep in range(num_episodes):
state = env.reset()
total_reward = 0
done = False
while not done:
action = agent.get_action(state)
next_state, reward, done, info = env.step(action)
agent.train(state, action, next_state, reward, done, use_DDQN=(n%2==0))
# env.render()
total_reward += reward
state = next_state
ep_rewards.append(total_reward)
# print("Episode: {}, total_reward: {:.2f}".format(ep, total_reward))
run_rewards.append(ep_rewards)
env.close()
import matplotlib.pyplot as plt
%matplotlib inline
for n, ep_rewards in enumerate(run_rewards):
x = range(len(ep_rewards))
cumsum = np.cumsum(ep_rewards)
avgs = [cumsum[ep]/(ep+1) if ep<100 else (cumsum[ep]-cumsum[ep-100])/100 for ep in x]
col = 'r' if (n%2==0) else 'b'
plt.plot(x, avgs, color=col, label=n)
plt.title("DDQN vs DQN performance")
plt.xlabel("Episode")
plt.ylabel("Last 100 episode average rewards")
plt.legend()
```
|
github_jupyter
|
| 0.671578 | 0.280629 |
SOP034 - Wait for BDC to be Healthy
===================================
Blocks until the Big Data Cluster is healthy, or the specified timeout
expires.
The min\_pod\_count parameter indicates that the health check will not
pass until at least this number of pods exists in the cluster. If any
existing pods beyond this limit are unhealthy, the cluster is not
healthy.
Steps
-----
### Parameters
```
timeout = 600 # amount of time to wait before cluster is healthy: default to 10 minutes
check_interval = 5 # amount of time between health checks - default 5 seconds
min_pod_count = 10 # minimum number of healthy pods required to assert health
```
### Instantiate Kubernetes client
```
# Instantiate the Python Kubernetes client into 'api' variable
import os
try:
from kubernetes import client, config
from kubernetes.stream import stream
if "KUBERNETES_SERVICE_PORT" in os.environ and "KUBERNETES_SERVICE_HOST" in os.environ:
config.load_incluster_config()
else:
try:
config.load_kube_config()
except:
            from IPython.display import Markdown
            display(Markdown(f'HINT: Use [TSG112 - App-Deploy Proxy Nginx Logs](../log-analyzers/tsg112-get-approxy-nginx-logs.ipynb) to resolve this issue.'))
raise
api = client.CoreV1Api()
print('Kubernetes client instantiated')
except ImportError:
from IPython.display import Markdown
display(Markdown(f'HINT: Use [SOP059 - Install Kubernetes Python module](../install/sop059-install-kubernetes-module.ipynb) to resolve this issue.'))
raise
```
### Get the namespace for the big data cluster
Get the namespace of the Big Data Cluster from the Kubernetes API.
**NOTE:**
If there is more than one Big Data Cluster in the target Kubernetes
cluster, then either:
- set \[0\] to the correct value for the big data cluster.
- set the environment variable AZDATA\_NAMESPACE, before starting
Azure Data Studio (a short sketch follows below).
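For example, a minimal sketch of setting the variable from inside this notebook's kernel (the cell below only reads `os.environ`, so this is enough for the health check here; the name `mssql-cluster` is a placeholder - substitute your own namespace):
```
import os

# Placeholder value - replace with the namespace of your Big Data Cluster
os.environ["AZDATA_NAMESPACE"] = "mssql-cluster"
```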
```
# Place Kubernetes namespace name for BDC into 'namespace' variable
if "AZDATA_NAMESPACE" in os.environ:
namespace = os.environ["AZDATA_NAMESPACE"]
else:
try:
namespace = api.list_namespace(label_selector='MSSQL_CLUSTER').items[0].metadata.name
except IndexError:
from IPython.display import Markdown
display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
raise
print('The kubernetes namespace for your big data cluster is: ' + namespace)
```
### Define functions
```
import threading
import time
import sys
isRunning = True
def all_containers_ready(pod):
"""helper method returns true if all the containers within the given pod are ready
Arguments:
pod {v1Pod} -- Metadata retrieved from the api call to.
"""
return all(map(lambda c: c.ready is True, pod.status.container_statuses))
def pod_is_ready(pod):
"""tests that the pod, and all containers are ready
Arguments:
pod {v1Pod} -- Metadata retrieved from api call.
"""
return pod.status.phase == "Running" and all_containers_ready(pod)
def waitReady():
"""Waits for all pods, and containers to become ready.
"""
while isRunning:
try:
pods = None
if namespace is not None:
display("Checking namespace {0}".format(namespace))
pods = api.list_namespaced_pod(namespace, _request_timeout=30)
else:
display("Checking all namespaces".format(namespace))
pods = api.list_pod_for_all_namespaces(_request_timeout=30)
allReady = len(pods.items) > min_pod_count and all(map(pod_is_ready, pods.items))
if allReady:
cluster_healthy = True
return True
else:
display("cluster not healthy, rechecking in {0} seconds.".format(check_interval))
time.sleep(check_interval)
except:
last_error_message = str(sys.exc_info())
display(last_error_message)
time.sleep(check_interval)
print("Functions defined")
```
### Wait for cluster to become healthy
```
mt = threading.Thread(target=waitReady)
mt.start()
mt.join(timeout=timeout)
if mt.is_alive():
raise SystemExit("Timeout waiting for pods to become ready.")
else:
display("Cluster is healthy")
isRunning = False
print('Notebook execution complete.')
```
|
github_jupyter
|
| 0.466603 | 0.814828 |
# Pytorch-Struct
[](https://travis-ci.org/harvardnlp/pytorch-struct)
[](https://coveralls.io/github/harvardnlp/pytorch-struct?branch=master)
<p align="center">
<img src="https://github.com/harvardnlp/pytorch-struct/raw/master/download.png">
</p>
A library of tested, GPU implementations of core structured prediction algorithms for deep learning applications.
(or an implementation of <a href="https://www.cs.jhu.edu/~jason/papers/eisner.spnlp16.pdf">"Inside-Outside and Forward-Backward Algorithms Are Just Backprop"</a>)
## Getting Started
```
!pip install -qU git+https://github.com/harvardnlp/pytorch-struct
!pip install -q matplotlib
import torch
from torch_struct import DepTree, LinearChain, MaxSemiring, SampledSemiring
import matplotlib.pyplot as plt
def show(x): plt.imshow(x.detach())
# Make some data.
vals = torch.zeros(2, 10, 10) + 1e-5
vals[:, :5, :5] = torch.rand(5)
vals[:, 5:, 5:] = torch.rand(5)
vals = vals.log()
show(vals[0])
# Compute marginals
marginals = DepTree().marginals(vals)
show(marginals[0])
# Compute argmax
argmax = DepTree(MaxSemiring).marginals(vals)
show(argmax.detach()[0])
# Compute scoring and enumeration (forward / inside)
log_partition = DepTree().sum(vals)
max_score = DepTree(MaxSemiring).sum(vals)
max_score = DepTree().score(argmax, vals)
# Compute samples
sample = DepTree(SampledSemiring).marginals(vals)
show(sample.detach()[0])
# Padding/Masking built into library.
marginals = DepTree().marginals(
vals,
lengths=torch.tensor([10, 7]))
show(marginals[0])
plt.show()
show(marginals[1])
# Many other structured prediction approaches
chain = torch.zeros(2, 10, 10, 10) + 1e-5
chain[:, :, :, :] = vals.unsqueeze(-1).exp()
chain[:, :, :, :] += torch.eye(10, 10).view(1, 1, 10, 10)
chain[:, 0, :, 0] = 1
chain[:, -1,9, :] = 1
chain = chain.log()
marginals = LinearChain().marginals(chain)
show(marginals.detach()[0].sum(-1))
```
## Library
Current algorithms implemented:
* Linear Chain (CRF / HMM)
* Semi-Markov (CRF / HSMM)
* Dependency Parsing (Projective and Non-Projective)
* CKY (CFG)
* Integration with `torchtext` and `pytorch-transformers`
Design Strategy:
1) Minimal implementations. Most are 10 lines.
2) Batched for GPU.
3) Code can be ported to other backends
Semirings (a short usage sketch follows this list):
* Log Marginals
* Max and MAP computation
* Sampling through specialized backprop
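As a rough sketch of how these semirings plug into the same API (reusing the `vals` potentials from the Getting Started section above; the log-partition semiring is the default):
```
# One structure, several semirings (sketch; reuses `vals` from above)
log_partition = DepTree().sum(vals)                    # log-partition (default semiring)
best_score = DepTree(MaxSemiring).sum(vals)            # MAP / Viterbi score
best_tree = DepTree(MaxSemiring).marginals(vals)       # argmax structure
one_sample = DepTree(SampledSemiring).marginals(vals)  # sample via specialized backprop
```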
## Examples
* BERT <a href="https://github.com/harvardnlp/pytorch-struct/blob/master/notebooks/BertTagger.ipynb">Part-of-Speech</a>
* BERT <a href="https://github.com/harvardnlp/pytorch-struct/blob/master/notebooks/BertDependencies.ipynb">Dependency Parsing</a>
* Unsupervised Learning (to come)
* Structured VAE (to come)
|
github_jupyter
|
| 0.794982 | 0.969699 |
### 1 Import packages
```
from collections import defaultdict
import matplotlib.pyplot as plt
# Commonly used packages
import numpy as np
import pandas as pd
import seaborn as sns
# Import torch
import torch
import torch.nn.functional as F
from pylab import rcParams
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from torch import nn
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
# Import transformers
from transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup
%matplotlib inline
%config InlineBackend.figure_format='retina'  # display theme
sns.set(style='whitegrid', palette='muted', font_scale=1.2)
HAPPY_COLORS_PALETTE = ["#01BEFE", "#FFDD00", "#FF7D00", "#FF006D", "#ADFF02", "#8F00FF"]
sns.set_palette(sns.color_palette(HAPPY_COLORS_PALETTE))
rcParams['figure.figsize'] = 12, 8
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device
torch.cuda.is_available()
```
### 2 Load the data
```
with open('data/train_dataset_v2.tsv', 'r', encoding='utf-8') as handler:
lines = handler.read().split('\n')[1:-1]
data = list()
for line in tqdm(lines):
sp = line.split('\t')
if len(sp) != 4:
print("ERROR:", sp)
continue
data.append(sp)
train = pd.DataFrame(data)
train.columns = ['id', 'content', 'character', 'emotions']
test = pd.read_csv('data/test_dataset.tsv', sep='\t')
submit = pd.read_csv('data/submit_example.tsv', sep='\t')
train = train[train['emotions'] != '']
```
### 3 Data preprocessing
### 3.1 Text concatenation
```
train['text'] = train['content'].astype(str) + ' 角色: ' + train['character'].astype(str)
test['text'] = test['content'].astype(str) + ' 角色: ' + test['character'].astype(str)
```
### 3.2 Label conversion
```
train['emotions'] = train['emotions'].apply(lambda x: [int(_i) for _i in x.split(',')])
train[['love', 'joy', 'fright', 'anger', 'fear', 'sorrow']] = train['emotions'].values.tolist()
test[['love', 'joy', 'fright', 'anger', 'fear', 'sorrow']] =[0,0,0,0,0,0]
train['love'].value_counts()
train.head()
```
### 4 Load the tokenizer
> Byte Pair Encoding(BPE)/WordPiece
### 4.1 tokenizer
```
PRE_TRAINED_MODEL_NAME = 'hfl/chinese-roberta-wwm-ext'
tokenizer = BertTokenizer.from_pretrained(PRE_TRAINED_MODEL_NAME)
```
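To see what the WordPiece vocabulary mentioned above does to a raw string, a quick check (the sample sentence is arbitrary):
```
# Inspect WordPiece tokenization on an arbitrary sample sentence
sample_text = '我喜欢这个角色'
print(tokenizer.tokenize(sample_text))
print(tokenizer.encode(sample_text, add_special_tokens=True))
```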
### 4.2 Choose the maximum text length
```
token_lens = []
for txt in tqdm(train.text):
tokens = tokenizer.encode(txt, max_length=512)
token_lens.append(len(tokens))
sns.distplot(token_lens)
plt.xlim([0, 256]);
plt.xlabel('Token count');
```
You can see that most texts have a token-id length below 300, so a maximum length of around 300 would cover almost all of them (below we tentatively use 128 to keep training fast).
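If you prefer to derive the cutoff from the data instead of eyeballing the histogram, a small sketch using the `token_lens` list computed above (the 95th percentile is an arbitrary choice):
```
# A data-driven alternative for picking the maximum sequence length
print('95th percentile of token lengths:', int(np.percentile(token_lens, 95)))
```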
```
pd.Series(token_lens).describe()
MAX_LEN = 128  # tentatively use 128 here
```
### 5 Build the dataset
```
target_cols=['love', 'joy', 'fright', 'anger', 'fear', 'sorrow']
```
### 5.1 Custom dataset
```
class RoleDataset(Dataset):
def __init__(self,texts,labels,tokenizer,max_len):
self.texts=texts
self.labels=labels
self.tokenizer=tokenizer
self.max_len=max_len
def __len__(self):
return len(self.texts)
def __getitem__(self,item):
"""
        item is the data index; each call fetches the item-th sample
"""
text=str(self.texts[item])
label=self.labels[item]
encoding=self.tokenizer.encode_plus(
text,
add_special_tokens=True,
max_length=self.max_len,
return_token_type_ids=True,
pad_to_max_length=True,
return_attention_mask=True,
return_tensors='pt',
)
# print(encoding['input_ids'])
sample = {
'texts': text,
'input_ids': encoding['input_ids'].flatten(),
'attention_mask': encoding['attention_mask'].flatten()
}
for label_col in target_cols:
sample[label_col] = torch.tensor(label[label_col], dtype=torch.float)
return sample
```
### 5.2 Split the dataset and create data loaders
```
df_train, df_val = train_test_split(train, test_size=0.1, random_state=RANDOM_SEED)
def create_data_loader(df,tokenizer,max_len,batch_size):
ds=RoleDataset(
texts=df['text'].values,
labels=df[target_cols].to_dict('records'),
tokenizer=tokenizer,
max_len=max_len
)
return DataLoader(
ds,
batch_size=batch_size,
        # num_workers=4  # multi-process loading (can hang on Windows)
)
BATCH_SIZE = 16
train_data_loader = create_data_loader(df_train, tokenizer, MAX_LEN, BATCH_SIZE)
val_data_loader = create_data_loader(df_val, tokenizer, MAX_LEN, BATCH_SIZE)
# test_data_loader = create_data_loader(df_test, tokenizer, MAX_LEN, BATCH_SIZE)
data = next(iter(train_data_loader))
data.keys()
print(data['input_ids'].shape)
print(data['attention_mask'].shape)
print(data['love'].shape)
```
## 6 Build the multi-target regression model
```
bert_model = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME)
```
### Define the classification model
```
class EmotionClassifier(nn.Module):
def __init__(self, n_classes):
super(EmotionClassifier, self).__init__()
self.bert = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME)
self.out_love = nn.Linear(self.bert.config.hidden_size, n_classes)
self.out_joy = nn.Linear(self.bert.config.hidden_size, n_classes)
self.out_fright = nn.Linear(self.bert.config.hidden_size, n_classes)
self.out_anger = nn.Linear(self.bert.config.hidden_size, n_classes)
self.out_fear = nn.Linear(self.bert.config.hidden_size, n_classes)
self.out_sorrow = nn.Linear(self.bert.config.hidden_size, n_classes)
def forward(self, input_ids, attention_mask):
_, pooled_output = self.bert(
input_ids=input_ids,
attention_mask=attention_mask,
return_dict = False
)
love = self.out_love(pooled_output)
joy = self.out_joy(pooled_output)
fright = self.out_fright(pooled_output)
anger = self.out_anger(pooled_output)
fear = self.out_fear(pooled_output)
sorrow = self.out_sorrow(pooled_output)
return {
'love': love, 'joy': joy, 'fright': fright,
'anger': anger, 'fear': fear, 'sorrow': sorrow,
}
# class_names=train.love.unique()
class_names=[1]
model = EmotionClassifier(len(class_names))
model = model.to(device)
```
## 7 Model training
```
EPOCHS = 1  # number of training epochs
optimizer = AdamW(model.parameters(), lr=3e-5, correct_bias=False)
total_steps = len(train_data_loader) * EPOCHS
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=0,
num_training_steps=total_steps
)
loss_fn = nn.MSELoss().to(device)
def train_epoch(
model,
data_loader,
criterion,
optimizer,
device,
scheduler,
n_examples
):
model = model.train()
losses = []
correct_predictions = 0
for sample in tqdm(data_loader):
input_ids = sample["input_ids"].to(device)
attention_mask = sample["attention_mask"].to(device)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask
)
loss_love = criterion(outputs['love'], sample['love'].to(device))
loss_joy = criterion(outputs['joy'], sample['joy'].to(device))
loss_fright = criterion(outputs['fright'], sample['fright'].to(device))
loss_anger = criterion(outputs['anger'], sample['anger'].to(device))
loss_fear = criterion(outputs['fear'], sample['fear'].to(device))
loss_sorrow = criterion(outputs['sorrow'], sample['sorrow'].to(device))
loss = loss_love + loss_joy + loss_fright + loss_anger + loss_fear + loss_sorrow
losses.append(loss.item())
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
# return correct_predictions.double() / (n_examples*6), np.mean(losses)
return np.mean(losses)
def eval_model(model, data_loader, criterion, device, n_examples):
    model = model.eval()  # switch to evaluation mode
losses = []
correct_predictions = 0
with torch.no_grad():
for sample in tqdm(data_loader):
input_ids = sample["input_ids"].to(device)
attention_mask = sample["attention_mask"].to(device)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask
)
loss_love = criterion(outputs['love'], sample['love'].to(device))
loss_joy = criterion(outputs['joy'], sample['joy'].to(device))
loss_fright = criterion(outputs['fright'], sample['fright'].to(device))
loss_anger = criterion(outputs['anger'], sample['anger'].to(device))
loss_fear = criterion(outputs['fear'], sample['fear'].to(device))
loss_sorrow = criterion(outputs['sorrow'], sample['sorrow'].to(device))
loss = loss_love + loss_joy + loss_fright + loss_anger + loss_fear + loss_sorrow
losses.append(loss.item())
return np.mean(losses)
history = defaultdict(list)  # record loss and acc for each epoch
best_loss = float('inf')
for epoch in range(EPOCHS):
print(f'Epoch {epoch + 1}/{EPOCHS}')
print('-' * 10)
    train_loss = train_epoch(
        model,
        train_data_loader,
        loss_fn,
        optimizer,
        device,
        scheduler,
        len(df_train)
    )
print(f'Train loss {train_loss}')
val_loss = eval_model(
model,
val_data_loader,
loss_fn,
device,
len(df_val)
)
print(f'Val loss {val_loss} ')
print()
history['train_loss'].append(train_loss)
history['val_loss'].append(val_loss)
if val_loss < best_loss:
torch.save(model.state_dict(), 'best_model_state.bin')
```
## 8 模型预测
```
test_data_loader = create_data_loader(test, tokenizer, MAX_LEN, BATCH_SIZE)
def predict(model):
val_loss = 0
test_pred = defaultdict(list)
model.eval()
for step, batch in tqdm(enumerate(test_data_loader)):
b_input_ids = batch['input_ids'].to(device)
b_attention_mask = batch['attention_mask'].to(device)
with torch.no_grad():
logits = model(input_ids=b_input_ids, attention_mask=b_attention_mask)
for col in target_cols:
test_pred[col].append(logits[col].to('cpu').numpy())
preds = {}
for col in target_cols:
print(len(np.concatenate(test_pred[col])))
preds[col] = (np.concatenate(test_pred[col]))
return preds
submit = pd.read_csv('data/submit_example.tsv', sep='\t')
best_model = EmotionClassifier(len(class_names))
path = f'best_model_state.bin'
best_model.load_state_dict(torch.load(path))
best_model.to(device)
test_pred = predict(best_model)
label_preds = []
for col in target_cols:
preds = test_pred[col]
label_preds.append(preds.flatten())
sub = submit.copy()
sub['emotion'] = np.stack(label_preds, axis=1).tolist()
sub['emotion'] = sub['emotion'].apply(lambda x: ','.join([str(i) for i in x]))
sub.head()
sub.to_csv(f'baseline.tsv', sep='\t', index=False)
# 0.67+
```
## ChallengeHub
Presenter: 致Great, ChallengeHub member, M.S. student at Renmin University of China

Follow the account and reply **"爱奇艺"** (iQIYI) to get this notebook
|
github_jupyter
|
| 0.637257 | 0.748007 |

<table align="center">
<td align="center"><a target="_blank" href="https://deeplearning.mit.edu">
<img src="https://deeplearning.mit.edu/files/images/github/icon_mit.png" style="padding-bottom:5px;" />
Visit MIT Deep Learning</a></td>
<td align="center"><a target="_blank" href="http://colab.research.google.com/github/lexfridman/mit-deep-learning/blob/master/tutorial_deep_learning_basics/deep_learning_basics.ipynb">
<img src="https://deeplearning.mit.edu/files/images/github/icon_google_colab.png" style="padding-bottom:5px;" />Run in Google Colab</a></td>
<td align="center"><a target="_blank" href="https://github.com/lexfridman/mit-deep-learning/blob/master/tutorial_deep_learning_basics/deep_learning_basics.ipynb">
<img src="https://deeplearning.mit.edu/files/images/github/icon_github.png" style="padding-bottom:5px;" />View Source on GitHub</a></td>
<td align="center"><a target="_blank" align="center" href="https://www.youtube.com/watch?v=O5xeyoRL95U&list=PLrAXtmErZgOeiKm4sgNOknGvNjby9efdf">
<img src="https://deeplearning.mit.edu/files/images/github/icon_youtube.png" style="padding-bottom:5px;" />Watch YouTube Videos</a></td>
<!-- <td><a target="_blank" href="link">
<img src="image" />text</a></td> -->
</table>
# Deep Learning Basics
This tutorial accompanies the [Deep Learning Basics lecture](https://www.youtube.com/watch?list=PLrAXtmErZgOeiKm4sgNOknGvNjby9efdf&v=O5xeyoRL95U) offered as part of MIT Deep Learning.
You can watch the video on YouTube:
[](https://www.youtube.com/watch?list=PLrAXtmErZgOeiKm4sgNOknGvNjby9efdf&v=O5xeyoRL95U)
This tutorial mentions seven important types/concepts/approaches in deep learning, introduces the first two, and points to other tutorials for the rest. Here is a visual representation of the seven:

At a high level, neural networks are encoders, decoders, or a combination of both.
Encoders find patterns in raw data and form compact, useful representations.
Decoders generate new data or high-resolution useful information from those representations.
As discussed in the lecture, deep learning lets us discover ways to "represent" the world so that we can reason about it.
The rest is a collection of clever methods that help us effectively process visual information, language, and speech (#1-6), and act in the world based on this information and occasional rewards (#7).
1. **Feed Forward Neural Networks (FFNNs)** - classification and regression based on features. See [Part 1](#Part-1:-Boston-Housing-Price-Prediction-with-a-Feed-Forward-Neural-Network-(FFN)).
2. **Convolutional Neural Networks (CNNs)** - image classification, object detection, video action recognition, etc. See [Part 2](#Part-2:-Classifying-MNIST-with-a-Convolutional-Neural-Network) of this tutorial for an example.
3. **Recurrent Neural Networks (RNNs)** - language modeling, speech recognition/generation, etc. See [this TF tutorial on text generation](https://www.tensorflow.org/tutorials/sequences/text_generation).
4. **Encoder Decoder Architectures** - semantic segmentation, machine translation, etc. See [our tutorial on semantic segmentation](https://github.com/lexfridman/mit-deep-learning/blob/master/tutorial_driving_scene_segmentation/tutorial_driving_scene_segmentation.ipynb).
5. **Autoencoder** - unsupervised embeddings, denoising, etc.
6. **Generative Adversarial Networks (GANs)** - unsupervised generation of realistic images, etc. See [this TF tutorial on DCGANs](https://github.com/tensorflow/tensorflow/blob/r1.11/tensorflow/contrib/eager/python/examples/generative_examples/dcgan.ipynb).
7. **Deep Reinforcement Learning** - game playing, robotics in simulation, self-play, neural architecture search, etc. We plan to release a notebook on this soon.
These tutorials make selective omissions and simplifications while trying to preserve the essence of the underlying ideas. See the Einstein quote...
## Part 0: Prerequisites:
We recommend that you run this notebook in the cloud on Google Colab if you haven't already (see the icon links at the top). This is the simplest way to get started.
You can also [install TensorFlow locally](https://www.tensorflow.org/install/). But, again, simple is best (with caveats):

[tf.keras](https://www.tensorflow.org/guide/keras) is the simplest way to build and train neural network models in TensorFlow, so this tutorial sticks to it unless a model needs the lower-level API.
Note that there are two versions: [tf.keras](https://www.tensorflow.org/guide/keras) (shipped with TensorFlow) and [Keras](https://keras.io/) (standalone).
You should use [tf.keras](https://www.tensorflow.org/guide/keras) because
(1) it comes with TensorFlow, so there is nothing extra to install, and
(2) it includes powerful TensorFlow-specific features.
```
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
# Commonly used modules
import numpy as np
import os
import sys
# Images, plots, display, and visualization
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import cv2
import IPython
from six.moves import urllib
print(tf.__version__)
```
## Part 1: Boston Housing Price Prediction with a Feed-Forward Neural Network (FFN)
We will predict Boston housing prices with a fully connected neural network.
The figure below highlights the difference between regression and classification (see Part 2).
Given an observation as input, regression outputs a continuous value (e.g., an exact temperature), while classification outputs the class/category the observation belongs to.
<img src="https://i.imgur.com/vvSoAzg.jpg" alt="classification_regression" width="400"/>
For the Boston housing dataset, we get 506 rows of data, each with 13 features.
Our task is to build a regression model that takes these 13 features as input and outputs a single-value prediction of the median value of owner-occupied homes (in $1000s).
Load the dataset.
Loading the dataset returns four NumPy arrays:
- the train_features and train_labels arrays are the training set (used to learn the model);
- the model is tested against the test set: the test_features and test_labels arrays.
We then normalize the features using per-feature statistics (mean, standard deviation) computed on the training set:
```
(train_features, train_labels), (test_features, test_labels) = keras.datasets.boston_housing.load_data()
# get per-feature statistics (mean, standard deviation) from the training set to normalize by
train_mean = np.mean(train_features, axis=0)
train_std = np.std(train_features, axis=0)
train_features = (train_features - train_mean) / train_std
```
### Build the model
Building the neural network requires configuring the layers of the model, then compiling the model.
First, we stack a few layers together using *keras.Sequential*.
Next, we configure the loss function, the optimizer, and the metrics to monitor.
These are added during the model's compile step:
* *Loss function* - measures how accurate the model is during training; we want the optimizer to minimize it.
* *Optimizer* - how the model is updated based on the data it sees and its loss function.
* *Metrics* - used to monitor the training and testing steps.
We build the network with one hidden layer of 20 neurons.
We use mean squared error (MSE) as the loss function (the most common choice for regression problems).
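For reference, the quantity being minimized is, with $y_i$ the true median price and $\hat{y}_i$ the model's prediction over $n$ training examples:

$$\mathrm{MSE} = \frac{1}{n}\sum_{i=1}^{n}\left(y_i - \hat{y}_i\right)^2$$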
```
def build_model():
model = keras.Sequential([
Dense(20, activation=tf.nn.relu, input_shape=[len(train_features[0])]),
Dense(1)
])
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='mse',
metrics=['mae', 'mse'])
return model
```
### Train the model
Training the neural network model requires the following steps:
1. Feed the training data to the model (in this example, the `train_features` and `train_labels` arrays).
2. The model learns to associate features and labels.
3. Ask the model to make predictions about a test set (in this example, the `test_features` array). Verify that the predictions match the labels from the `test_labels` array.
To start training, call the `model.fit` method. The model is "fit" to the training data:
```
# This reduces output verbosity while still showing training progress.
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
model = build_model()
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=50)
history = model.fit(train_features, train_labels, epochs=1000, verbose=0, validation_split = 0.1,
callbacks=[early_stop, PrintDot()])
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
# show the RMSE measure to compare against the Kaggle leaderboard at https://www.kaggle.com/c/boston-housing/leaderboard
rmse_final = np.sqrt(float(hist['val_mean_squared_error'].tail(1)))
print()
print('Final Root Mean Square Error on validation set: {}'.format(round(rmse_final, 3)))
```
Next, we plot the loss measured on the training and validation sets.
The validation set is used to guard against overfitting.
However, because the network is small, training converges without noticeably overfitting the data, as the plot shows.
```
def plot_history():
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [Thousand Dollars$^2$]')
plt.plot(hist['epoch'], hist['mean_squared_error'], label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_squared_error'], label = 'Val Error')
plt.legend()
plt.ylim([0,50])
plot_history()
```
Next, compare how the model performs on the test dataset:
```
test_features_norm = (test_features - train_mean) / train_std
mse, _, _ = model.evaluate(test_features_norm, test_labels)
rmse = np.sqrt(mse)
print('Root Mean Square Error on test set: {}'.format(round(rmse, 3)))
```
## Part 2: Classifying MNIST with a Convolutional Neural Network
Next, we build a convolutional neural network (CNN) classifier to classify images of handwritten digits in the MNIST dataset, with a twist: we then test the classifier on high-resolution handwritten digits from outside the dataset.
```
# initial setup
this_repo_url = 'https://github.com/lexfridman/mit-deep-learning/raw/master/'
this_tutorial_url = this_repo_url + 'tutorial_deep_learning_basics'
```
The MNIST dataset contains 70,000 grayscale images of handwritten digits at a resolution of 28 by 28 pixels.
The task is to take one of these images as input and predict the most likely digit contained in the image (along with a relative confidence in this prediction):
<img src="https://i.imgur.com/ITrm9x4.png" width="500px">
Next, we load the dataset. The images are 28x28 NumPy arrays, with pixel values ranging between 0 and 255. The labels are an array of integers, ranging from 0 to 9.
```
(train_images, train_labels), (test_images, test_labels) = keras.datasets.mnist.load_data()
print(train_images.shape)
print(train_labels.shape)
print(test_images.shape)
print(test_labels.shape)
# reshape the images to specify that they have a single channel
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1)
test_images = test_images.reshape(test_images.shape[0], 28, 28, 1)
print(train_images.shape)
print(test_images.shape)
```
We scale these values to a range of 0 to 1 before feeding them to the neural network model.
For this, we divide the values by 255. It's important that the training set and the test set are preprocessed in the same way:
```
def preprocess_images(imgs): # should work for both a single image and multiple images
    sample_img = imgs if len(imgs.shape) == 2 else imgs[0]
    assert sample_img.shape in [(28, 28, 1), (28, 28)], sample_img.shape # make sure images are 28x28 and single-channel (grayscale)
return imgs / 255.0
train_images = preprocess_images(train_images)
test_images = preprocess_images(test_images)
```
Display the first 5 images from the training set with the class label below each image.
Check that the data is in the correct format and that we are ready to build and train the network.
```
plt.figure(figsize=(10,2))
for i in range(5):
plt.subplot(1,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i].reshape(28, 28), cmap=plt.cm.binary)
plt.xlabel(train_labels[i])
```
### Build the model
Building the neural network requires configuring the layers of the model and then compiling the model.
In many cases this can be reduced to simply stacking layers.
```
model = keras.Sequential()
# 32 convolution filters used each of size 3x3
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
# 64 convolution filters used each of size 3x3
model.add(Conv2D(64, (3, 3), activation='relu'))
# choose the best features via pooling
model.add(MaxPooling2D(pool_size=(2, 2)))
# randomly turn neurons on and off to improve convergence
model.add(Dropout(0.25))
# flatten since too many dimensions, we only want a classification output
model.add(Flatten())
# fully connected to get all relevant data
model.add(Dense(128, activation='relu'))
# one more dropout
model.add(Dropout(0.5))
# output a softmax to squash the matrix into output probabilities
model.add(Dense(10, activation='softmax'))
```
A few more settings are needed before the model is ready for training. These are added during the model's compile step.
* *Loss function* - measures the model's accuracy during training; we want the optimizer to minimize it.
* *Optimizer* - how the model is updated based on the data it sees and its loss function.
* *Metrics* - used to monitor the training and testing steps. "Accuracy" is the fraction of images that are classified correctly.
```
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
```
### Train the model
Training the neural network model requires the following steps:
1. Feed the training data to the model (in this example, the `train_images` and `train_labels` arrays).
2. The model learns to associate images with labels.
3. Ask the model to make predictions on the test set (in this example, the `test_images` array) and check that the predictions match the labels in the `test_labels` array.
To start training, call the `model.fit` method. The model is "fit" to the training data.
```
history = model.fit(train_images, train_labels, epochs=5)
# save the model weights
model.save_weights("model.h5")
# load the model weights
#model.load_weights("model.h5")
```
As the model trains, the loss and accuracy metrics are displayed.
### Evaluate accuracy
Next, see how the model performs on the test dataset.
```
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
print(test_images.shape)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
```
多くの場合、テストデータセットの精度は、トレーニングデータセットの精度よりもわずかに低くなります。
このトレーニングの精度とテストの精度のギャップは、過剰適合の例です。
私たちの場合、99.19%の精度です!これは、部分的には、ドロップアウトレイヤーで正則化が成功したためです。
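If you want to see that gap concretely, one optional check (a sketch, not part of the original tutorial) is to evaluate the same trained model on the training images and compare the two accuracies:
```
# Optional sketch: quantify the train/test gap discussed above.
# `model`, `train_images`, `train_labels` and `test_acc` are the objects defined earlier in this notebook.
train_loss, train_acc = model.evaluate(train_images, train_labels)
print('Train accuracy:', train_acc)
print('Test accuracy: ', test_acc)
print('Gap (train - test):', train_acc - test_acc)
```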
### Run inference
With the model trained, we can use it to make predictions about some images.
As test data we use `mnist_dream.mp4`, located under `images` in the git repository (tutorial_deep_learning_basics).
mnist_dream.mp4 is a video in which a handwritten digit morphs dynamically, as shown below.

Taking this video as input, we cut out an image for every frame, feed it to the trained model, and collect the inference results.
Adding the inference result to each frame and converting the frames back into a video produces a video annotated with predictions, like the one below.

```
mnist_dream_path = 'images/mnist_dream.mp4'
mnist_prediction_path = 'images/mnist_dream_predicted.mp4'
# download the video if running in Colab
if not os.path.isfile(mnist_dream_path):
print('downloading the sample video...')
vid_url = this_tutorial_url + '/' + mnist_dream_path
mnist_dream_path = urllib.request.urlretrieve(vid_url)[0]
def cv2_imshow(img):
ret = cv2.imencode('.png', img)[1].tobytes()
img_ip = IPython.display.Image(data=ret)
IPython.display.display(img_ip)
cap = cv2.VideoCapture(mnist_dream_path)
vw = None
frame = -1 # frame counter for debugging, 0-indexed
# extract the MNIST image and run a prediction for every frame
while True: # 481 frames
    frame += 1
    ret, img = cap.read()
    if not ret: break
    assert img.shape[0] == img.shape[1] # the frame must be square
if img.shape[0] != 720:
        img = cv2.resize(img, (720, 720)) # resize to (720, 720)
    # preprocess the frame before running the prediction
    img_proc = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert BGR -> grayscale (OpenCV loads images as BGR: blue, green, red)
    img_proc = cv2.resize(img_proc, (28, 28)) # resize to 28 x 28
    img_proc = preprocess_images(img_proc) # normalize by dividing by 255
    img_proc = 1 - img_proc # invert the values: the training set has a black background and white digits
    net_in = np.expand_dims(img_proc, axis=0) # expand dims to add a batch dimension of 1
    net_in = np.expand_dims(net_in, axis=3) # expand dims to add the channel dimension
    preds = model.predict(net_in)[0] # predict returns shape (1, 10); take [0] to get shape (10,)
    guess = np.argmax(preds) # index of the highest probability
    perc = np.rint(preds * 100).astype(int) # round to integer percentages, e.g. array([100, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    img = 255 - img # invert the display image, shape (720, 720, 3), values 0-255
    pad_color = 0
    img = np.pad(img, ((0,0), (0,1280-720), (0,0)), mode='constant', constant_values=(pad_color)) # pad the width (720 -> 1280) to make room for the output panel
    line_type = cv2.LINE_AA # line type for drawing
font_face = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 1.3
thickness = 2
x, y = 740, 60
    color = (255, 255, 255) # white
    text = "Neural Network Output:"
    # draw the string "Neural Network Output:"
cv2.putText(img, text=text, org=(x, y), fontScale=font_scale, fontFace=font_face, thickness=thickness,
color=color, lineType=line_type)
    # draw the string "Input:"
text = "Input:"
cv2.putText(img, text=text, org=(30, y), fontScale=font_scale, fontFace=font_face, thickness=thickness,
color=color, lineType=line_type)
"""予測リスト(perc)から数字と確率情報を元に色を指定して、y座標を60ずつずらしながら短形を描画する。
予測値のラベル部分に色付きの長方形が描画される。"""
y = 130
for i, p in enumerate(perc): # (i,p) = (数字,パーセント)
if i == guess: color = (255, 218, 158) #予測値に色を付ける
else: color = (100, 100, 100) #予測値以外は灰色を指定
rect_width = 0 #長方形の横幅の初期値をセット
if p > 0: rect_width = int(p * 3.3) # 0%以外の部分に幅を指定(確率が高いほど横幅が長くなるように指定
rect_start = 180
cv2.rectangle(img, (x+rect_start, y-5), (x+rect_start+rect_width, y-20), color, -1)
text = '{}: {:>3}%'.format(i, int(p))
cv2.putText(img, text=text, org=(x, y), fontScale=font_scale, fontFace=font_face, thickness=thickness,
color=color, lineType=line_type)
y += 60
    # set this to False if you don't want to save the output as a video
save_video = True
if save_video:
if vw is None:
codec = cv2.VideoWriter_fourcc(*'DIVX')
# vid_width_height = (1280, 720)
vid_width_height = img.shape[1], img.shape[0] # img.shape[1] = 1280, img.shape[0]=720
            vw = cv2.VideoWriter(mnist_prediction_path, codec, 30, vid_width_height) # output file path and codec for (1280, 720) frames
        # write each frame several times so the changes play back slowly
vw.write(img)
vw.write(img)
vw.write(img)
vw.write(img)
vw.write(img)
vw.write(img)
vw.write(img)
vw.write(img)
vw.write(img)
vw.write(img)
    # scale down for display
    img_disp = cv2.resize(img, (0,0), fx=0.5, fy=0.5)
    cv2_imshow(img_disp)
    IPython.display.clear_output(wait=True) # clear the previous output
cap.release() # release the video capture
if vw is not None:
    vw.release() # release the mp4 writer (this finalizes the mp4 file)
```
### Play back the generated video with the inference results
```
# play the generated video
cap_file = cv2.VideoCapture('images/mnist_dream_predicted.mp4')
while True:
ret, img = cap_file.read()
if not ret: break
# scale down image for display
img_disp = cv2.resize(img, (0,0), fx=0.5, fy=0.5)
cv2_imshow(img_disp)
IPython.display.clear_output(wait=True)
```
### Step-by-step check of the pipeline (input video → frame extraction → inference → annotated video), run on a single frame
```
import matplotlib.pyplot as plt
mnist_dream_path = 'images/mnist_dream.mp4' # input video
mnist_prediction_path = 'images/mnist_dream_predicted_sample.mp4' # output video
# To load a video file with OpenCV, use cv2's VideoCapture class.
# Pass the path of the video file as the argument.
cap = cv2.VideoCapture(mnist_dream_path)
# get the playback length of the video in seconds
print(cap.get(cv2.CAP_PROP_FRAME_COUNT) / cap.get(cv2.CAP_PROP_FPS))
# the read() method returns a video frame as a NumPy ndarray
# read() returns a tuple: a bool indicating whether a frame could be read, and the frame as an ndarray
# as a test, display one of the frames
frame = -1
for i in range(120):
ret, img = cap.read()
plt.imshow(img)
print(img.shape)
def preprocess_images(imgs): # should work for both a single image and multiple images
sample_img = imgs if len(imgs.shape) == 2 else imgs[0]
assert sample_img.shape in [(28, 28, 1), (28, 28)], sample_img.shape # make sure images are 28x28 and single-channel (grayscale)
return imgs / 255.0
img = cv2.resize(img, (720, 720)) # resize to (720, 720)
# preprocess the frame before running the prediction
img_proc = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert BGR -> grayscale (OpenCV loads images as BGR: blue, green, red)
img_proc = cv2.resize(img_proc, (28, 28)) # resize to 28 x 28
img_proc = preprocess_images(img_proc) # normalize by dividing by 255
img_proc = 1 - img_proc # invert the values: the training set has a black background and white digits
print(img_proc.shape)
print(img_proc.shape)
net_in = np.expand_dims(img_proc, axis=0) # expand dims to add a batch dimension of 1
print(net_in.shape)
net_in = np.expand_dims(net_in, axis=3) # expand dims to add the channel dimension
print(net_in.shape)
preds = model.predict(net_in)[0] # predict returns shape (1, 10); take [0] to get shape (10,)
guess = np.argmax(preds) # index of the highest probability
perc = np.rint(preds * 100).astype(int) # round to integer percentages, e.g. array([100, 0, 0, 0, 0, 0, 0, 0, 0, 0])
print(perc)
img = 255 - img # invert the display image, shape (720, 720, 3), values 0-255
print(img.shape)
pad_color = 0
img = np.pad(img, ((0,0), (0,1280-720), (0,0)), mode='constant', constant_values=(pad_color)) # pad the width (720 -> 1280) to make room for the output panel
print(img.shape)
plt.imshow(img)
line_type = cv2.LINE_AA # line type for drawing
font_face = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 1.3 # font scale
thickness = 2 # thickness
x, y = 740, 60 # coordinates
color = (255, 255, 255) # white
text = "Neural Network Output:"
# draw the string "Neural Network Output:"
cv2.putText(img, text=text, org=(x, y), fontScale=font_scale, fontFace=font_face, thickness=thickness,color=color, lineType=line_type)
plt.imshow(img)
# draw the string "Input:"
text = "Input:"
cv2.putText(img, text=text, org=(30, y), fontScale=font_scale, fontFace=font_face, thickness=thickness,color=color, lineType=line_type)
plt.imshow(img)
for i, p in enumerate(perc): # (i, p) = (digit, percent)
    if i == guess: color = (255, 218, 158) # highlight the predicted digit
    else: color = (100, 100, 100)
# For each (digit, probability) pair in perc, pick a color and draw a rectangle, shifting the y coordinate by 60 each time.
# A colored bar is drawn next to the predicted label.
y = 130
for i, p in enumerate(perc):
    if i == guess: color = (255, 218, 158) # color for the predicted digit
    else: color = (100, 100, 100) # gray for all other digits
    rect_width = 0 # initial width of the bar
    if p > 0: rect_width = int(p * 3.3) # give non-zero probabilities a width (the higher the probability, the longer the bar)
rect_start = 180
cv2.rectangle(img, (x+rect_start, y-5), (x+rect_start+rect_width, y-20), color, -1)
text = '{}: {:>3}%'.format(i, int(p))
cv2.putText(img, text=text, org=(x, y), fontScale=font_scale, fontFace=font_face, thickness=thickness,
color=color, lineType=line_type)
y += 60
plt.imshow(img)
vw = None
save_video = True
if save_video:
if vw is None:
codec = cv2.VideoWriter_fourcc(*'DIVX')
# vid_width_height = (1280, 720)
vid_width_height = img.shape[1], img.shape[0] # img.shape[1] = 1280, img.shape[0]=720
        vw = cv2.VideoWriter(mnist_prediction_path, codec, 30, vid_width_height) # output file path and codec for (1280, 720) frames
    # 15 fps doesn't work well, so write the frame twice at 30 fps
vw.write(img)
vw.write(img)
# scale down image for display
def cv2_imshow(img):
ret = cv2.imencode('.png', img)[1].tobytes()
img_ip = IPython.display.Image(data=ret)
IPython.display.display(img_ip)
img_disp = cv2.resize(img, (0,0), fx=0.5, fy=0.5)
cv2_imshow(img_disp)
IPython.display.clear_output(wait=True) # clear the previous output
cap.release() # release the video capture
if vw is not None:
    vw.release() # release the mp4 writer (this finalizes the mp4 file)
```
|
github_jupyter
|
| 0.663778 | 0.969324 |
## Translate EMSL data.
This notebook demonstrates how to translate the EMSL spreadsheets [EMSL_FICUS_project_process_data_export.xlsx](https://drive.google.com/drive/u/1/folders/1frzGlz8EB8inpVokNTSwD6Ia94eVUlsZ) and [FICUS - JGI-EMSL Proposal - Gold Study - ID mapping and PI](https://docs.google.com/spreadsheets/d/1BX35JZsRkA5cZ-3Y6x217T3Aif30Ptxe_SjIC7JqPx4/edit#gid=0) into json that conforms to the [NMDC schema](https://github.com/microbiomedata/nmdc-metadata/blob/schema-draft/README.md).
Before doing the translation it is important that you have an up to date `nmdc.py` file in the `lib` directory.
The python modules for running the notebook are in the `requirements.txt` file.
```
import os, sys
sys.path.append(os.path.abspath('../src/bin/lib/')) # add path to lib
import json
import pandas as pds
import jsonasobj
import nmdc
import data_operations as dop
from pandasql import sqldf
def pysqldf(q):
return sqldf(q, globals())
```
## Load GOLD study table from nmdc zip file
The NMDC data is currently stored in a zip file. Instead of unzipping the file, simply use the `zipfile` library to load the `study` table (stored as tab-delimited files).
The code for unzipping and creating the dataframe is found in the `make_dataframe` function. As part of the dataframe creation process, the column names are lower cased and spaces are replaced with underscores. I find it helpful to have some standardization of column names when doing data wrangling. This behavior can be overridden if you wish.
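For readers who do not want to open `data_operations.py`, the sketch below illustrates roughly what such a helper could look like. It is an assumption for illustration only (the hypothetical `make_dataframe_sketch` is not the real `dop.make_dataframe`), but it captures the idea of reading a tab-delimited table straight out of the zip archive and standardizing the column names.
```
import zipfile
import pandas as pds

def make_dataframe_sketch(file_name, file_archive_name, sep='\t'):
    """Illustrative sketch only -- the real implementation lives in data_operations.py."""
    with zipfile.ZipFile(file_archive_name) as archive:
        with archive.open(file_name) as fp:
            df = pds.read_csv(fp, sep=sep)
    # lower case the column names and replace spaces with underscores
    df.columns = [str(c).strip().lower().replace(' ', '_') for c in df.columns]
    return df
```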
```
study = dop.make_dataframe("export.sql/STUDY_DATA_TABLE.dsv", file_archive_name="../src/data/nmdc-version2.zip")
```
## Subset GOLD tables to active records that are joined to valid study IDs
```
q = """
select
*
from
study
where
active = 'Yes'
"""
study = sqldf(q)
```
## Load EMSL spreadsheets into dataframes
```
## load emsl instrument run data
## the spreadsheet contains multiple tabs, so load it with pandas and then clean the column names
emsl = pds.concat(pds.read_excel("../src/data/EMSL_FICUS_project_process_data_export.xlsx",
sheet_name=None), ignore_index=True)
emsl = dop.clean_dataframe_column_names(emsl)
## load mapping spreadsheet
jgi_emsl = dop.make_dataframe("../src/data/FICUS - JGI-EMSL Proposal - Gold Study - ID mapping and PI.xlsx", file_type="excel")
```
## Subset EMSL data to only those that have a valid FICUS study ID
```
## subset the mapping spreadsheet
q = """
select
*
from
jgi_emsl
inner join
study
on
jgi_emsl.gold_study_id = study.gold_id
"""
jgi_emsl = sqldf(q)
# jgi_emsl.head() # peek at data
## subset instrument run data
q = """
select
emsl.*, jgi_emsl.gold_study_id
from
emsl
inner join
jgi_emsl
on
emsl.emsl_proposal_id = jgi_emsl.emsl_proposal_id
"""
emsl = sqldf(q)
```
## Update/prep instrument run data
* Change column experimental_data_type to omics_type
* Change column dataset_file_size_bytes to file_size
* Add processing_institution = "Environmental Molecular Sciences Lab" column
* Add column data_object_id to identify data objects. Currently, this is just "output_" + the value of dataset_id
* Add column data_object_name associated with data object ids. Currently, this is just "output: " + the value of dataset_name
```
emsl.rename(columns={"experimental_data_type":"omics_type"}, inplace=True) # rename column
emsl.rename(columns={"dataset_file_size_bytes":"file_size"}, inplace=True) # rename column
emsl["processing_institution"] = "Environmental Molecular Sciences Lab" # add processing institution
emsl["data_object_id"] = "output_"
emsl["data_object_id"] = emsl["data_object_id"] + emsl["dataset_id"].map(str) # build data object id
emsl["data_object_name"] = "output: "
emsl["data_object_name"] = emsl["data_object_name"] + emsl["dataset_name"].map(str) # build data object name
# emsl[["data_object_id", "dataset_id", "omics_type", "processing_institution", "gold_study_id"]].head() # peek at data
```
## Build omics processing json
```
emsl_dictdf = emsl.to_dict(orient="records")
## specify characteristics
characteristics = \
['omics_type', 'instrument_name', 'processing_institution']
## create list of json string objects
omics_processing_dict_list = dop.make_nmdc_dict_list\
(emsl_dictdf, nmdc.OmicsProcessing, id_key='dataset_id', name_key='dataset_name', description_key="dataset_type_description",
part_of_key="gold_study_id", has_output_key="data_object_id", characteristic_fields=characteristics)
omics_processing_dict_list[0] # peek at data
```
## Build data objects json
```
## specify characteristics
characteristics = ['file_size']
## create list of dictionaries
data_objects_dict_list = dop.make_nmdc_dict_list\
(emsl_dictdf, nmdc.DataObject, id_key='data_object_id',
name_key='data_object_name', characteristic_fields=characteristics)
# data_objects_dict_list[-1] # peek at data
```
## Update the omics_processing.json file
```
## load omics processing json into dict list
omics_processing_file_data = dop.load_dict_from_json_file("output/nmdc-json/omics_processing.json")
omics_processing_file_data[0] # peek at data
updated_omics_processing = [*omics_processing_file_data, *omics_processing_dict_list]
# updated_omics_processing[-1] ## peek at data
```
## Save updated omics processing data as json
```
updated_omics_processing_json_list = dop.convert_dict_list_to_json_list(updated_omics_processing)
dop.save_json_string_list("output/nmdc-json/omics_processing.json", updated_omics_processing_json_list) # save json string list to file
```
## Update the data_objects.json file
```
## load data objects json into dict list
data_objects_file_data = dop.load_dict_from_json_file("output/nmdc-json/data_objects.json")
# data_objects_file_data[0] # peek at data
updated_data_objects = [*data_objects_file_data, *data_objects_dict_list]
# updated_data_objects[-1] # peek at data
```
## Save updated data objects data as json
```
updated_data_objects_json_list = dop.convert_dict_list_to_json_list(updated_data_objects)
dop.save_json_string_list("output/nmdc-json/data_objects.json", updated_data_objects_json_list) # save json string list to file
```
|
github_jupyter
|
| 0.208582 | 0.84891 |
```
import sys
sys.path.append('../python_packages_static')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import fiona
from shapely.geometry import shape
from gisutils import project
from pydrograph import Nwis
import geopandas as gp
import requests
```
# Notebook to extract NWIS data from model area and make obs tables
## 1. Get DV surface water obs from USGS StreamStats: https://streamstats.usgs.gov/ss/
### 1.1 Get streamflow daily values sites
**Create model bounding box and retrieve NWIS streamflow DV sites**
uses the excellent `pydrograph`: https://github.com/aleaf/pydrograph
```
extent_shp = '../source_data/Shapefiles/Extents/Model_Extent_HUC12.shp'
epsg = 5070
extent_poly = shape(fiona.open(extent_shp).next()['geometry'])
extent_poly_ll = project(extent_poly, "+init=epsg:{}".format(epsg), "+init=epsg:4269")
extent_poly_ll.bounds
bound = gp.read_file(extent_shp)
nwis = Nwis(extent=extent_poly_ll)
```
**Get streamflow daily values sites using `pydrograph`**
```
all_dvs = nwis.get_siteinfo('daily_values')
all_dvs
bound = bound.to_crs(epsg=4269)
fig, ax = plt.subplots()
bound.plot(ax=ax, facecolor='None', edgecolor='black')
ax.scatter(all_dvs.dec_long_va, all_dvs.dec_lat_va)
plt.show()
```
### Only SANDBURG CREEK AT ELLENVILLE NY (01366650) and NEVERSINK RIVER AT WOODBOURNE NY (01436500) can be used for flux targets
* NEVERSINK RIVER AT NEVERSINK NY at edge of model, used for SFR inflow (see 0.7_make_SFR_inflow.ipynb)
* Data collection at GUMAER BROOK NEAR WURTSBORO NY only started on 2019-12-11, too little data available for annual average flow/BFI
### 1.2 Extract Mean annual flow and BFI data from Streamstats: https://streamstats.usgs.gov/ss/
**gage pages:**
* SANDBURG CREEK AT ELLENVILLE NY (01366650): https://streamstatsags.cr.usgs.gov/gagepages/html/01366650.htm
* NEVERSINK RIVER AT WOODBOURNE NY (01436500): https://streamstatsags.cr.usgs.gov/gagepages/html/01436500.htm
```
gages = ['01366650', '01436500']
sites_dict = {}
# read from gage streamstats pages
for gage in gages:
site_dict = {}
url = f'https://streamstatsags.cr.usgs.gov/gagepages/html/{gage}.htm'
info = ['Mean_Annual_Flow', 'Average_BFI_value', 'Latitude (degrees NAD83)', 'Longitude (degrees NAD83)']
r = requests.get(url, stream=True)
lines = [l.decode('cp1252') for l in r.iter_lines()]
line_nums = []
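    # record the index of every line that mentions one of the properties of interest, plus the following line, which holds that property's value in the HTML table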
for i, line in enumerate(lines):
for t in info:
if t in line:
line_nums.append(i)
line_nums.append(i+1)
for prop, val in zip(line_nums[::2], line_nums[1::2]):
p = lines[prop].replace('<td>','').replace('</td>','').replace("<tr class='even'><td width='197'>",'').replace("<tr><td width='200'>",'')
v = float(lines[val].replace('<td>','').replace('</td>','').replace("<td width='590'>",'').replace('</tr>',''))
print(f' prop: {p}')
print(f' val: {v}')
site_dict[p] = v
print('\n')
sites_dict[gage] = site_dict
df = pd.DataFrame.from_dict(sites_dict).T.reset_index(drop=False).rename(columns={'index':'site_id', 'Mean_Annual_Flow':'Mean_Annual_Flow_cfs'})
df
# write out to processed data
df.to_csv('../processed_data/NWIS_DV_STREAMSTATS_INFO.csv', index=False)
```
## 2. Get groundwater daily values
```
gw_dv = nwis.get_siteinfo('gwdv')
gw_dv
```
### Pull groundwater data for the lone GW DVs site in the model domain
uses `pydrograph`: https://github.com/aleaf/pydrograph
```
gw_site = gw_dv.iloc[0]['site_no']
# pydrograph gw dv data retrieval not working at the moment; do it manually for now -- see below:
gw_data = nwis.get_all_dvs([gw_site],
parameter_code='72019',
start_date='2009-01-01',
end_date='2016-01-01'
)
gw_df = gw_data[gw_site]
gw_df = gw_df.groupby('site_no').mean()
gw_df = gw_df.rename(columns={'106190_72019_00003':'ave_dtw_ft'})
gw_df = gw_df.join(gw_dv[['alt_va']])
gw_df['gw_elev_ft'] = gw_df['alt_va'] - gw_df['ave_dtw_ft']
gw_df['gw_elev_m'] = gw_df['gw_elev_ft'] * 0.3048
gw_df.to_csv('../processed_data/NWIS_GW_DV_data.csv')
gw_df
```
### export NWIS gw sites table for obs section of `neversink_full.yml`
```
gw_dv_gdf = gp.GeoDataFrame(gw_dv, crs="EPSG:4269", geometry=gw_dv.geometry)
gw_dv_gdf_reproj = gw_dv_gdf.to_crs(epsg=epsg)
gw_dv_gdf_reproj['x'] = gw_dv_gdf_reproj.geometry.x
gw_dv_gdf_reproj['y'] = gw_dv_gdf_reproj.geometry.y
gw_dv_gdf_reproj['obsprefix'] = gw_dv_gdf_reproj.index
gw_dv_gdf_reproj
gw_dv_gdf_reproj.to_csv('../processed_data/NWIS_GW_DV_sites.csv', index=False)
```
|
github_jupyter
|
| 0.24726 | 0.808974 |
## Confidence Interval
It is useful to estimate an interval for the possible values of a parameter and to put a probability on how confident we are that the true parameter value falls inside this interval.
## Example
We have the data X and assume we know the population standard deviation ($\sigma$). What is the confidence interval for the population mean?
$P(L < \mu < U) = 1 - \alpha$
We want to obtain $L$ and $U$ with $1-\alpha$ confidence.
## From statistics references
$L = \bar{x} - z_{1- \alpha/2}\frac{\sigma}{\sqrt{N}}$
$U = \bar{x} + z_{1- \alpha/2}\frac{\sigma}{\sqrt{N}}$
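As a quick sanity check of these formulas (with rounded numbers, not the exact values the code below will produce): assuming a sample mean of roughly $\bar{x} \approx 5.0$, with $\sigma = 0.3525$, $N = 50$ and $z_{1-\alpha/2} = z_{0.975} \approx 1.96$,
$\bar{x} \pm z_{0.975}\frac{\sigma}{\sqrt{N}} \approx 5.0 \pm 1.96 \times \frac{0.3525}{\sqrt{50}} \approx 5.0 \pm 0.098$
so the 95% confidence interval is roughly $(4.90, 5.10)$.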
## Activity: Obtain the confidence interval for the mean sepal length of setosa
The dataset we will work with is Iris.csv
Tasks:
1- Explore this dataset. How many features, records and plants does it have?
2- Gather all of the sepal lengths for Iris-setosa
3- Write a function that calculates the lower and upper bounds for the mean sepal length of Iris-setosa with 95% confidence.
Assume $\sigma = 0.3525$
Hint: use `scipy.stats.norm.ppf()` to calculate $z_{1- \alpha/2}$
```
import pandas as pd
import numpy as np
import scipy.stats
df = pd.read_csv('Iris.csv')
x = df[df['Species'] == 'Iris-setosa']['SepalLengthCm'].tolist()
print(np.mean(x))
def ci_z(data_sample, significant_level, sigma):
z = scipy.stats.norm.ppf(1-significant_level/2)
L = np.mean(data_sample) - z*sigma/np.sqrt(len(data_sample))
U = np.mean(data_sample) + z*sigma/np.sqrt(len(data_sample))
return L, U
def ci_t(data_sample, significant_level):
t = scipy.stats.t.ppf(1 - significant_level/2, len(data_sample) - 1)
L = np.mean(data_sample) - t * np.std(data_sample, ddof=1) / np.sqrt(len(data_sample))
U = np.mean(data_sample) + t * np.std(data_sample, ddof=1) / np.sqrt(len(data_sample))
return L, U
print(ci_z(x, 0.05, 0.3525))
print(ci_t(x,0.05))
```
## Outlier Detection
Outliers are extreme values that can skew our dataset, sometimes giving us an incorrect picture of how things actually are in our dataset. The hardest part of this is determining which data points are acceptable, and which ones constitute "outlier" status.
## Activity: find and remove outliers if our dataset is Normal
When our sample data is close to a normal distribution, samples that fall more than three standard deviations from the mean can be considered outliers.
Task: Write a function that first finds the outliers in normally distributed data, then removes them.
```
import numpy as np
def find_remove_outlier(data_sample):
# calculate summary statistics
    data_mean, data_std = np.mean(data_sample), np.std(data_sample)
# define cut-off
cut_off = data_std * 3
lower, upper = data_mean - cut_off, data_mean + cut_off
# identify outliers
outliers = [x for x in data_sample if x < lower or x > upper]
# remove outliers
outliers_removed = [x for x in data_sample if x > lower and x < upper]
return outliers, outliers_removed
```
## Interquartile range (IQR) for finding and removing outlier when data has any distribution
Tukey suggested calculating the range between the first quartile (25%) and the third quartile (75%) of the data, called the interquartile range (IQR).
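With the conventional multiplier of 1.5, any sample outside the fences
$L = Q_1 - 1.5 \times IQR, \qquad U = Q_3 + 1.5 \times IQR, \qquad IQR = Q_3 - Q_1$
is flagged as an outlier; this is the same cutoff the code below uses.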
## Activity: IQR outlier detection and removal
Task: write a function to find and remove outliers based on the IQR method for this data sample:
Hint:
$Q_1$ is the first quartile (25%)
$Q_3$ is the third quartile (75%)
<img src="Images/iqr.png">
`x = [norm.rvs(loc=5 , scale=1 , size=100), -5, 11, 14]`
```
import numpy as np
def find_remove_outlier_iqr(data_sample):
# calculate interquartile range
q25, q75 = np.percentile(data_sample, 25), np.percentile(data_sample, 75)
iqr = q75 - q25
# calculate the outlier cutoff
cut_off = iqr * 1.5
lower, upper = q25 - cut_off, q75 + cut_off
# identify outliers
outliers = [x for x in data_sample if x < lower or x > upper]
# remove outliers
outliers_removed = [x for x in data_sample if x > lower and x < upper]
return outliers
y = np.array([-5, 11, 14])
x = np.concatenate((scipy.stats.norm.rvs(loc=5 , scale=1 , size=100), y))
print(type(x))
print(find_remove_outlier_iqr(x))
print(scipy.stats.iqr(x))
```
## How can we visually see the outliers?
Box plots use the IQR method to display the data and its outliers.
```
import matplotlib.pyplot as plt
plt.boxplot(x)
plt.show()
```
|
github_jupyter
|
| 0.509764 | 0.981257 |
# Data Exploration with Pandas
```
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
df = pd.read_csv('../data/titanic-train.csv')
type(df)
df.head()
df.info()
df.describe()
```
### Indexing
```
df.iloc[3]
df.loc[0:4,'Ticket']
df['Ticket'].head()
df[['Embarked', 'Ticket']].head()
```
### Selections
```
df[df['Age'] > 70]
df['Age'] > 70
df.query("Age > 70")
df[(df['Age'] == 11) & (df['SibSp'] == 5)]
df[(df.Age == 11) | (df.SibSp == 5)]
df.query('(Age == 11) | (SibSp == 5)')
```
### Unique Values
```
df['Embarked'].unique()
```
### Sorting
```
df.sort_values('Age', ascending = False).head()
```
### Aggregations
```
df['Survived'].value_counts()
df['Pclass'].value_counts()
df.groupby(['Pclass', 'Survived'])['PassengerId'].count()
df['Age'].min()
df['Age'].max()
df['Age'].mean()
df['Age'].median()
mean_age_by_survived = df.groupby('Survived')['Age'].mean()
mean_age_by_survived
std_age_by_survived = df.groupby('Survived')['Age'].std()
std_age_by_survived
```
### Merge
```
df1 = mean_age_by_survived.round(0).reset_index()
df2 = std_age_by_survived.round(0).reset_index()
df1
df2
df3 = pd.merge(df1, df2, on='Survived')
df3
df3.columns = ['Survived', 'Average Age', 'Age Standard Deviation']
df3
```
### Pivot Tables
```
df.pivot_table(index='Pclass',
columns='Survived',
values='PassengerId',
aggfunc='count')
```
### Correlations
```
df['IsFemale'] = df['Sex'] == 'female'
correlated_with_survived = df.corr()['Survived'].sort_values()
correlated_with_survived
%matplotlib inline
correlated_with_survived.iloc[:-1].plot(kind='bar',
title='Titanic Passengers: correlation with survival')
```
# Visual Data Exploration with Matplotlib
```
data1 = np.random.normal(0, 0.1, 1000)
data2 = np.random.normal(1, 0.4, 1000) + np.linspace(0, 1, 1000)
data3 = 2 + np.random.random(1000) * np.linspace(1, 5, 1000)
data4 = np.random.normal(3, 0.2, 1000) + 0.3 * np.sin(np.linspace(0, 20, 1000))
data = np.vstack([data1, data2, data3, data4]).transpose()
df = pd.DataFrame(data, columns=['data1', 'data2', 'data3', 'data4'])
df.head()
```
### Line Plot
```
df.plot(title='Line plot')
plt.plot(df)
plt.title('Line plot')
plt.legend(['data1', 'data2', 'data3', 'data4'])
```
### Scatter Plot
```
df.plot(style='.')
_ = df.plot(kind='scatter', x='data1', y='data2',
xlim=(-1.5, 1.5), ylim=(0, 3))
```
### Histograms
```
df.plot(kind='hist',
bins=50,
title='Histogram',
alpha=0.6)
```
### Cumulative distribution
```
df.plot(kind='hist',
bins=100,
title='Cumulative distributions',
normed=True,
cumulative=True,
alpha=0.4)
```
### Box Plot
```
df.plot(kind='box',
title='Boxplot')
```
### Subplots
```
fig, ax = plt.subplots(2, 2, figsize=(5, 5))
df.plot(ax=ax[0][0],
title='Line plot')
df.plot(ax=ax[0][1],
style='o',
title='Scatter plot')
df.plot(ax=ax[1][0],
kind='hist',
bins=50,
title='Histogram')
df.plot(ax=ax[1][1],
kind='box',
title='Boxplot')
plt.tight_layout()
```
### Pie charts
```
gt01 = df['data1'] > 0.1
piecounts = gt01.value_counts()
piecounts
piecounts.plot(kind='pie',
figsize=(5, 5),
explode=[0, 0.15],
labels=['<= 0.1', '> 0.1'],
autopct='%1.1f%%',
shadow=True,
startangle=90,
fontsize=16)
```
### Hexbin plot
```
data = np.vstack([np.random.normal((0, 0), 2, size=(1000, 2)),
np.random.normal((9, 9), 3, size=(2000, 2))])
df = pd.DataFrame(data, columns=['x', 'y'])
df.head()
df.plot()
df.plot(kind='kde')
df.plot(kind='hexbin', x='x', y='y', bins=100, cmap='rainbow')
```
# Unstructured data
### Images
```
from PIL import Image
img = Image.open('../data/iss.jpg')
img
type(img)
imgarray = np.asarray(img)
type(imgarray)
imgarray.shape
imgarray.ravel().shape
435 * 640 * 3
```
### Sound
```
from scipy.io import wavfile
rate, snd = wavfile.read(filename='../data/sms.wav')
from IPython.display import Audio
Audio(data=snd, rate=rate)
len(snd)
snd
plt.plot(snd)
_ = plt.specgram(snd, NFFT=1024, Fs=44100)
plt.ylabel('Frequency (Hz)')
plt.xlabel('Time (s)')
```
# Data Exploration Exercises
## Exercise 1
- load the dataset: `../data/international-airline-passengers.csv`
- inspect it using the `.info()` and `.head()` commands
- use the function [`pd.to_datetime()`](http://pandas.pydata.org/pandas-docs/version/0.20/generated/pandas.to_datetime.html) to change the column type of 'Month' to a datetime type
- set the index of df to be a datetime index using the column 'Month' and the `df.set_index()` method
- choose the appropriate plot and display the data
- choose appropriate scale
- label the axes
```
df = pd.read_csv('../data/international-airline-passengers.csv')
df.info()
df.head()
df['Month']=pd.to_datetime(df['Month'])
df.set_index(df['Month'], inplace=True)
df.plot(x='Month', y='Thousand Passengers')
```
## Exercise 2
- load the dataset: `../data/weight-height.csv`
- inspect it
- plot it using a scatter plot with Weight as a function of Height
- plot the male and female populations with 2 different colors on a new scatter plot
- remember to label the axes
```
df = pd.read_csv('../data/weight-height.csv')
df.info()
df.head()
# Plot
plt.scatter(x=df['Height'], y=df['Weight'])
plt.title('Scatter plot weight-height')
plt.xlabel('Height')
plt.ylabel('Weight')
plt.show()
males = df[df['Gender']=='Male']
females = df[df['Gender']=='Female']
fig, ax = plt.subplots()
males.plot(kind='scatter', x='Height', y='Weight', ax=ax, color ='blue', title = 'Male & Female populations')
females.plot(kind='scatter', x='Height', y='Weight', ax=ax, color ='red')
```
## Exercise 3
- plot the histogram of the heights for males and for females on the same plot
- use alpha to control transparency in the plot command
- plot a vertical line at the mean of each population using `plt.axvline()`
```
males['Height'].plot(kind = 'hist',
bins=50,
title='Histogram',
alpha=0.7)
females['Height'].plot(kind = 'hist',
bins=50,
title='Histogram',
alpha=0.7)
plt.title('Population Height')
plt.legend(['Males','Females'])
plt.xlabel("Height (in)")
plt.axvline(x= males['Height'].mean(), linewidth=4, color='b')
plt.axvline(x= females['Height'].mean(), linewidth=4, color='r')
```
## Exercise 4
- plot the weights of the males and females using a box plot
- which one is easier to read?
- (remember to put in titles, axes and legends)
```
df.boxplot(by ='Gender', column =['Weight'], grid = False)
```
## Exercise 5
- load the dataset: `../data/titanic-train.csv`
- learn about scattermatrix here: http://pandas.pydata.org/pandas-docs/stable/visualization.html
- display the data using a scattermatrix
```
df = pd.read_csv('../data/titanic-train.csv')
df.head()
from pandas.tools.plotting import scatter_matrix
_ = scatter_matrix(df.drop('PassengerId', axis=1), figsize=(10, 10))
```
|
github_jupyter
|
| 0.464173 | 0.911416 |
## Example of using the Google Cloud Client Library for BigQuery
This is the recommended way to programmatically access BigQuery.
The API documentation is here: https://googleapis.github.io/google-cloud-python/latest/bigquery/reference.html. Because it is impossible to cover the full API, we strongly suggest that you have a browser window open to the documentation as you read through this notebook and try it out.
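If you are running this outside a Google-managed notebook, you will typically need to install the client library and set up Application Default Credentials first. The cell below is a minimal connectivity check; the `pip`/`gcloud` commands and the trivial query are assumptions added for convenience, not part of the original notebook.

```
# Assumed setup steps (run in a shell, not in this notebook):
#   pip install google-cloud-bigquery
#   gcloud auth application-default login
from google.cloud import bigquery

client = bigquery.Client(project='cloud-training-demos')  # CHANGE THIS
# Run a trivial query to confirm that authentication and billing are working.
for row in client.query('SELECT 1 AS ok'):
    print(row['ok'])
```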
### Authenticate and build stubs
```
PROJECT='cloud-training-demos' # CHANGE THIS
from google.cloud import bigquery
bq = bigquery.Client(project=PROJECT)
```
## Dataset manipulation
Get info about a dataset
```
# information about the ch04 dataset in our project
dsref = bq.dataset(dataset_id="ch04", project=PROJECT)
dsinfo = bq.get_dataset(dsref)
print(dsinfo.dataset_id)
print(dsinfo.created)
```
By default, the project in the Client is used
```
# information about the ch04 dataset in our project
dsref = bq.dataset(dataset_id="ch04")
dsinfo = bq.get_dataset(dsref)
print(dsinfo.dataset_id)
print(dsinfo.created)
```
Get info about a dataset in some other project
```
from google.cloud.bigquery.dataset import DatasetReference
dsref = bq.dataset(dataset_id="london_bicycles", project='bigquery-public-data')
dsinfo = bq.get_dataset(dsref)
print('{} created on {}'.format(dsinfo.dataset_id, dsinfo.created))
```
Another way to create a dataset reference
```
from google.cloud.bigquery.dataset import DatasetReference
dsref = DatasetReference.from_string('bigquery-public-data.london_bicycles')
dsinfo = bq.get_dataset(dsref)
print('{} created on {} in {}'.format(dsinfo.dataset_id, dsinfo.created, dsinfo.location))
for access in dsinfo.access_entries:
if access.role == 'READER':
print(access)
```
Deleting a dataset
```
def delete_dataset(name, project_name=PROJECT):
from http import HTTPStatus
dsref = bq.dataset(dataset_id=name, project=project_name)
try:
bq.delete_dataset(dsref)
print('{} deleted'.format(dsref.dataset_id))
except Exception as err:
if err.code == HTTPStatus.NOT_FOUND: # 404
print('Dataset {} does not exist'.format(dsref.dataset_id))
else:
raise
delete_dataset('ch05')
```
Creating a dataset
```
from http import HTTPStatus
dsref = bq.dataset(dataset_id="ch05", project=PROJECT)
try:
ds = bq.create_dataset(dsref)
print('{} created on {}'.format(ds.dataset_id, ds.created))
except Exception as err:
if err.code == HTTPStatus.CONFLICT: # 409
print('Dataset {} already exists.'.format(dsref.dataset_id))
else:
raise
```
Creating a dataset in EU
```
delete_dataset('ch05eu') # start afresh
from http import HTTPStatus
dsref = bq.dataset(dataset_id="ch05eu", project=PROJECT)
try:
dsinfo = bigquery.Dataset(dsref)
dsinfo.location = 'EU'
ds = bq.create_dataset(dsinfo)
print('{} created on {} in {}'.format(ds.dataset_id, ds.created, ds.location))
except Exception as err:
if err.code == HTTPStatus.CONFLICT: # 409
print('Dataset {} already exists.'.format(dsref.dataset_id))
else:
raise
```
Updating a dataset
```
dsref = bq.dataset(dataset_id="ch05", project=PROJECT)
dsinfo = bq.get_dataset(dsref)
print(dsinfo.description)
dsinfo.description = "Chapter 5 of BigQuery: The Definitive Guide"
dsinfo = bq.update_dataset(dsinfo, ['description'])
print(dsinfo.description)
```
Adding access to a dataset programmatically
```
dsref = bq.dataset(dataset_id="ch05", project=PROJECT)
dsinfo = bq.get_dataset(dsref)
entry = bigquery.AccessEntry(
role="READER",
entity_type="userByEmail",
entity_id="[email protected]",
)
if entry not in dsinfo.access_entries:
entries = list(dsinfo.access_entries)
entries.append(entry)
dsinfo.access_entries = entries
dsinfo = bq.update_dataset(dsinfo, ["access_entries"]) # API request
else:
print('{} already has access'.format(entry.entity_id))
print(dsinfo.access_entries)
```
## Table manipulation
List tables in dataset
```
# list tables in dataset
dsref = DatasetReference.from_string('bigquery-public-data.london_bicycles')
tables = bq.list_tables(dsref)
for table in tables:
print(table.table_id)
```
View table properties
```
from google.cloud.bigquery.table import TableReference
tblref = TableReference.from_string('bigquery-public-data.london_bicycles.cycle_stations')
table = bq.get_table(tblref)
print('{} rows in {} (descr: {})'.format(table.num_rows, table.table_id, table.description))
for field in table.schema:
if 'count' in field.name:
print(field)
```
Deleting a table
```
def delete_table(table_name, project=PROJECT):
from http import HTTPStatus
tblref = TableReference.from_string('{}.{}'.format(project, table_name))
try:
bq.delete_table(tblref)
print('{} deleted'.format(tblref.table_id))
except Exception as err:
if err.code == HTTPStatus.NOT_FOUND: # 404
print('Table {} does not exist'.format(tblref.table_id))
else:
raise
delete_table('ch05.temp_table')
```
Creating a table
```
from http import HTTPStatus
tblref = TableReference.from_string('{}.ch05.temp_table'.format(PROJECT))
try:
table = bq.create_table(tblref)
print('{} created on {}'.format(table.table_id, table.created))
except Exception as err:
if err.code == HTTPStatus.CONFLICT: # 409
print('Table {} already exists.'.format(table.table_id))
else:
raise
```
Update table schema
```
schema = [
bigquery.SchemaField("chapter", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("title", "STRING", mode="REQUIRED"),
]
tblref = TableReference.from_string('{}.ch05.temp_table'.format(PROJECT))
table = bq.get_table(tblref)
print(table.etag)
table.schema = schema
table = bq.update_table(table, ["schema"])
print(table.schema)
print(table.etag)
```
Insert rows into table
```
rows = [
(1, u'What is BigQuery?'),
(2, u'Query essentials'),
]
print(table.table_id, table.num_rows)
errors = bq.insert_rows(table, rows)
print(errors)
table = bq.get_table(tblref)
print(table.table_id, table.num_rows) # won't be updated because streaming
## This will fail because the data type on the 2nd row is wrong
rows = [
('3', u'Operating on data types'),
('wont work', u'This will fail'),
('4', u'Loading data into BigQuery'),
]
errors = bq.insert_rows(table, rows)
print(errors)
```
Creating an empty table with schema
```
from http import HTTPStatus
schema = [
bigquery.SchemaField("chapter", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("title", "STRING", mode="REQUIRED"),
]
tblref = TableReference.from_string(
'{}.ch05.temp_table2'.format(PROJECT))
try:
table = bigquery.Table(tblref, schema)
table = bq.create_table(table)
print('{} created on {}'.format(table.table_id, table.created))
print(table.schema)
except Exception as err:
if err.code == HTTPStatus.CONFLICT: # 409
print('Table {} already exists.'.format(table.table_id))
else:
raise
# remove the two temporary tables
delete_table('ch05.temp_table')
delete_table('ch05.temp_table2')
```
Loading a Pandas data frame
```
!pip install pyarrow
import pandas as pd
data = [
(1, u'What is BigQuery?'),
(2, u'Query essentials'),
]
df = pd.DataFrame(data, columns=['chapter', 'title'])
tblref = TableReference.from_string(
'{}.ch05.temp_table3'.format(PROJECT))
job = bq.load_table_from_dataframe(df, tblref)
job.result() # blocks and waits
print("Loaded {} rows into {}".format(job.output_rows, tblref.table_id))
delete_table('ch05.temp_table3')
```
Loading from a URI
```
import time
job_config = bigquery.LoadJobConfig()
job_config.autodetect = True
job_config.source_format = bigquery.SourceFormat.CSV
job_config.null_marker = 'NULL'
uri = "gs://bigquery-oreilly-book/college_scorecard.csv"
tblref = TableReference.from_string(
'{}.ch05.college_scorecard_gcs'.format(PROJECT))
job = bq.load_table_from_uri(uri, tblref, job_config=job_config)
while not job.done():
print('.', end='', flush=True)
time.sleep(0.1)
print('Done')
table = bq.get_table(tblref)
print("Loaded {} rows into {}.".format(table.num_rows, table.table_id))
delete_table('ch05.college_scorecard_gcs')
```
Loading from a file object
```
import time
import gzip
job_config = bigquery.LoadJobConfig()
job_config.autodetect = True
job_config.source_format = bigquery.SourceFormat.CSV
job_config.null_marker = 'NULL'
tblref = TableReference.from_string(
'{}.ch05.college_scorecard_local'.format(PROJECT))
with gzip.open('../04_load/college_scorecard.csv.gz') as fp:
job = bq.load_table_from_file(fp, tblref, job_config=job_config)
while not job.done():
print('.', end='', flush=True)
time.sleep(0.1)
print('Done')
table = bq.get_table(tblref)
print("Loaded {} rows into {}.".format(table.num_rows, table.table_id))
delete_table('ch05.college_scorecard_local')
```
Copying a table
```
# copy london stations table to our dataset
source_tbl = TableReference.from_string(
'bigquery-public-data.london_bicycles.cycle_stations')
dest_tbl = TableReference.from_string(
'{}.ch05eu.cycle_stations_copy'.format(PROJECT))
job = bq.copy_table(source_tbl, dest_tbl, location='EU')
job.result() # blocks and waits
dest_table = bq.get_table(dest_tbl)
print(dest_table.num_rows)
```
Exporting from a table to Cloud Storage
```
BUCKET=PROJECT + '-eu-temp'
!gsutil mb -l EU gs://$BUCKET
source_tbl = TableReference.from_string(
'bigquery-public-data.london_bicycles.cycle_stations')
dest_uri = 'gs://{}/tmp/exported/cycle_stations'.format(BUCKET)
config = bigquery.job.ExtractJobConfig(
destination_format=bigquery.job.DestinationFormat.NEWLINE_DELIMITED_JSON)
job = bq.extract_table(source_tbl, dest_uri, location='EU', job_config=config)
job.result() # blocks and waits
!gsutil cat $dest_uri | head -5
!gsutil rm -rf gs://$BUCKET
!gsutil rb -f gs://$BUCKET
```
Browsing a table
```
tblref = TableReference.from_string(
'bigquery-public-data.london_bicycles.cycle_stations')
table = bq.get_table(tblref)
print("Total number of rows = {}".format(table.num_rows)) # 787
fields = [field for field in table.schema
if 'count' in field.name or field.name == 'id']
print("Extracting only {}".format(fields))
rows = bq.list_rows(table,
start_index=300,
max_results=5,
selected_fields=fields)
fmt = '{!s:<10} ' * len(rows.schema)
print(fmt.format(*[field.name for field in rows.schema]))
for row in rows:
print(fmt.format(*row))
```
### Query and get result
```
query = """
SELECT
start_station_name
, AVG(duration) as duration
, COUNT(duration) as num_trips
FROM `bigquery-public-data`.london_bicycles.cycle_hire
GROUP BY start_station_name
ORDER BY num_trips DESC
LIMIT 10
"""
print(query)
```
Dry run
```
config = bigquery.QueryJobConfig()
config.dry_run = True
job = bq.query(query, location='EU', job_config=config)
print("This query will process {} bytes.".format(job.total_bytes_processed))
```
Actual execution
```
# send query request
job = bq.query(query, location='EU')
fmt = '{!s:<40} {:>10d} {:>10d}'
for row in job:
fields = (row['start_station_name'],
              int(0.5 + row['duration']),
row['num_trips'])
print(fmt.format(*fields))
```
Query result to Pandas dataframe
```
query = """
SELECT
start_station_name
, AVG(duration) as duration
, COUNT(duration) as num_trips
FROM `bigquery-public-data`.london_bicycles.cycle_hire
GROUP BY start_station_name
"""
df = bq.query(query, location='EU').to_dataframe()
print(df.describe())
```
Parameterized query to get only trips longer than some duration
```
query2 = """
SELECT
start_station_name
, COUNT(duration) as num_trips
FROM `bigquery-public-data`.london_bicycles.cycle_hire
WHERE duration >= @min_duration
GROUP BY start_station_name
ORDER BY num_trips DESC
LIMIT 10
"""
print(query2)
config = bigquery.QueryJobConfig()
config.query_parameters = [
bigquery.ScalarQueryParameter('min_duration', "INT64", 600)
]
job = bq.query(query2, location='EU', job_config=config)
fmt = '{!s:<40} {:>10d}'
for row in job:
fields = (row['start_station_name'],
row['num_trips'])
print(fmt.format(*fields))
```
Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
<a href="https://colab.research.google.com/github/eda-ricercatore/gulyas-scripts/blob/master/sandbox/python/google-colab/elastic-net/spare/l1_l2_combined.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
# Preamble... Setting up the environment/workflow.
# Importing Python packages from Python Libraries
import matplotlib.pyplot as plt
import numpy as np
"""
Set up the 2-D space for graph plotting.
Can be extended to 3 dimensions, or "n" dimensions.
"""
w1 = np.linspace(-1.0, 1.0, 100)
w2 = np.linspace(-1.0, 1.0, 100)
W_1, W_2 = np.meshgrid(w1,w2)
```
# Equation of a circle
The [equation of a circle](https://www.mathsisfun.com/algebra/circle-equations.html) is described as:
$(x-a)^2 + (y-b)^2 = r^2$,

where $(a,b)$ is the location of the center of the circle, $r$ is the radius of the circle, and $x$ and $y$ are the coordinates of the 2-D plane.
# Equation Representing $L^2$-norm
The [equation representing $L^2$-norm](https://mathworld.wolfram.com/L2-Norm.html) is:
$\| x \|_2 = \sqrt{\displaystyle\sum_{i = 1}^{n} |x_{i}|^{2}}$
Also, see https://en.wikipedia.org/wiki/Lp_space.
Hence, in the 2-D space, we can represent the $L^2$-norm with the equation of a circle.
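As a quick numerical sanity check (a small sketch added here, not part of the original derivation), points on the unit circle all have an $L^2$-norm equal to the radius $r = 1$:

```
import numpy as np

# Sample a few points on the unit circle x^2 + y^2 = 1.
theta = np.linspace(0.0, 2.0 * np.pi, 8, endpoint=False)
points = np.stack([np.cos(theta), np.sin(theta)], axis=1)

# Every row has L2 norm equal to the radius (1.0, up to rounding).
print(np.linalg.norm(points, ord=2, axis=1))
```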
## Derivation of the Mathematical Equation Representing $L^2$-norm
$(x-a)^2 + (y-b)^2 = r^2$

becomes $x^2 + y^2 = r^2$ for $a = 0$ and $b = 0$, which centers the circle at the origin.

Writing $t = r^2$ and rearranging the equation, we get $x^2 + y^2 - t = 0$.

Process this equation as a function in *Python*: $L2 = x^2 + y^2 - t$.
```
# Constant t = r^2, the squared radius of the (w_1, w_2) circle.
t = 1
# Equation representing L2 regularization.
L2 = W_1**2 + W_2**2 - t
print("Plotting L2 regularization")
plt.contour(W_1,W_2,L2,[0])
plt.show()
```
# Equation Representing a Rhombus, Kite, Lozenge, Rectangle, or a Square

From https://math.stackexchange.com/questions/69099/equation-of-a-rectangle, a rectangle centered at the origin can be written with absolute values as

$\left|\frac{x}{a} + \frac{y}{b}\right| + \left|\frac{x}{a} - \frac{y}{b}\right| = c$,

and a rhombus (diamond) centered at the origin as

$\left|\frac{x}{a}\right| + \left|\frac{y}{b}\right| = c$,

where $a$ and $b$ scale the two half-diagonals and $c$ is a constant. With $a = b = 1$ and $c = t$, the rhombus is exactly the $L^1$-norm ball $|w_1| + |w_2| = t$, which is what the next code cell plots.

Resources for equations representing rectangles, rhombuses, kites, lozenges, and squares:
+ https://math.stackexchange.com/questions/69099/equation-of-a-rectangle
```
# Equation representing L1 regularization.
L1 = abs(W_1) + abs(W_2) - t
print("Plotting L1 regularization")
plt.contour(W_1,W_2,L1,[0])
plt.show()
```
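The contour above is exactly the set of points whose $L^1$-norm equals $t$; a small added check with `np.linalg.norm(..., ord=1)` confirms this for a few corner and edge points:

```
import numpy as np

# Corners and edge midpoints of the L1 ball |w1| + |w2| = 1.
pts = np.array([[1.0, 0.0], [0.0, 1.0], [-0.5, 0.5], [0.5, -0.5]])
print(np.linalg.norm(pts, ord=1, axis=1))  # all equal to 1.0
```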
# Resources for Visualizing $L_1$ and $L_2$ Regularization
[Resource with *Python* source code](https://xavierbourretsicotte.github.io/ridge_lasso_visual.html)
Other resources:
+ https://medium.com/analytics-vidhya/effects-of-l1-and-l2-regularization-explained-5a916ecf4f06
+ https://towardsdatascience.com/intuitions-on-l1-and-l2-regularisation-235f2db4c261
+ https://www.cs.mcgill.ca/~dprecup/courses/ML/Lectures/ml-lecture02.pdf
+ https://www.coursera.org/lecture/ml-classification/visualizing-effect-of-l2-regularization-in-logistic-regression-1VXLD
+ https://davidrosenberg.github.io/ml2015/docs/2b.L1L2-regularization.pdf
+ https://xavierbourretsicotte.github.io/ridge_lasso_visual.html
# Resources for elastic net
+ https://hackernoon.com/an-introduction-to-ridge-lasso-and-elastic-net-regression-cca60b4b934f
+ https://www.machinecurve.com/index.php/2020/01/21/what-are-l1-l2-and-elastic-net-regularization-in-neural-networks/
+ https://towardsdatascience.com/regularization-for-machine-learning-models-9173c2e90449
+ http://enhancedatascience.com/2017/07/04/machine-learning-explained-regularization/
+ https://www.analyticsvidhya.com/blog/2016/01/ridge-lasso-regression-python-complete-tutorial/
+ http://laid.delanover.com/difference-between-l1-and-l2-regularization-implementation-and-visualization-in-tensorflow/
+ https://github.com/topics/elastic-net
+ https://cran.r-project.org/web/packages/penalized/vignettes/penalized.pdf
```
# Weights for L1 regularization.
alpha = [0.1*x for x in range(10)]
# Weights for L2 regularization.
beta = [1 - y for y in alpha]
"""
Print the values for the weights of L1 & L2 regularization
to check their values.
"""
print("alpha is:",alpha,"=")
print("beta is:",beta,"=")
"""
Determine the current combination of weights,
or ratio of alpha and beta, for the elastic net to
combine L1 and L2 norm.
"""
for index, (alpha_i,beta_i) in enumerate(zip(alpha,beta)):
print("")
"""
Equation for elastic net, which combines L1 regularization
with L2 regularization as a linear combination of these
regularization methods.
"""
elastic_net = alpha_i*L1 + beta_i*L2
print("For alpha =", alpha_i, "and beta =", beta_i,".")
print("Plotting elastic net combination of L1 & L2 regularization.")
plt.contour(W_1,W_2,elastic_net,[0])
plt.show()
```
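To connect these contours to an actual estimator, here is an optional sketch using scikit-learn's `ElasticNet` (scikit-learn is an assumption here; it is not used elsewhere in this notebook). Its `l1_ratio` parameter plays the same role as `alpha_i` above: values closer to 1 weight the $L^1$ penalty more heavily and drive more coefficients exactly to zero.

```
import numpy as np
from sklearn.linear_model import ElasticNet

# Synthetic regression data with a sparse true weight vector.
rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))
true_w = np.array([2.0, 0.0, 0.0, -1.5, 0.0])
y = X @ true_w + 0.1 * rng.normal(size=200)

for l1_ratio in [0.2, 0.5, 0.9]:   # mostly L2 -> mostly L1
    model = ElasticNet(alpha=0.1, l1_ratio=l1_ratio, max_iter=10000)
    model.fit(X, y)
    print(l1_ratio, np.round(model.coef_, 3))
```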
# Future Work
File path: sandbox/python/google-colab/elastic-net/l1_l2_combined.ipynb
Use [Bokeh](https://docs.bokeh.org/en/latest/index.html) to allow people to use a sliding window to change the ratio of the weights for L1 and L2 normalization.
# Resources to Read
Resources and publications to read (and examine) as I look up information about L-$\infty$ regularization:
+ https://towardsdatascience.com/different-types-of-regularization-on-neuronal-network-with-pytorch-a9d6faf4793e
+ https://en.wikipedia.org/wiki/Elastic_net_regularization
+ https://medium.com/@dataakkadian/what-is-regularization-in-machine-learning-93c3516e6bd9
+ https://statisticaloddsandends.wordpress.com/2018/07/17/different-types-of-regularization/
+ https://www.deeplearningbook.org/contents/regularization.html
Resources and publications to read (and examine) as I look up information about types of regularization:
+ https://www.stat.berkeley.edu/~binyu/ps/test06.pdf
# Help with Using LaTeX with Google Colab
+ https://colab.research.google.com/notebooks/basic_features_overview.ipynb
```
# Importing Python packages from Python Libraries
import matplotlib.pyplot as plt
import numpy as np
w1 = np.linspace(-1.0, 1.0, 100)
w2 = np.linspace(-1.0, 1.0, 100)
W_1, W_2 = np.meshgrid(w1,w2)
# Constant representing the length of the kite.
t = 1
# Equation to help me understand visualization of L1 regularization.
L1_w1 = abs(W_1) - t
print("Plotting L1 regularization")
plt.contour(W_1,W_2,L1_w1,[0])
plt.show()
# Equation to help me understand visualization of L1 regularization.
L1_w2 = abs(W_2) - t
print("Plotting L1 regularization")
plt.contour(W_1,W_2,L1_w2,[0])
plt.show()
# Equation to help me understand visualization of L1 regularization.
L1_w1 = abs(W_1)
print("Plotting L1 regularization")
plt.contour(W_1,W_2,L1_w1,[0])
plt.show()
# Equation to help me understand visualization of L1 regularization.
L1 = abs(W_1) + abs(W_2)
print("Plotting L1 regularization")
plt.contour(W_1,W_2,L1,[0])
plt.show()
```
# Adding \LaTeX notation
$\infty$
```
# Importing Python packages from Python Libraries
import matplotlib.pyplot as plt
import numpy as np
"""
w1 = np.linspace(-1.0, 1.0, 100)
w2 = np.linspace(-1.0, 1.0, 100)
"""
w1 = np.linspace(-25.0, 25.0, 100)
w2 = np.linspace(-25.0, 25.0, 100)
W_1, W_2 = np.meshgrid(w1,w2)
# Setting up constants for the rectangle
breadth = 2
length = 3
constant = 7
"""
References for equations representing a rectangle:
https://math.stackexchange.com/questions/69099/equation-of-a-rectangle
"""
# Equation representing a rectangle, using absolute values.
rect1 = abs(W_1/breadth + W_2/length) + abs(W_1/breadth - W_2/length) - constant
print("Plotting a rectangle, using absolute values.")
plt.contour(W_1,W_2,rect1,[0])
plt.show()
# Equation using squared terms (note: this traces an ellipse, not a rectangle).
rect2 = pow(W_1/breadth,2) + pow(W_2/length,2) - constant
#rect2 = (W_1/breadth)**2 + (W_2/length)**2 - constant
"""
The following statement is in 1-D, not 2-D.
Hence, it cannot be plotted with plt.contour(...) in 2-D.
rect2a = pow(w1/breadth,2) + pow(w2/length,2) - constant
"""
rect2a = pow(w1/breadth,2) + pow(w2/length,2) - constant
print("Plotting a rectangle, using pow().")
plt.contour(W_1,W_2,rect2,[0])
#plt.contour(w1,w2,rect2a,[0])
plt.show()
# Equation representing a rhombus, using absolute values.
rhombus = abs(W_1/breadth) + abs(W_2/length) - constant
print("Plotting a rhombus, using absolute values.")
plt.contour(W_1,W_2,rhombus,[0])
plt.show()
# Set up the 2-D grid for plotting.
w1 = np.linspace(-25.0, 25.0, 100)
w2 = np.linspace(-25.0, 25.0, 100)
W_1, W_2 = np.meshgrid(w1,w2)
# Set up the constants of the shape/figure.
breadth = 2
length = 3
constant = 7
# Equation representing a rhombus, using absolute values.
rhombus = abs(W_1/breadth) + abs(W_2/length) - constant
print("Plotting a rhombus, using absolute values.")
plt.contour(W_1,W_2,rhombus,[0])
plt.show()
"""
w1 = np.linspace(-2.5, 5.0, 100)
w2 = np.linspace(-1.5, 5.0, 100)
"""
"""
w1 = np.linspace(15.0, 25.0, 100)
w2 = np.linspace(12.0, 20.0, 100)
"""
"""
w1 = np.linspace(-50.0, 50.0, 100)
w2 = np.linspace(-50.0, 50.0, 100)
"""
w1 = np.linspace(33.0, 46.0, 100)
w2 = np.linspace(40.0, 60.0, 100)
W_1, W_2 = np.meshgrid(w1,w2)
breadth = 2
length = 3
constant = 7
a = 40
b = 50
"""
Equation representing a rectangle, using squares.
Center the circle at (a,b)
"""
#rect2 = pow(W_1/breadth,2) + pow(W_2/length,2) - constant
rect2 = ((W_1-a)/breadth)**2 + ((W_2-b)/length)**2 - constant
print("Plotting a rectangle, using absolute values.")
plt.contour(W_1,W_2,rect2,[0])
#plt.contour(w1,w2,rect2a,[0])
plt.show()
"""
w1 = np.linspace(-2.5, 5.0, 100)
w2 = np.linspace(-1.5, 5.0, 100)
"""
w1 = np.linspace(-35.0, 35.0, 100)
w2 = np.linspace(-10.0, 10.0, 100)
W_1, W_2 = np.meshgrid(w1,w2)
breadth = 12
length = 3
constant = 7
# Equation with squared terms (an ellipse, not a rectangle).
#rect2 = pow(W_1/breadth,2) + pow(W_2/length,2) - constant
#rect2 = (W_1-40/breadth)**2 + (W_2-50/length)**2 - constant
rect2 = (W_1/breadth)**2 + (W_2/length)**2 - constant
print("Plotting with squared terms (an ellipse).")
plt.contour(W_1,W_2,rect2,[0])
#plt.contour(w1,w2,rect2a,[0])
plt.show()
```
# Location of Jupyter Notebook
It is located at: `sandbox/python/google-colab/elastic-net/l1_l2_combined.ipynb`
Or, see the following:
sandbox/python/google-colab/elastic-net/l1_l2_combined.ipynb
# Author Information
The MIT License (MIT)
Copyright (c) <2020> Zhiyang Ong
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Email address: `echo "cukj -wb- 23wU4X5M589 TROJANS cqkH wiuz2y 0f Mw Stanford" | awk '{ sub("23wU4X5M589","F.d_c_b. ") sub("Stanford","d0mA1n"); print $5, $2, $8; for (i=1; i<=1; i++) print "6\b"; print $9, $7, $6 }' | sed y/kqcbuHwM62z/gnotrzadqmC/ | tr 'q' ' ' | tr -d [:cntrl:] | tr -d 'ir' | tr y "\n" Che cosa significa?`
Or, you can copy and past the following into the command line of the "Terminal" application of a *UNIX*-like operating system.
echo "cukj -wb- 23wU4X5M589 TROJANS cqkH wiuz2y 0f Mw Stanford" | awk '{ sub("23wU4X5M589","F.d_c_b. ") sub("Stanford","d0mA1n"); print $5, $2, $8; for (i=1; i<=1; i++) print "6\b"; print $9, $7, $6 }' | sed y/kqcbuHwM62z/gnotrzadqmC/ | tr 'q' ' ' | tr -d [:cntrl:] | tr -d 'ir' | tr y "\n" Che cosa significa?
# Getting Into Shape: Array Shapes and Axes
Now that you’ve seen some of what NumPy can do, it’s time to firm up that foundation with some important theory. There are a few concepts that are important to keep in mind, especially as you work with arrays in higher dimensions.
**Vectors**, which are one-dimensional arrays of numbers, are the least complicated to keep track of. Two dimensions aren’t too bad, either, because they’re similar to spreadsheets. But things start to get tricky at three dimensions, and visualizing four? Forget about it.
## Mastering Shape
Shape is a key concept when you’re using multidimensional arrays. At a certain point, it’s easier to forget about visualizing the shape of your data and to instead follow some mental rules and trust NumPy to tell you the correct shape.
All arrays have a property called `.shape` that returns a tuple of the size in each dimension. It’s less important which dimension is which, but it’s critical that the arrays you pass to functions are in the shape that the functions expect. A common way to confirm that your data has the proper shape is to print the data and its shape until you’re sure everything is working like you expect.
This next example will show this process. You’ll create an array with a complex shape, check it, and reorder it to look like it’s supposed to:
```
import numpy as np
temperatures = np.array([
29.3, 42.1, 18.8, 16.1, 38.0, 12.5,
12.6, 49.9, 38.6, 31.3, 9.2, 22.2
]).reshape(2, 2, 3)
temperatures.shape
temperatures
np.swapaxes(temperatures, 1, 2)
```
Here, you use a `numpy.ndarray` method called `.reshape()` to form a `2 × 2 × 3` block of data. When you check the shape of your array, it's exactly what you told it to be. However, you can see how printed arrays quickly become hard to visualize in three or more dimensions. After you swap axes with `.swapaxes()`, it becomes a little clearer which dimension is which. You'll see more about axes in the next section.
Shape will come up again in the section on broadcasting. For now, just keep in mind that these little checks don’t cost anything. You can always delete the cells or get rid of the code once things are running smoothly.
## Understanding Axes
The example above shows how important it is to know not only what shape your data is in but also which data is in which **axis**. In NumPy arrays, axes are zero-indexed and identify which dimension is which. For example, a two-dimensional array has a vertical axis (axis 0) and a horizontal axis (axis 1). Lots of functions and commands in NumPy change their behavior based on which axis you tell them to process.
This example will show how `.max()` behaves by default, with no `axis` argument, and how it changes functionality depending on which axis you specify when you do supply an argument:
```
import numpy as np
table = np.array([
[5, 3, 7, 1, 2],
[2, 6, 7 , 9 ,3],
[1, 1, 1, 1, 8],
[4, 3, 2, 0, 10],
])
table
table.flatten()
table.shape
table.max()
table.max(axis=0)
table.max(axis=1)
```
By default, `.max()` returns the largest value in the entire array, no matter how many dimensions there are. However, once you specify an axis, it performs that calculation for each set of values along that particular axis. For example, with an argument of `axis=0`, `.max()` selects the maximum value in each of the five vertical sets of values (the columns) in `table` and returns an array that has been **flattened**, or aggregated into a one-dimensional array.
In fact, many of NumPy’s functions behave this way: If no axis is specified, then they perform an operation on the entire dataset. Otherwise, they perform the operation in an **axis-wise** fashion.
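The same axis-wise pattern holds for other reductions such as `.sum()` and `.mean()`; the short sketch below (reusing the same table) makes this explicit:

```
import numpy as np

table = np.array([
    [5, 3, 7, 1, 2],
    [2, 6, 7, 9, 3],
    [1, 1, 1, 1, 8],
    [4, 3, 2, 0, 10],
])

print(table.sum())         # 76: one number for the whole array
print(table.sum(axis=0))   # one sum per column (collapses the vertical axis)
print(table.mean(axis=1))  # one mean per row (collapses the horizontal axis)
```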
### Reshaping Arrays
Another important concept is reshaping your NumPy arrays, especially when you are dealing with multidimensional arrays. It's common to create a NumPy array in just one dimension and reshape it to multiple dimensions later, or vice versa. A key idea here is that you can change the shape of your arrays, but the number of elements must not change; for example, you can't reshape a `3x3` array to a `10x1` array. The total number of elements (the data buffer in the ndarray's internal organization) must be consistent before and after reshaping. (You might need to resize instead, but that's another story.) Now, let's look at some shape manipulations:
```
x = np.arange(24)
x
x.shape = 2, 3, -1
x
x.strides
```
The basic reshaping technique changes the `numpy.shape` attribute. In the preceding example, we have an array whose shape is `(24,)`, and after altering the shape attribute, we obtain an array of the same size whose shape has been changed to `2x3x4`. Note that `-1` in a shape means that NumPy infers the remaining dimension size from the total number of elements.
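To make the element-count constraint and the meaning of `-1` concrete, here is a small added sketch:

```
import numpy as np

x = np.arange(24)
print(x.shape)                     # (24,)
print(x.reshape(2, 3, -1).shape)   # (2, 3, 4): NumPy infers the last axis

try:
    x.reshape(10, 1)               # 24 elements cannot fill a 10x1 array
except ValueError as err:
    print(err)
```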
The following example reshapes a `100x100x100` array back to just one dimension; here, we apply two functions, `ndarray.flatten()` and `numpy.ravel()`, to collapse the array, and at the same time we compare their execution times. We notice that the speed difference between `flatten()` and `ravel()` is huge, but both of them are much faster than three layers of Python looping. The difference in performance between the two functions is that `flatten()` creates a copy of the original array, while `ravel()` returns a view whenever it can (if you don't remember the difference between copies and views, go back to the earlier section on how NumPy manages memory).
```
x = np.arange(1000000)
x.shape = 100, 100, 100
%timeit x.flatten()
%timeit x.ravel()
```
This example simply shows that NumPy offers many functions, and some of them can produce the same results; pick the function that satisfies your purpose and, at the same time, gives you the best performance.
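To make the copy-versus-view distinction above concrete, here is a small added sketch: writing into the result of `ravel()` modifies the original array, while writing into the result of `flatten()` does not.

```
import numpy as np

a = np.array([[0, 1, 2], [3, 4, 5]])

flat = a.flatten()   # always a copy
rav = a.ravel()      # a view, because a is contiguous

flat[0] = 100        # does not touch the original
rav[0] = -1          # writes through to the original array

print(a)                  # [[-1  1  2] [ 3  4  5]]
print(flat.base is None)  # True: flatten() owns its own data
print(rav.base is a)      # True: ravel() returned a view of a
```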
## Broadcasting
NumPy operations are mostly done element-wise, which requires the two arrays in an operation to have the same shape; however, this doesn't mean that NumPy operations can't take two differently shaped arrays (refer to the first example we looked at with scalars). NumPy provides the flexibility to broadcast a smaller array across a larger one. But we can't broadcast an array to just about any shape; it needs to follow certain constraints, which we will cover in this section. One key idea to keep in mind is that broadcasting lets us perform meaningful operations over two differently shaped arrays; however, inappropriate broadcasting might lead to inefficient use of memory that slows down computation.
### Broadcasting rules
The general rule for broadcasting is to determine whether two arrays are compatible dimension by dimension. Comparing their shapes element-wise from the trailing (rightmost) dimension, two dimensions are compatible when:

- they are equal, or
- one of them is 1

If these conditions are not met, a `ValueError` exception is thrown to indicate that the arrays have incompatible shapes. Now, we will go through three examples to see how the broadcasting rules work:
```
x = np.array([[0, 0, 0], [10, 10, 10], [20, 20, 20]])
x
y = np.array([1, 2, 3])
y
x + y
```
Let's turn the preceding code into a graph to help us understand broadcasting. The `x` variable has a shape of `(3, 3)`, while `y` only has a shape of `(3,)`. In NumPy broadcasting, the shape of `y` is treated as `(1, 3)`, so the second condition of the rule is met for the leading axis. `y` is then broadcast to the same shape as `x` by repeating its row, and the `+` operation applies element-wise.
<img src="../images/broadcasting-1.png" alt="broadcasting-1" width=500 align="left" />
Next, we are going to show you the result of broadcasting both arrays:
```
x = np.array([[0], [10], [20]])
x.shape, y.shape
x + y
```
The preceding example shows how both `x` and `y` are broadcast. `x` is broadcast along the columns, while `y` is broadcast along the rows, since each of them has a dimension equal to 1 in its shape. The second broadcasting condition is met, and the new result is a `3` by `3` array.
<img src="../images/broadcasting-2.jpg" alt="broadcasting-2" width=500 align="left" />
Let's take a look at our last example, in which the two arrays can't meet the requirements of the broadcasting rules:
```
x = np.array([[0, 0, 0], [10, 10, 10], [20, 20, 20]])
y = np.arange(1, 5)
x + y
```
In the third example, broadcasting can't be performed because `x` and `y` have different sizes in their trailing dimension (3 versus 4) and neither of them is equal to `1`. Thus, the broadcasting conditions cannot be met, and NumPy throws a `ValueError` telling you that the shapes are incompatible.
<img src="../images/broadcasting-3.jpg" alt="broadcasting-3" width=500 align="left" />
Understanding broadcasting is an important part of mastering vectorized calculations, and vectorized calculations are the way to write clean, idiomatic NumPy code.
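As a final, small example (added here) of the kind of vectorized calculation that broadcasting enables, standardizing each column of a matrix needs no explicit Python loop: the per-column means and standard deviations have shape `(2,)`, which broadcasts against the `(3, 2)` data.

```
import numpy as np

samples = np.array([[1.0, 200.0],
                    [2.0, 180.0],
                    [3.0, 220.0]])

standardized = (samples - samples.mean(axis=0)) / samples.std(axis=0)
print(standardized)
```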
<a href="https://colab.research.google.com/github/fernandofsilva/desafio-alelo/blob/main/notebooks/RF_Classifier.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Random Forest Classifier
## Setup
```
# Data handling
import pandas as pd
# Modules for feature engineering and modelling
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import plot_confusion_matrix
from sklearn.ensemble import RandomForestClassifier
# Plot
import matplotlib.pyplot as plt
plt.style.use('ggplot')
```
## Load data and merge
```
# Characters powers
powers = pd.read_csv("drive/MyDrive/Datasets/super_hero_powers.csv")
# Characters information
info = pd.read_csv(
"drive/MyDrive/Datasets/heroes_information.csv",
index_col=0,
na_values=['-', -99.0]
)
# Merge databases
data = info.merge(right=powers, how='inner', left_on='name', right_on='hero_names')
data.drop('hero_names', axis=1, inplace=True)
# Remove characters with neutral or missing alignment
data = data[data['Alignment'].isin(['bad', 'good'])]
```
## Feature Engineering
### Class for variable selection
```
class ColumnSelector(BaseEstimator, TransformerMixin):
def __init__(self, columns):
self.columns = columns
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.columns]
```
### Define columns according to each data type
```
# Categorical column names (drop 'name' and the target 'Alignment')
categorical = list(info.select_dtypes('object').columns)[1:-1]
# Numerical column names
numerical = list(info.select_dtypes('number').columns)
# Boolean column names (the superpower indicator columns)
boolean = list(powers.select_dtypes('bool').columns)
```
### Construct feature engineering graph
```
# Define categorical pipeline
cat_pipe = Pipeline(
[('selector', ColumnSelector(categorical)),
('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
('encoder', OneHotEncoder(handle_unknown='ignore', sparse=False))])
# Define numerical pipeline
num_pipe = Pipeline(
[('selector', ColumnSelector(numerical)),
('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
# Define boolean pipeline
bool_pipe = Pipeline(
[('selector', ColumnSelector(boolean)),
('pca', PCA(n_components=0.95))])
# Combine the three pipelines into a single preprocessor
preprocessor = FeatureUnion(
[('categorical', cat_pipe),
('numerical', num_pipe),
('boolean', bool_pipe)])
```
### Transform the target
Unfortunately, the scikit-learn Pipeline doesn't support adding a transformation of the target variable (like the ReshapeEndpoint in TensorFlow), so the target is encoded separately below.
```
# Format the target variable
y = data['Alignment']
y.replace({'bad': 0, 'good': 1}, inplace=True)
```
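An equivalent way to encode the labels, shown here only as an optional sketch (it is not required on top of the `replace()` call above), is scikit-learn's `LabelEncoder`:

```
from sklearn.preprocessing import LabelEncoder

# LabelEncoder sorts classes alphabetically, so 'bad' -> 0 and 'good' -> 1,
# matching the mapping used above.
label_encoder = LabelEncoder()
y_alt = pd.Series(label_encoder.fit_transform(data['Alignment']), index=data.index)
print(label_encoder.classes_)
```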
## Model
```
# Combine categorical and numerical pipeline with the model
model = Pipeline([
('preprocessor', preprocessor),
('model', RandomForestClassifier(min_samples_leaf=2))
])
```
### Fit and predict
The function `cross_val_predict` already stratifies the target variable; in other words, it balances the class distribution across the K folds (for classifiers, the default CV splitter is `StratifiedKFold`).
```
model.fit(data, y)
y_pred = cross_val_predict(model, data, y, cv=10)
```
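As an optional cross-check (a sketch, not part of the original notebook), the stratification can be made explicit by passing a `StratifiedKFold` splitter and scoring the same pipeline with `cross_val_score`:

```
from sklearn.model_selection import StratifiedKFold, cross_val_score

cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)
scores = cross_val_score(model, data, y, cv=cv, scoring='accuracy')
print('Mean CV accuracy: {:.3f} +/- {:.3f}'.format(scores.mean(), scores.std()))
```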
## Metrics
```
# Plot confusion matrix with the results
title = 'Confusion Matrix RF Classifier'
disp = plot_confusion_matrix(
    model,
    data,
    y,
    display_labels=['bad', 'good'],  # class order follows model.classes_: 0 = bad, 1 = good
    cmap=plt.cm.Blues,
    normalize='true'
)
disp.ax_.set_title(title)
print(title)
print(disp.confusion_matrix)
```
Importing packages
```
import numpy as np
import pandas as pd
import featuretools as ft
import utils
import os
from utils import relative_error
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.svm import SVR
from sklearn import tree
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
```
Loading training data
```
data = utils.load_data('train_FD004.txt')
#data = utils.load_data('train_FD004_7v.txt')
#data.head()
```
Creating cutoff times
```
cutoff_times = utils.make_cutoff_times(data)
#cutoff_times.head()
```
Making entitysets
```
def make_entityset(data):
es = ft.EntitySet('Dataset')
es.entity_from_dataframe(dataframe=data,
entity_id='recordings',
index='index',
time_index='time')
es.normalize_entity(base_entity_id='recordings',
new_entity_id='engines',
index='engine_no')
es.normalize_entity(base_entity_id='recordings',
new_entity_id='cycles',
index='time_in_cycles')
return es
es = make_entityset(data)
#es
#es["recordings"].variables
#es["engines"].variables
#es["cycles"].variables
#es["recordings"].df.head(5)
#es["engines"].df.head(5)
#es["cycles"].df.head(5)
es.plot()
```
Creating features
```
fm, features = ft.dfs(entityset=es,
target_entity='engines',
agg_primitives=['last', 'max', 'min'],
# agg_primitives=['last', 'max', 'min', 'mean', 'std'],
trans_primitives=[],
cutoff_time=cutoff_times,
max_depth=3,
verbose=True)
fm.to_csv('simple_fm.csv')
```
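To sanity-check what Deep Feature Synthesis produced, the generated feature definitions can be listed; this sketch assumes the featuretools `get_name()` API on feature objects.
```
print(f'{len(features)} features generated')
for f in features[:5]:
    print(f.get_name())
```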
Splitting training data
```
fm = pd.read_csv('simple_fm.csv', index_col='engine_no')
X_train = fm.copy().fillna(0)
y_train = X_train.pop('RUL')
X_training, X_validating, y_training, y_validating = train_test_split(X_train, y_train, random_state=17)
```
Prediction using median baseline 1 in training data
```
medianpredict1 = [np.median(y_training) for _ in y_validating]
print('Baseline by median label (training data): Mean Abs Error = {:.2f}'.format(
mean_absolute_error(medianpredict1, y_validating)))
print('Baseline by median label (training data): Root Mean Square Error = {:.2f}'.format(np.sqrt(mean_squared_error(y_validating, medianpredict1))))
print('Baseline by median label (training data): Relative Error = {:.2f}'.format(relative_error(y_validating.values, medianpredict1)))
```
Prediction using median baseline 2 in training data
```
# take the rows whose engine_no is among the engines selected for training
recordings_from_train = es['recordings'].df[es['recordings'].df['engine_no'].isin(y_training.index)]
# recordings_from_train.groupby(['engine_no']).apply(lambda df: df.shape[0]) gives the number of recorded cycles for each engine
median_life = np.median(recordings_from_train.groupby(['engine_no']).apply(lambda df: df.shape[0]))
# take the rows whose engine_no is among the engines selected for validation
recordings_from_test = es['recordings'].df[es['recordings'].df['engine_no'].isin(y_validating.index)]
# cycles elapsed at the cutoff = total number of cycles for the engine minus RUL
life_in_test = recordings_from_test.groupby(['engine_no']).apply(lambda df: df.shape[0])-y_validating
medianpredict2 = (median_life - life_in_test).apply(lambda row: max(row, 0))
print('Baseline by median life (training data): Mean Abs Error = {:.2f}'.format(
mean_absolute_error(medianpredict2, y_validating)))
print('Baseline by median life (training data): Root Mean Square Error = {:.2f}'.format(np.sqrt(mean_squared_error(y_validating, medianpredict2))))
print('Baseline by median life (training data): Relative Error = {:.2f}'.format(relative_error(y_validating.values, medianpredict2)))
```
Prediction using RFR in training data
```
RFRreg = RandomForestRegressor(n_estimators=100)
RFRreg.fit(X_training, y_training)
RFRpreds = RFRreg.predict(X_validating)
print('RFR Mean Abs Error (training data): {:.2f}'.format(mean_absolute_error(RFRpreds, y_validating)))
print('RFR Root Mean Square Error (training data): {:.2f}'.format(np.sqrt(mean_squared_error(y_validating, RFRpreds))))
print('RFR Relative Error (training data): {:.2f}'.format(relative_error(y_validating.values, RFRpreds)))
#high_imp_feats = utils.feature_importances(X, RFRreg, feats=10)
```
Loading test data
```
data2 = utils.load_data('test_FD004.txt')
#data2 = utils.load_data('test_FD004_7v.txt')
es2 = make_entityset(data2)
fm2 = ft.calculate_feature_matrix(entityset=es2, features=features, verbose=True)
X_test = fm2.copy().fillna(0)
y_test = pd.read_csv('RUL_FD004.txt', sep=' ', header=None, names=['RUL'], index_col=False)  # header=None: the file has no header row
#fm2.head()
```
Prediction using RFR in test data
```
RFRreg.fit(X_train, y_train)
RFRpreds2 = RFRreg.predict(X_test)
print('RFR Mean Abs Error (test data): {:.2f}'.format(mean_absolute_error(RFRpreds2, y_test)))
print('RFR Root Mean Square Error (test data): {:.2f}'.format(np.sqrt(mean_squared_error(y_test, RFRpreds2))))
print('RFR Relative Error (test data): {:.2f}'.format(relative_error(y_test.values, RFRpreds2)))
```
Prediction using median baseline 1 & 2 in test data
```
medianpredict1 = [np.median(y_training) for _ in RFRpreds2]
print('Baseline by median label (test data): Mean Abs Error = {:.2f}'.format(
mean_absolute_error(medianpredict1, y_test)))
print('Baseline by median label (test data): Root Mean Square Error = {:.2f}'.format(np.sqrt(mean_squared_error(y_test, medianpredict1))))
print('Baseline by median label (test data): Relative Error = {:.2f}'.format(relative_error(y_test.values, medianpredict1)))
medianpredict2 = (median_life - es2['recordings'].df.groupby(['engine_no']).apply(lambda df: df.shape[0])).apply(lambda row: max(row, 0))
print('Baseline by median life (test data): Mean Abs Error = {:.2f}'.format(
mean_absolute_error(medianpredict2, y_test)))
print('Baseline by median life (test data): Root Mean Square Error = {:.2f}'.format(np.sqrt(mean_squared_error(y_test, medianpredict2))))
print('Baseline by median life (test data): Relative Error = {:.2f}'.format(relative_error(y_test.values, medianpredict2.values)))
```
Prediction using SVR in test data
```
SVRreg=SVR(kernel='rbf',epsilon=3.0,degree=3)
SVRreg.fit(X_train,y_train)
SVRpreds=SVRreg.predict(X_test)
print('SVR Mean Abs Error (test data): {:.2f}'.format(mean_absolute_error(SVRpreds, y_test)))
print('SVR Root Mean Square Error (test data): {:.2f}'.format(np.sqrt(mean_squared_error(y_test, SVRpreds))))
print('SVR Relative Error (test data): {:.2f}'.format(relative_error(y_test.values, SVRpreds)))
```
Prediction using MLP in test data
```
MLPreg=MLPRegressor(hidden_layer_sizes=(2, ), activation='relu', solver='adam', alpha=0.0001, batch_size='auto', learning_rate='constant', learning_rate_init=0.001, max_iter=4000, tol=0.0001, momentum=0.9, epsilon=1e-08)
MLPreg.fit(X_train,y_train)
MLPpreds=MLPreg.predict(X_test)
print('MLP Mean Abs Error (test data): {:.2f}'.format(mean_absolute_error(MLPpreds, y_test)))
print('MLP Root Mean Square Error (test data): {:.2f}'.format(np.sqrt(mean_squared_error(y_test, MLPpreds))))
print('MLP Relative Error (test data): {:.2f}'.format(relative_error(y_test.values, MLPpreds)))
```
Prediction using CART in test data
```
CARTreg = tree.DecisionTreeRegressor()
CARTreg.fit(X_train,y_train)
CARTpreds = CARTreg.predict(X_test)
print('CART Mean Abs Error (test data): {:.2f}'.format(mean_absolute_error(CARTpreds, y_test)))
print('CART Root Mean Square Error (test data): {:.2f}'.format(np.sqrt(mean_squared_error(y_test, CARTpreds))))
print('CART Relative Error (test data): {:.2f}'.format(relative_error(y_test.values, CARTpreds)))
```
Saving output files
```
try:
os.mkdir("output")
except:
pass
fm.to_csv('output/simple_train_feature_matrix.csv')
cutoff_times.to_csv('output/simple_train_label_times.csv')
fm2.to_csv('output/simple_test_feature_matrix.csv')
```
```
import numpy as np
```
## `np.zeros_like` vs `np.zeros`
```
print("Small size")
a = np.ones(shape=(4, 500))
%timeit -n10000 np.zeros_like(a)
%timeit -n10000 np.zeros(shape=a.shape)
print("Large size")
a = np.ones(shape=(8, 50000))
%timeit -n100 np.zeros_like(a)
%timeit -n100 np.zeros(shape=a.shape)
```
## `np.ones_like` vs `np.ones`
```
print("Small size")
a = np.ones(shape=(4, 500))
%timeit -n10000 np.ones_like(a)
%timeit -n10000 np.ones(shape=a.shape)
print("Large size")
a = np.ones(shape=(8, 50000))
%timeit -n100 np.ones_like(a)
%timeit -n100 np.ones(shape=a.shape)
```
## `np.full_like` vs `np.full`
```
print("Small size")
a = np.ones(shape=(4, 500))
%timeit -n10000 np.full_like(a, 1.1)
%timeit -n10000 np.full(a.shape, 1.1)
print("Large size")
a = np.ones(shape=(8, 50000))
%timeit -n100 np.full_like(a, 1.1)
%timeit -n100 np.full(a.shape, 1.1)
```
## `np.empty_like` vs `np.empty`
```
print("Small size")
a = np.empty(shape=(4, 500))
%timeit -n10000 np.empty_like(a)
%timeit -n10000 np.empty(a.shape)
print("Large size")
a = np.ones(shape=(8, 50000))
%timeit -n100 np.empty_like(a)
%timeit -n100 np.empty(a.shape)
```
## `np.r_` vs `np.concatenate` vs `np.vstack` vs `np.array`
```
print("Small size")
a = np.ones(500)
%timeit -n10000 np.r_[[a], [a], [a], [a]]
%timeit -n10000 np.concatenate(([a], [a], [a], [a]))
%timeit -n10000 np.vstack((a, a, a, a))
%timeit -n10000 np.array([a,a,a,a]) # another approach for this case
%timeit -n10000 np.array([a, a, a, a]).reshape((-1, a.shape[-1]))
assert np.allclose(np.r_[[a], [a], [a], [a]], np.concatenate(([a], [a], [a], [a])))
assert np.allclose(np.r_[[a], [a], [a], [a]], np.vstack((a, a, a, a)))
assert np.allclose(np.r_[[a], [a], [a], [a]], np.array([a,a,a,a]))
assert np.allclose(np.r_[[a], [a], [a], [a]], np.array([a, a, a, a]).reshape((-1, a.shape[-1])))
print("\nLarge size")
a = np.random.rand(8, 50000)
%timeit -n100 np.r_[a, a, a, a]
%timeit -n100 np.concatenate((a, a, a, a))
%timeit -n100 np.vstack((a, a, a, a))
%timeit -n100 np.array([a, a, a, a]).reshape((-1, a.shape[1]))
assert np.allclose(np.r_[a, a, a, a], np.concatenate((a, a, a, a)))
assert np.allclose(np.r_[a, a, a, a], np.vstack((a, a, a, a)))
assert np.allclose(np.r_[a, a, a, a], np.array([a, a, a, a]).reshape((-1, a.shape[1])))
```
## `np.c_` vs `np.hstack`
```
print("Small size")
U = np.ones(shape=(2, 400))
%timeit -n10000 np.c_[U[:, 0], U]
%timeit -n10000 np.hstack((U[:, 0, np.newaxis], U))
%timeit -n10000 Unew = np.empty(shape=(U.shape[0], U.shape[1] + 1)); Unew[:, 0] = U[:, 0]; Unew[:, 1:] = U;
assert np.allclose(np.c_[U[:, 0], U], np.hstack((U[:, 0, np.newaxis], U)))
Unew = np.empty(shape=(U.shape[0], U.shape[1] + 1)); Unew[:, 0] = U[:, 0]; Unew[:, 1:] = U;
assert np.allclose(np.c_[U[:, 0], U], Unew)
print("\nLarge size")
U = np.ones(shape=(3, 400000))
%timeit -n100 np.c_[U[:, 0], U]
%timeit -n100 np.hstack((U[:, 0, np.newaxis], U))
%timeit -n100 Unew = np.empty(shape=(U.shape[0], U.shape[1] + 1)); Unew[:, 0] = U[:, 0]; Unew[:, 1:] = U;
assert np.allclose(np.c_[U[:, 0], U], np.hstack((U[:, 0, np.newaxis], U)))
Unew = np.empty(shape=(U.shape[0], U.shape[1] + 1)); Unew[:, 0] = U[:, 0]; Unew[:, 1:] = U;
assert np.allclose(np.c_[U[:, 0], U], Unew)
```
## `np.diff` vs `U[..., 1:] - U[..., :-1]`
```
print("Small size")
U = np.ones(shape=(4, 200))
%timeit -n10000 np.diff(U, axis=-1)
%timeit -n10000 U[..., 1:] - U[..., :-1]
assert np.allclose(np.diff(U, axis=-1), U[..., 1:] - U[..., :-1])
print("Large size")
U = np.ones(shape=(20, 10000))
%timeit -n100 np.diff(U, axis=-1)
%timeit -n100 U[..., 1:] - U[..., :-1]
assert np.allclose(np.diff(U, axis=-1), U[..., 1:] - U[..., :-1])
```
## `a.copy()` vs `np.array(a)` vs `np.r_[a]`
```
a = np.arange(5000)
%timeit -n1000 a.copy()
%timeit -n1000 np.array(a)
%timeit -n1000 np.r_[a]
%timeit -n1000 anew = np.empty_like(a); anew[:] = a[:];
```
<a href="https://colab.research.google.com/github/fabiormazza/IA025_2022S1/blob/main/projeto_final/fabio_mazza/Hipotese_Ws_NewBaseCase.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
# We will use the transformers library to get access to the BERT tokenizer.
!pip install transformers
import collections
import itertools
import functools
import math
import random
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import DataLoader
import tqdm
from collections import OrderedDict
# Check which GPU we are using
!nvidia-smi
if torch.cuda.is_available():
dev = "cuda:0"
else:
dev = "cpu"
device = torch.device(dev)
print('Using {}'.format(device))
```
## MyDataset Implementation
```
from typing import List
def tokenize(text: str, tokenizer):
    # Using tokenizer.batch_encode_plus is recommended because it is faster.
return tokenizer(text, return_tensors=None, add_special_tokens=False).input_ids
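# (Alternative sketch: tokenizer.batch_encode_plus(list_of_texts, add_special_tokens=False)['input_ids']
#  tokenizes a whole list of texts in one call, which is usually much faster than tokenizing one by one.)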
class MyDataset():
def __init__(self, texts: List[str], tokenizer, max_seq_length: int):
self.max_seq_length = max_seq_length
        cls = tokenizer.cls_token_id  # [CLS] (101 in BERT vocabularies)
        sep = tokenizer.sep_token_id  # [SEP] (102; the original hard-coded 100 is the [UNK] id)
tokens_all = torch.tensor([])
inputs = torch.tensor([])
targets = torch.tensor([])
for text in tqdm.notebook.tqdm(texts):
tokens = tokenize(text, tokenizer)
tokens = [cls] + tokens + [sep]
tokens = torch.FloatTensor(tokens)
tokens_all = torch.cat((tokens_all, tokens))
#print(tokens_all)
pad_size = max_seq_length - tokens_all.size(dim=0) % max_seq_length
        tokens_all = torch.cat((tokens_all, torch.zeros(pad_size)), dim=0)  # pad with pad_token_id = 0 so the total length is a multiple of max_seq_length
inputs = tokens_all.reshape((-1, max_seq_length))
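        # The targets are the inputs shifted left by one position (next-token prediction);
        # the final position is filled with the pad id 0 below.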
t_tokens = torch.roll(tokens_all, -1, 0)
t_tokens[-1] = 0
targets = t_tokens.reshape((-1, max_seq_length))
self.inputs = torch.LongTensor(inputs.numpy())
self.targets = torch.LongTensor(targets.numpy())
def __len__(self):
return len(self.inputs)
def __getitem__(self, idx):
return self.inputs[idx], self.targets[idx]
## Test with long sentences
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained("neuralmind/bert-base-portuguese-cased")
dummy_texts = ['Os primeiros socorros foram feitos no local e a vítima foi levada \
ao Hospital de Base de Brasília com ferimentos leves, consciente e com estado de saúde estável.',
'Um veículo capotou na Via Estrutural na tarde desta sexta-feira (3), por volta das 15h. O Corpo\
de Bombeiros Militar do Distrito Federal (CBMDF) foi chamado ao local para realizar os primeiros atendimentos à vítima.']
dummy_dataset = MyDataset(texts=dummy_texts, tokenizer=tokenizer, max_seq_length=9)
dummy_loader = DataLoader(dummy_dataset, batch_size=6, shuffle=False)
first_batch_input, first_batch_target = next(iter(dummy_loader))
print(first_batch_input)
print(first_batch_target)
```
# Loading the dataset
We will use a small sample of the [BrWaC](https://www.inf.ufrgs.br/pln/wiki/index.php?title=BrWaC) dataset to train and evaluate our language model.
```
!wget -nc https://storage.googleapis.com/unicamp-dl/ia025a_2022s1/aula9/sample-1gb.txt
# Load datasets
max_seq_length = 18
train_examples = 30000
valid_examples = 3000
test_examples = 3000
texts = open('sample-1gb.txt').readlines()
print(f'Read {len(texts)} lines.')
max_lines = train_examples + valid_examples + test_examples
print(f'Truncating to {max_lines} lines.')
texts = texts[:max_lines]
training_texts = texts[:-(valid_examples + test_examples)]
valid_texts = texts[-(valid_examples + test_examples):-test_examples]
test_texts = texts[-test_examples:]
training_dataset = MyDataset(texts=training_texts, tokenizer=tokenizer, max_seq_length=max_seq_length)
valid_dataset = MyDataset(texts=valid_texts, tokenizer=tokenizer, max_seq_length=max_seq_length)
test_dataset = MyDataset(texts=test_texts, tokenizer=tokenizer, max_seq_length=max_seq_length)
print(f'training examples: {len(training_dataset)}')
print(f'valid examples: {len(valid_dataset)}')
print(f'test examples: {len(test_dataset)}')
class LanguageModel(torch.nn.Module):
def __init__(self, vocab_size: int, max_seq_length: int, dim: int, n_layers: int, pad_token_id: int):
"""
        Implements a self-attention, decoder-only language model.
Args:
vocab_size (int): Size of the input vocabulary.
max_seq_length (int): Size of the sequence to consider as context for prediction.
dim (int): Dimension of the embedding layer for each word in the context.
n_layers (int): number of self-attention layers.
pad_token_id (int): id of the pad token that will be ignored in the attention.
"""
super(LanguageModel, self).__init__()
self.V = vocab_size
self.L = max_seq_length
self.D = dim
self.n_layers = n_layers
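        # NOTE: n_layers is stored but the forward pass below applies a single self-attention block.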
self.pad_token_id = pad_token_id
n_linear_1 = 256
n_linear_2 = 256
self.embedding = torch.nn.Embedding(vocab_size, dim, device=device)
self.positional = torch.nn.Parameter(torch.randn(max_seq_length, dim, device=device)/1000000000)
self.wq = nn.Linear(dim, dim, bias = False, device=device)
self.wk = nn.Linear(dim, dim, bias = False, device=device)
self.wv = nn.Linear(dim, dim, bias = False, device=device)
self.wo = nn.Linear(dim, dim, bias = False, device=device)
self.linear = nn.Sequential(OrderedDict([
('l1', torch.nn.Linear(dim, n_linear_2, device=device)),
('relu', torch.nn.ReLU()),
('dropout', torch.nn.Dropout(p=0.20)),
('l2', torch.nn.Linear(n_linear_2, vocab_size, device=device, bias = False))
]))
def forward(self, inputs):
"""
Args:
inputs is a LongTensor of shape (batch_size, max_seq_length)
Returns:
logits of shape (batch_size, max_seq_length, vocab_size)
"""
X = self.embedding(inputs)
X = X + self.positional
Q = self.wq(X)
K = self.wk(X)
V = self.wv(X)
scores = torch.matmul(Q, K.permute(0, 2, 1))
#causal mask
mask = torch.ones((self.L, self.L), dtype=torch.bool)
mask = torch.triu(mask, diagonal=1)
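        # True above the diagonal: position i must not attend to future positions j > i,
        # so those scores are pushed to a large negative value before the softmax.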
scores[:, mask] = -1000000000
probs = torch.nn.functional.softmax(scores, dim=-1)
E = torch.matmul(probs, V)
E = self.wo(E)
out = self.linear(E)
return out
```
## Test the model with an example
```
model = LanguageModel(
vocab_size=tokenizer.vocab_size,
max_seq_length=max_seq_length,
dim=64,
n_layers=2,
pad_token_id=tokenizer.pad_token_id,
).to(device)
sample_input, _ = next(iter(DataLoader(training_dataset)))
sample_input = sample_input.to(device)
sample_output = model(sample_input)
print(f'sample_input.shape: {sample_input.shape}')
print(f'sample_output.shape: {sample_output.shape}')
num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'Number of model parameters: {num_params}')
```
## Perplexity Assert
```
random.seed(123)
np.random.seed(123)
torch.manual_seed(123)
def perplexity(logits, target, ignore_token_id: int):
"""
Computes the perplexity.
Args:
logits: a FloatTensor of shape (batch_size, seq_length, vocab_size)
target: a LongTensor of shape (batch_size, seq_length)
Returns:
A float corresponding to the perplexity
"""
logits = logits.reshape(-1, logits.shape[-1])
target = target.reshape(-1)
loss = nn.functional.cross_entropy(logits, target, reduction='mean', ignore_index=ignore_token_id)
return torch.exp(loss)
n_examples = 1000
train_input_ids, train_target_ids = next(iter(DataLoader(training_dataset, batch_size=n_examples)))
train_input_ids = train_input_ids.to(device)
train_target_ids = train_target_ids.to(device)
logits = model(train_input_ids)
my_perplexity = perplexity(logits=logits, target=train_target_ids, ignore_token_id=tokenizer.pad_token_id)
print(f'my perplexity: {int(my_perplexity)}')
print(f'correct initial perplexity: {tokenizer.vocab_size}')
assert math.isclose(my_perplexity, tokenizer.vocab_size, abs_tol=7000)
print('Passed the perplexity assert')
```
## Training and Validation Loop
```
max_examples = 20_000_000 #150_000_000
eval_every_steps = 10000
lr = 3e-4
model = LanguageModel(
vocab_size=tokenizer.vocab_size,
max_seq_length=max_seq_length,
dim=128,
n_layers=2,
pad_token_id=tokenizer.pad_token_id,
).to(device)
train_loader = DataLoader(training_dataset, batch_size=512, shuffle=True, drop_last=True)
validation_loader = DataLoader(valid_dataset, batch_size=512)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
def train_step(input_ids, target_ids):
model.train()
model.zero_grad()
logits = model(input_ids)
logits = logits.reshape(-1, logits.shape[-1])
target_ids = target_ids.reshape(-1)
loss = nn.functional.cross_entropy(logits, target_ids, ignore_index=model.pad_token_id)
loss.backward()
optimizer.step()
return loss.item()
def validation_step(input_ids, target_ids):
model.eval()
logits = model(input_ids)
logits = logits.reshape(-1, logits.shape[-1])
target_ids = target_ids.reshape(-1)
loss = nn.functional.cross_entropy(logits, target_ids, ignore_index=model.pad_token_id)
return loss.item()
train_losses = []
n_examples = 0
step = 0
while n_examples < max_examples:
for train_input_ids, train_target_ids in train_loader:
loss = train_step(train_input_ids.to(device), train_target_ids.to(device))
train_losses.append(loss)
if step % eval_every_steps == 0:
train_ppl = np.exp(np.average(train_losses))
with torch.no_grad():
valid_ppl = np.exp(np.average([
validation_step(val_input_ids.to(device), val_target_ids.to(device))
for val_input_ids, val_target_ids in validation_loader]))
print(f'{step} steps; {n_examples} examples so far; train ppl: {train_ppl:.2f}, valid ppl: {valid_ppl:.2f}')
train_losses = []
n_examples += len(train_input_ids) # Increment of batch size
step += 1
if n_examples >= max_examples:
break
```
## Final evaluation on the test dataset
Bonus: the model with the lowest perplexity on the test dataset will earn 0.5 extra points on the final grade.
```
test_loader = DataLoader(test_dataset, batch_size=512)
with torch.no_grad():
test_ppl = np.exp(np.average([
validation_step(test_input_ids.to(device), test_target_ids.to(device))
for test_input_ids, test_target_ids in test_loader
]))
print(f'test perplexity: {test_ppl}')
```
## Test your model with a sentence
Pick a model-generated sentence that you find interesting.
```
prompt = 'O especialista reforça que, apesar do aumento do número de frentes frias, elas passarão de maneira'
max_output_tokens = 20
model.eval()
for _ in range(max_output_tokens):
input_ids = tokenize(text=prompt, tokenizer=tokenizer)
    input_ids_truncated = input_ids[-max_seq_length:]  # Use only the last <max_seq_length> tokens as input to the model.
    logits = model(torch.LongTensor([input_ids_truncated]).to(device))
    logits = logits[:, -1, :]  # Use only the last token of the sequence.
    # With argmax, the model's output at each step is the token with the highest probability.
    # This is called greedy decoding.
    predicted_id = torch.argmax(logits).item()
    input_ids += [predicted_id]  # Concatenate the input with the token chosen at this step.
prompt = tokenizer.decode(input_ids)
print(prompt)
prompt = 'As imagens mostram o felino surpreendendo a ação carinhosa do tratador com uma violenta mordida no braço.'
max_output_tokens = 20
model.eval()
for _ in range(max_output_tokens):
input_ids = tokenize(text=prompt, tokenizer=tokenizer)
    input_ids_truncated = input_ids[-max_seq_length:]  # Use only the last <max_seq_length> tokens as input to the model.
    logits = model(torch.LongTensor([input_ids_truncated]).to(device))
    logits = logits[:, -1, :]  # Use only the last token of the sequence.
    # With argmax, the model's output at each step is the token with the highest probability.
    # This is called greedy decoding.
    predicted_id = torch.argmax(logits).item()
    input_ids += [predicted_id]  # Concatenate the input with the token chosen at this step.
prompt = tokenizer.decode(input_ids)
print(prompt)
prompt = 'Um vídeo do momento, feito pelo morador de um prédio próximo, mostra o cachorro pulando na'
max_output_tokens = 20
model.eval()
for _ in range(max_output_tokens):
input_ids = tokenize(text=prompt, tokenizer=tokenizer)
    input_ids_truncated = input_ids[-max_seq_length:]  # Use only the last <max_seq_length> tokens as input to the model.
    logits = model(torch.LongTensor([input_ids_truncated]).to(device))
    logits = logits[:, -1, :]  # Use only the last token of the sequence.
    # With argmax, the model's output at each step is the token with the highest probability.
    # This is called greedy decoding.
    predicted_id = torch.argmax(logits).item()
    input_ids += [predicted_id]  # Concatenate the input with the token chosen at this step.
prompt = tokenizer.decode(input_ids)
print(prompt)
```
## Bonus 1
Whoever achieves the lowest perplexity on the test dataset earns 0.5 points on the final average.
## Bonus 2
What is the complexity (in big-O notation) of the text generation function above?
Whoever answers the question above correctly and provides the function with the lowest complexity earns 0.5 points on the final average.
# Real World Example
```
import numpy as np
import scipy.stats as stats
import math
from PIL import Image
import matplotlib.pyplot as plt
from IBP_Sampler_Package.IBP_Sampler import IBP, log_likelyhood, sampler
```
For this real-world example, we will use images of each face of a die
## Experiment 1
### Setup: Create X using the images of dice
```
#Convert each dice to an array
One = np.array(Image.open('One.png').convert('L'))
Two = np.array(Image.open('Two.png').convert('L'))
Three = np.array(Image.open("Three.png").convert("L"))
Four = np.array(Image.open("Four.png").convert("L"))
Five = np.array(Image.open("Five.png").convert("L"))
Six = np.array(Image.open("Six.png").convert("L"))
#Make sure it is binary
One[One > 0] = 1
Two[Two > 0] = 1
Three[Three > 0] = 1
Four[Four > 0] = 1
Five[Five > 0] = 1
Six[Six > 0] = 1
#Reduce the dimensions
One = One[25:200:5,25:200:5]
Two = Two[25:200:5,25:200:5]
Three = Three[25:200:5,25:200:5]
Four = Four[25:200:5,25:200:5]
Five = Five[25:200:5,25:200:5]
Six = Six[25:200:5,25:200:5]
#Plot Dice for Reference
fig, ax = plt.subplots(2, 3,figsize = (12,4))
ax[0,0].imshow(One,cmap = "gray")
ax[0,0].set_title("One")
ax[0,1].imshow(Two,cmap = "gray")
ax[0,1].set_title("Two")
ax[0,2].imshow(Three,cmap = "gray")
ax[0,2].set_title("Three")
ax[1,0].imshow(Four,cmap = "gray")
ax[1,0].set_title("Four")
ax[1,1].imshow(Five,cmap = "gray")
ax[1,1].set_title("Five")
ax[1,2].imshow(Six,cmap = "gray")
ax[1,2].set_title("Six")
pass
np.random.seed(1)
N = 50
D = One.shape[0]**2
One = One.reshape(D)
Two = Two.reshape(D)
Three = Three.reshape(D)
Four = Four.reshape(D)
Five = Five.reshape(D)
Six = Six.reshape(D)
sigmaX = 0.25
#Create X from basis vectors
One = np.outer(np.random.binomial(1,.5,N),One)
Two = np.outer(np.random.binomial(1,.5,N),Two)
Three = np.outer(np.random.binomial(1,.5,N),Three)
Four = np.outer(np.random.binomial(1,.5,N),Four)
Five = np.outer(np.random.binomial(1,.5,N),Five)
Six = np.outer(np.random.binomial(1,.5,N),Six)
X = One + Two + Three + Four + Five + Six
#Add noise
X = X + np.random.normal(0,sigmaX,(N,D))
```
### Simulation: Run the Gibbs Sampler for X
```
np.random.seed(2)
Ks,Alpha,SXs,SAs,Z = sampler(X,alpha = 1,niter = 500,epsilon = .05,sigma_X =.4,sigma_A = .5,alpha_a_prior = 1,alpha_b_prior = 1,max_new = 3)
```
### Results: Plot the Posterior Mean of the Feature Matrix
```
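# Posterior mean of the feature matrix A given Z and X:
# E[A | X, Z] = (Z'Z + (sigma_X^2 / sigma_A^2) I)^(-1) Z'X, evaluated at the final Gibbs sample.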
A = np.linalg.inv(Z.T @ Z + SXs[499]**2 / SAs[499]**2 * np.eye(int(Ks[499]))) @ Z.T @ X
fig, ax = plt.subplots(2, 6,figsize = (15,5))
ax[0,0].imshow(A[0].reshape(35,35),cmap = "gray")
ax[0,1].imshow(A[1].reshape(35,35),cmap = "gray")
ax[0,2].imshow(A[2].reshape(35,35),cmap = "gray")
ax[0,3].imshow(A[3].reshape(35,35),cmap = "gray")
ax[0,4].imshow(A[4].reshape(35,35),cmap = "gray")
ax[0,5].imshow(A[5].reshape(35,35),cmap = "gray")
ax[1,0].imshow(A[6].reshape(35,35),cmap = "gray")
ax[1,1].imshow(A[7].reshape(35,35),cmap = "gray")
ax[1,2].imshow(A[8].reshape(35,35),cmap = "gray")
ax[1,3].imshow(A[9].reshape(35,35),cmap = "gray")
ax[1,4].imshow(A[10].reshape(35,35),cmap = "gray")
ax[1,5].imshow(A[11].reshape(35,35),cmap = "gray")
# fig.delaxes(ax[1,5])
pass
```
### Results: Plot the values of K, $\alpha$, $\sigma_X$,$\sigma_A$ over the iterations
```
fig, (ax1,ax2,ax3,ax4) = plt.subplots(4, 1,figsize = (10,10))
ax1.plot(Ks)
ax1.set_title("K")
ax2.plot(Alpha)
ax2.set_title("Alpha")
ax3.plot(SXs)
ax3.set_title("Sigma X")
ax4.plot(SAs)
ax4.set_title("Sigma A")
pass
```
## Experiment 2
### Setup: Create X using the images of dice
```
#Convert each dice to an array
One = np.array(Image.open('One.png').convert('L'))
Two = np.array(Image.open('Two.png').convert('L'))
Three = np.array(Image.open("Three.png").convert("L"))
Four = np.array(Image.open("Four.png").convert("L"))
Five = np.array(Image.open("Five.png").convert("L"))
Six = np.array(Image.open("Six.png").convert("L"))
#Make sure it is binary
One[One > 0] = 1
Two[Two > 0] = 1
Three[Three > 0] = 1
Four[Four > 0] = 1
Five[Five > 0] = 1
Six[Six > 0] = 1
#Reduce the dimensions
One = One[25:200:5,25:200:5]
Two = Two[25:200:5,25:200:5]
Three = Three[25:200:5,25:200:5]
Four = Four[25:200:5,25:200:5]
Five = Five[25:200:5,25:200:5]
Six = Six[25:200:5,25:200:5]
#Plot Dice for Reference
fig, ax = plt.subplots(2, 3,figsize = (12,4))
ax[0,0].imshow(One,cmap = "gray")
ax[0,0].set_title("One")
ax[0,1].imshow(Two,cmap = "gray")
ax[0,1].set_title("Two")
ax[0,2].imshow(Three,cmap = "gray")
ax[0,2].set_title("Three")
ax[1,0].imshow(Four,cmap = "gray")
ax[1,0].set_title("Four")
ax[1,1].imshow(Five,cmap = "gray")
ax[1,1].set_title("Five")
ax[1,2].imshow(Six,cmap = "gray")
ax[1,2].set_title("Six")
pass
np.random.seed(1)
N = 50
D = One.shape[0]**2
One = One.reshape(D)
Two = Two.reshape(D)
Three = Three.reshape(D)
Four = Four.reshape(D)
Five = Five.reshape(D)
Six = Six.reshape(D)
sigmaX = 0.25
#Create X from basis vectors
One = np.outer(np.random.binomial(1,.5,N),One)
Two = np.outer(np.random.binomial(1,.5,N),Two)
Three = np.outer(np.random.binomial(1,.5,N),Three)
Four = np.outer(np.random.binomial(1,.5,N),Four)
Five = np.outer(np.random.binomial(1,.5,N),Five)
Six = np.outer(np.random.binomial(1,.5,N),Six)
X = One + Two + Three + Four + Five + Six
#Add noise
X = X + np.random.normal(0,sigmaX,(N,D))
```
### Simulation: Run the Gibbs Sampler for X
```
np.random.seed(5)
Ks,Alpha,SXs,SAs,Z = sampler(X,alpha = 1,niter = 500,epsilon = .05,sigma_X =.4,sigma_A = .5,alpha_a_prior = 1,alpha_b_prior = 1,max_new = 3)
```
### Results: Plot the Posterior Mean of the Feature Matrix
```
A = np.linalg.inv(Z.T @ Z + SXs[499]**2 / SAs[499]**2 * np.eye(int(Ks[499]))) @ Z.T @ X
fig, ax = plt.subplots(2, 6,figsize = (15,5))
ax[0,0].imshow(A[0].reshape(35,35),cmap = "gray")
ax[0,1].imshow(A[1].reshape(35,35),cmap = "gray")
ax[0,2].imshow(A[2].reshape(35,35),cmap = "gray")
ax[0,3].imshow(A[3].reshape(35,35),cmap = "gray")
ax[0,4].imshow(A[4].reshape(35,35),cmap = "gray")
ax[0,5].imshow(A[5].reshape(35,35),cmap = "gray")
ax[1,0].imshow(A[6].reshape(35,35),cmap = "gray")
ax[1,1].imshow(A[7].reshape(35,35),cmap = "gray")
ax[1,2].imshow(A[8].reshape(35,35),cmap = "gray")
ax[1,3].imshow(A[9].reshape(35,35),cmap = "gray")
ax[1,4].imshow(A[10].reshape(35,35),cmap = "gray")
#ax[1,5].imshow(A[11].reshape(35,35),cmap = "gray")
fig.delaxes(ax[1,5])
pass
```
### Results: Plot the values of K, $\alpha$, $\sigma_X$,$\sigma_A$ over the iterations
```
fig, (ax1,ax2,ax3,ax4) = plt.subplots(4, 1,figsize = (10,10))
ax1.plot(Ks)
ax1.set_title("K")
ax2.plot(Alpha)
ax2.set_title("Alpha")
ax3.plot(SXs)
ax3.set_title("Sigma X")
ax4.plot(SAs)
ax4.set_title("Sigma A")
pass
```
|
github_jupyter
|
import numpy as np
import scipy.stats as stats
import math
from PIL import Image
import matplotlib.pyplot as plt
from IBP_Sampler_Package.IBP_Sampler import IBP, log_likelyhood, sampler
#Convert each dice to an array
One = np.array(Image.open('One.png').convert('L'))
Two = np.array(Image.open('Two.png').convert('L'))
Three = np.array(Image.open("Three.png").convert("L"))
Four = np.array(Image.open("Four.png").convert("L"))
Five = np.array(Image.open("Five.png").convert("L"))
Six = np.array(Image.open("Six.png").convert("L"))
#Make sure it is binary
One[One > 0] = 1
Two[Two > 0] = 1
Three[Three > 0] = 1
Four[Four > 0] = 1
Five[Five > 0] = 1
Six[Six > 0] = 1
#Reduce the dimensions
One = One[25:200:5,25:200:5]
Two = Two[25:200:5,25:200:5]
Three = Three[25:200:5,25:200:5]
Four = Four[25:200:5,25:200:5]
Five = Five[25:200:5,25:200:5]
Six = Six[25:200:5,25:200:5]
#Plot Dice for Reference
fig, ax = plt.subplots(2, 3,figsize = (12,4))
ax[0,0].imshow(One,cmap = "gray")
ax[0,0].set_title("One")
ax[0,1].imshow(Two,cmap = "gray")
ax[0,1].set_title("Two")
ax[0,2].imshow(Three,cmap = "gray")
ax[0,2].set_title("Three")
ax[1,0].imshow(Four,cmap = "gray")
ax[1,0].set_title("Four")
ax[1,1].imshow(Five,cmap = "gray")
ax[1,1].set_title("Five")
ax[1,2].imshow(Six,cmap = "gray")
ax[1,2].set_title("Six")
pass
np.random.seed(1)
N = 50
D = One.shape[0]**2
One = One.reshape(D)
Two = Two.reshape(D)
Three = Three.reshape(D)
Four = Four.reshape(D)
Five = Five.reshape(D)
Six = Six.reshape(D)
sigmaX = 0.25
#Create X from basis vectors
One = np.outer(np.random.binomial(1,.5,N),One)
Two = np.outer(np.random.binomial(1,.5,N),Two)
Three = np.outer(np.random.binomial(1,.5,N),Three)
Four = np.outer(np.random.binomial(1,.5,N),Four)
Five = np.outer(np.random.binomial(1,.5,N),Five)
Six = np.outer(np.random.binomial(1,.5,N),Six)
X = One + Two + Three + Four + Five + Six
#Add noise
X = X + np.random.normal(0,sigmaX,(N,D))
np.random.seed(2)
Ks,Alpha,SXs,SAs,Z = sampler(X,alpha = 1,niter = 500,epsilon = .05,sigma_X =.4,sigma_A = .5,alpha_a_prior = 1,alpha_b_prior = 1,max_new = 3)
A = np.linalg.inv(Z.T @ Z + SXs[499]**2 / SAs[499]**2 * np.eye(int(Ks[499]))) @ Z.T @ X
fig, ax = plt.subplots(2, 6,figsize = (15,5))
ax[0,0].imshow(A[0].reshape(35,35),cmap = "gray")
ax[0,1].imshow(A[1].reshape(35,35),cmap = "gray")
ax[0,2].imshow(A[2].reshape(35,35),cmap = "gray")
ax[0,3].imshow(A[3].reshape(35,35),cmap = "gray")
ax[0,4].imshow(A[4].reshape(35,35),cmap = "gray")
ax[0,5].imshow(A[5].reshape(35,35),cmap = "gray")
ax[1,0].imshow(A[6].reshape(35,35),cmap = "gray")
ax[1,1].imshow(A[7].reshape(35,35),cmap = "gray")
ax[1,2].imshow(A[8].reshape(35,35),cmap = "gray")
ax[1,3].imshow(A[9].reshape(35,35),cmap = "gray")
ax[1,4].imshow(A[10].reshape(35,35),cmap = "gray")
ax[1,5].imshow(A[11].reshape(35,35),cmap = "gray")
# fig.delaxes(ax[1,5])
pass
fig, (ax1,ax2,ax3,ax4) = plt.subplots(4, 1,figsize = (10,10))
ax1.plot(Ks)
ax1.set_title("K")
ax2.plot(Alpha)
ax2.set_title("Alpha")
ax3.plot(SXs)
ax3.set_title("Sigma X")
ax4.plot(SAs)
ax4.set_title("Sigma A")
pass
#Convert each dice to an array
One = np.array(Image.open('One.png').convert('L'))
Two = np.array(Image.open('Two.png').convert('L'))
Three = np.array(Image.open("Three.png").convert("L"))
Four = np.array(Image.open("Four.png").convert("L"))
Five = np.array(Image.open("Five.png").convert("L"))
Six = np.array(Image.open("Six.png").convert("L"))
#Make sure it is binary
One[One > 0] = 1
Two[Two > 0] = 1
Three[Three > 0] = 1
Four[Four > 0] = 1
Five[Five > 0] = 1
Six[Six > 0] = 1
#Reduce the dimensions
One = One[25:200:5,25:200:5]
Two = Two[25:200:5,25:200:5]
Three = Three[25:200:5,25:200:5]
Four = Four[25:200:5,25:200:5]
Five = Five[25:200:5,25:200:5]
Six = Six[25:200:5,25:200:5]
#Plot Dice for Reference
fig, ax = plt.subplots(2, 3,figsize = (12,4))
ax[0,0].imshow(One,cmap = "gray")
ax[0,0].set_title("One")
ax[0,1].imshow(Two,cmap = "gray")
ax[0,1].set_title("Two")
ax[0,2].imshow(Three,cmap = "gray")
ax[0,2].set_title("Three")
ax[1,0].imshow(Four,cmap = "gray")
ax[1,0].set_title("Four")
ax[1,1].imshow(Five,cmap = "gray")
ax[1,1].set_title("Five")
ax[1,2].imshow(Six,cmap = "gray")
ax[1,2].set_title("Six")
pass
np.random.seed(1)
N = 50
D = One.shape[0]**2
One = One.reshape(D)
Two = Two.reshape(D)
Three = Three.reshape(D)
Four = Four.reshape(D)
Five = Five.reshape(D)
Six = Six.reshape(D)
sigmaX = 0.25
#Create X from basis vectors
One = np.outer(np.random.binomial(1,.5,N),One)
Two = np.outer(np.random.binomial(1,.5,N),Two)
Three = np.outer(np.random.binomial(1,.5,N),Three)
Four = np.outer(np.random.binomial(1,.5,N),Four)
Five = np.outer(np.random.binomial(1,.5,N),Five)
Six = np.outer(np.random.binomial(1,.5,N),Six)
X = One + Two + Three + Four + Five + Six
#Add noise
X = X + np.random.normal(0,sigmaX,(N,D))
np.random.seed(5)
Ks,Alpha,SXs,SAs,Z = sampler(X,alpha = 1,niter = 500,epsilon = .05,sigma_X =.4,sigma_A = .5,alpha_a_prior = 1,alpha_b_prior = 1,max_new = 3)
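# Point estimate of the feature images: the posterior mean of A given the final sampled Z in the linear-Gaussian model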
A = np.linalg.inv(Z.T @ Z + SXs[499]**2 / SAs[499]**2 * np.eye(int(Ks[499]))) @ Z.T @ X
fig, ax = plt.subplots(2, 6,figsize = (15,5))
ax[0,0].imshow(A[0].reshape(35,35),cmap = "gray")
ax[0,1].imshow(A[1].reshape(35,35),cmap = "gray")
ax[0,2].imshow(A[2].reshape(35,35),cmap = "gray")
ax[0,3].imshow(A[3].reshape(35,35),cmap = "gray")
ax[0,4].imshow(A[4].reshape(35,35),cmap = "gray")
ax[0,5].imshow(A[5].reshape(35,35),cmap = "gray")
ax[1,0].imshow(A[6].reshape(35,35),cmap = "gray")
ax[1,1].imshow(A[7].reshape(35,35),cmap = "gray")
ax[1,2].imshow(A[8].reshape(35,35),cmap = "gray")
ax[1,3].imshow(A[9].reshape(35,35),cmap = "gray")
ax[1,4].imshow(A[10].reshape(35,35),cmap = "gray")
#ax[1,5].imshow(A[11].reshape(35,35),cmap = "gray")
fig.delaxes(ax[1,5])
pass
fig, (ax1,ax2,ax3,ax4) = plt.subplots(4, 1,figsize = (10,10))
ax1.plot(Ks)
ax1.set_title("K")
ax2.plot(Alpha)
ax2.set_title("Alpha")
ax3.plot(SXs)
ax3.set_title("Sigma X")
ax4.plot(SAs)
ax4.set_title("Sigma A")
pass
| 0.514156 | 0.905657 |
# NumPy Basics
This notebook was created by Eda AYDIN, following the Udemy course by the DATAI Team.
```
import numpy as np
array= np.array([1,2,3,4,5])
array
array2 = np.array([1,2,3,4,5,6,7,8,9,10])
array2
```
## reshape()
```
# reshape(row,column)
array3 = array2.reshape(2,5)
array3
```
## shape
```
array.shape # (5,) -> 1-D array with 5 elements
array2.shape # (10,) -> 1-D array with 10 elements
array3.shape # (2, 5) -> 2 rows, 5 columns
```
## ndim : Dimension
```
array.ndim # 1-D array -> 1 dimension
array2.ndim # 1-D array -> 1 dimension
array3.ndim # 2-D array -> 2 dimensions
```
## dtype : Data type
```
array.dtype
array2.dtype
array3.dtype
```
## size
```
array.size
array2.size
array3.size
```
## Multidimensional Array
np.array expects a sequence such as a (nested) list. Don't forget this!
```
array_multi = np.array([[1,2,3,4,5],
[6,7,8,9,10],
[11,12,13,14,15]])
array_multi
array_multi.shape # 3 row, 5 column
```
## zeros()
**Why should we do this?**
When we append elements to an array one at a time, memory has to be reallocated repeatedly. Calling **np.zeros((3,4))** up front allocates the whole 3-row, 4-column array once, and we can then fill it in place.
```
A = np.zeros((3,4))
A
A[0,0] = 5
A
```
## ones()
```
np.ones((3,4))
```
## empty()
empty() only allocates the array; its entries are not initialized, so it contains whatever values happen to be in that memory rather than guaranteed zeros.
```
np.empty((5,6))
```
## arange()
**Format:** $$np.arange(start, stop, step, dtype=None, *, like=None)$$
Values are generated within the half-open interval [start, stop) (in other words, the interval including start but excluding stop). For integer arguments the function is equivalent to the Python built-in range function, but returns an ndarray rather than a list.
### Parameters
**start :**
- integer or real, optional
- The interval includes this value.
- The default value is 0.
**stop :**
- integer or real
- The interval does not include this value, except in some cases where step is not an integer and floating point round-off affects the length of out.
**step :**
- integer or real, optional
- Spacing between values.
- The default step size is 1.
- If step is specified as a positional argument, start must also be given.
**dtype :**
- dtype
- The type of the output array.
- If dtype is not given, infer the data type from the other input arguments.
**like :**
- array_like
- Reference object to allow the creation of arrays which are not NumPy arrays.
### Returns
**arange :**
- array of evenly spaced values
[https://numpy.org/doc/stable/reference/generated/numpy.arange.html](https://numpy.org/doc/stable/reference/generated/numpy.arange.html)
```
np.arange(10,50,5)
```
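As a small illustrative sketch of the dtype and non-integer step behaviour described above (the values are arbitrary):
```
# Forcing the output dtype
np.arange(10, 50, 5, dtype=float)

# With a non-integer step, floating-point round-off can affect the length
# of the result, which is why linspace is often preferred in that case
np.arange(0, 1, 0.1)
```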
## linspace()
**Format:** $$np.linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0)$$
- Return evenly spaced numbers over a specified interval.
- Returns num evenly spaced samples, calculated over the interval [start,stop].
- The endpoint of the interval can optionally be excluded.
### Parameters
**start:**
- array_like
- The starting value of the sequence
**stop:**
- array_like
- The end value of the sequence, unless endpoint is set to False.
**num:**
- int, optional
- Number of samples to generate
- Default is 50
- Must be non-negative
**endpoint:**
- bool, optional
- If True, stop is the last sample; otherwise, it is not included.
- Default is True.
**retstep:**
- bool, optional
- If True, return (samples, step), where step is the spacing between samples.
**dtype:**
- dtype, optional
- The type of the output array.
- If the dtype is not given, the data type is inferred from start and stop.
- The inferred dtype will never be an integer; float is chosen even if the arguments would produce an array of integers.
**axis:**
- int,optional
- The axis in the result to store the samples.
- Relevant only if start or stop are array-like.
- By default (0), the samples will be along a new axis inserted at the beginning.
- Use -1 to get an axis at the end.
### Returns
**samples:**
- ndarray
- num equally spaced samples in the closed interval [start, stop] or the half-open interval [start, stop), depending on endpoint
**step:**
- float, optional
- Only returned if retstep is True
- Size of spacing between samples.
[https://numpy.org/doc/stable/reference/generated/numpy.linspace.html](https://numpy.org/doc/stable/reference/generated/numpy.linspace.html)
```
np.linspace(10,50,20)
```
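A short sketch of the endpoint and retstep parameters described above (arbitrary values):
```
# endpoint=False excludes 50; retstep=True also returns the spacing
samples, step = np.linspace(10, 50, 20, endpoint=False, retstep=True)
print(samples)
print(step)  # 2.0, i.e. (50 - 10) / 20
```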
# NumPy Basic Operations
```
a = np.array([1,2,3])
b = np.array([4,5,6])
a + b
a - b
a ** 2
b ** 2
np.sin(a)
a < 2
c =np.array([[1,2,3], [4,5,6]])
d = np.array([[1,2,3],[4,5,6]])
# Hadamard Product - Element Wise Product
c * d
# Matrix Product: this raises a ValueError because c and d are both (2,3), so their shapes are not aligned
c.dot(d)
```
c and d both have shape (2,3), and a matrix product needs the inner dimensions to match, i.e. (2,3) x (3,2). That is why the transpose of d has to be taken; c.dot(d) above raises a ValueError.
```
# Matrix Product
c.dot(d.T)
# Matrix Product
c.dot(d.transpose())
np.exp(c)
e = np.random.random((4,5))
e
e.sum()
e.max()
e.min()
e.sum(axis=0)
```
axis = 0 sums down the columns, giving one sum per column.
```
e.sum(axis=1)
```
axis = 1 sums across the rows, giving one sum per row.
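As a concrete illustration with a small fixed array (rather than the random one above):
```
m = np.array([[1, 2, 3], [4, 5, 6]])
print(m.sum(axis=0))  # [5 7 9]  -> one sum per column
print(m.sum(axis=1))  # [ 6 15]  -> one sum per row
```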
```
np.sqrt(e) #square root
np.square(e)
np.add(e,e)
```
# Indexing and Slicing
```
array4 = np.array([1,2,3,4,5,6,7,8,9,10])
print(array4[0])
array4[0:4]
reverse_array = array4[::-1]
print(reverse_array)
array5 = np.array([[1,2,3,4,5],
[6,7,8,9,10]])
print(array5)
array5[1,1]
array5[1,3]
print(array5[:,1])
print(array5[1,1:4])
print(array5[-1,:])
print(array5[:,-1])
```
# Shape Manipulation
```
array6 = np.array([[1,2,3],
[4,5,6],
[7,8,9]])
print(array6)
```
- ravel() : Return a contiguous flattened array.
```
# flatten the array into 1-D with ravel()
f = array6.ravel()
print(f)
array7 = f.reshape(3,3)
print(array7)
arrayT = array6.transpose()
print(arrayT)
array8 = np.array([[1,2],
[3,4],
[5,6]])
print(array8)
print(array8.reshape(2,3))
```
## Difference between resize() and reshape()
- resize() : Returns a new array with the specified shape; the total number of elements may change, and np.resize fills any extra entries by repeating the data (see the short sketch below).
- reshape() : Gives a new shape to an array without changing its data; the total number of elements must stay the same.
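A minimal sketch of that difference, using np.resize (which returns a new array) for illustration:
```
arr = np.array([1, 2, 3, 4, 5, 6])

# reshape: the total number of elements must stay the same (2 * 3 == 6);
# arr.reshape(4, 2) would raise a ValueError
print(arr.reshape(2, 3))

# np.resize: the total size may change; extra entries are filled by
# repeating the data
print(np.resize(arr, (4, 2)))
```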
# Stacking Arrays
```
array9 = np.array([[1,2],
[3,4],
[5,6]])
array10 = np.array([[-1,-2],
[-3,-4],
[-5,-6]])
array_vstack = np.vstack((array9,array10))
print(array_vstack)
array_hstack = np.hstack((array9,array10))
print(array_hstack)
```
# Convert and Copy Array
```
list1 = [1,2,3,4]
array11 = np.array(list1)
print(array11)
list2 = array11.tolist()
print(list2)
g = np.array([1,2,3])
h = g
j = h
h
j
h[0] = 5
print(g)
print(h)
print(j)
```
If we want to create an independent copy of the array at a different memory location:
```
g = np.array([1,2,3])
h = g.copy()
print(h)
h[0] = 5
print(g)
print(h)
```
|
github_jupyter
|
import numpy as np
array= np.array([1,2,3,4,5])
array
array2 = np.array([1,2,3,4,5,6,7,8,9,10])
array2
# reshape(row,column)
array3 = array2.reshape(2,5)
array3
array.shape # 1 row, 5 column
array2.shape # 1 row, 10 column
array3.shape #2 row, 5 column
array.ndim # 1 row = 1 dimension
array2.ndim # 1 row = 1 dimension
array3.ndim #2 row = 2 dimension
array.dtype
array2.dtype
array3.dtype
array.size
array2.size
array3.size
array_multi = np.array([[1,2,3,4,5],
[6,7,8,9,10],
[11,12,13,14,15]])
array_multi
array_multi.shape # 3 row, 5 column
A = np.zeros((3,4))
A
A[0,0] = 5
A
np.ones((3,4))
np.empty((5,6))
np.arange(10,50,5)
np.linspace(10,50,20)
a = np.array([1,2,3])
b = np.array([4,5,6])
a + b
a - b
a ** 2
b ** 2
np.sin(a)
a < 2
c =np.array([[1,2,3], [4,5,6]])
d = np.array([[1,2,3],[4,5,6]])
# Hadamard Product - Element Wise Product
c * d
# Matrix Product
c.dot(d)
# Matrix Product
c.dot(d.T)
# Matrix Product
c.dot(d.transpose())
np.exp(c)
e = np.random.random((4,5))
e
e.sum()
e.max()
e.min()
e.sum(axis=0)
e.sum(axis=1)
np.sqrt(e) #square root
np.square(e)
np.add(e,e)
array4 = np.array([1,2,3,4,5,6,7,8,9,10])
print(array4[0])
array4[0:4]
reverse_array = array4[::-1]
print(reverse_array)
array5 = np.array([[1,2,3,4,5],
[6,7,8,9,10]])
print(array5)
array5[1,1]
array5[1,3]
print(array5[:,1])
print(array5[1,1:4])
print(array5[-1,:])
print(array5[:,-1])
array6 = np.array([[1,2,3],
[4,5,6],
[7,8,9]])
print(array6)
# flatten the array into 1-D with ravel()
f = array6.ravel()
print(f)
array7 = f.reshape(3,3)
print(array7)
arrayT = array6.transpose()
print(arrayT)
array8 = np.array([[1,2],
[3,4],
[5,6]])
print(array8)
print(array8.reshape(2,3))
array9 = np.array([[1,2],
[3,4],
[5,6]])
array10 = np.array([[-1,-2],
[-3,-4],
[-5,-6]])
array_vstack = np.vstack((array9,array10))
print(array_vstack)
array_hstack = np.hstack((array9,array10))
print(array_hstack)
list1 = [1,2,3,4]
array11 = np.array(list1)
print(array11)
list2 = array11.tolist()
print(list2)
g = np.array([1,2,3])
h = g
j = h
h
j
h[0] = 5
print(g)
print(h)
print(j)
g = np.array([1,2,3])
h = g.copy()
print(h)
h[0] = 5
print(g)
print(h)
| 0.329392 | 0.972389 |
```
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
import tensorflow as tf
tf.enable_eager_execution()
import random
import nltk
lines=open('../input/deu.txt', encoding='utf-8', errors='ignore').read().split('\n')
pairs = [line.split('\t') for line in lines]
pairs=pairs[0:-1]
questions=[]
answers=[]
for i in range(0, len(pairs)):
questions.append(pairs[i][1])
answers.append(pairs[i][0])
data=questions+answers
for i in range(0,len(data)):
data[i]=data[i].lower()
import re
for i in range(0,len(data)):
data[i]=re.sub(r'\d+','',data[i])
from nltk.tokenize import RegexpTokenizer
tokenizer=RegexpTokenizer(r'\w+')
for i in range(0,len(data)):
data[i]=tokenizer.tokenize(data[i])
ques=[]
ans=[]
for i in range(0,len(data)):
if i<len(questions):
ques.append(data[i][:13])
else:
ans.append(data[i][:13])
ques = ques[:8000]
ans = ans[:8000]
for i in range(len(ques)):
ques[i] = (9-len(ques[i])) * ['<pad>'] + ques[i]
ans[i] = ['<start>'] + ans[i] + ['<end>'] + (7 - len(ans[i])) * ['<pad>']
from gensim.models import Word2Vec
w2v_enc=Word2Vec(sentences=ques,min_count=1,size=50,iter=50,window = 3)
w2v_dec=Word2Vec(sentences=ans,min_count=1,size=50,iter=50,window=3)
vocab_dec=w2v_dec.wv.vocab
vocab_dec=list(vocab_dec)
int_to_vocab_dec={}
for i in range(0,len(vocab_dec)):
int_to_vocab_dec[i]=vocab_dec[i]
vocab_to_int_dec={}
for key,value in int_to_vocab_dec.items():
vocab_to_int_dec[value]=key
vocab_enc=w2v_enc.wv.vocab
vocab_enc=list(vocab_enc)
int_to_vocab_enc={}
for i in range(0,len(vocab_enc)):
int_to_vocab_enc[i]=vocab_enc[i]
vocab_to_int_enc={}
for key,value in int_to_vocab_enc.items():
vocab_to_int_enc[value]=key
len(vocab_to_int_dec)
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(ques,ans,test_size = 0.1,random_state = 1234,shuffle = True)
dec_inp_train = np.zeros([len(y_train),9,50])
dec_inp_test = np.zeros([len(y_test),9,50])
for i in range(len(y_train)):
temp = y_train[i].copy()
try:
temp[temp.index('<end>')] = '<pad>'
except ValueError:
pass
y_train[i] = y_train[i][1:] + ['<pad>']
dec_inp_train[i] = w2v_dec.wv[temp]
x_train[i] = w2v_enc.wv[x_train[i]]
for i in range(len(y_test)):
temp = y_test[i].copy()
try:
temp[temp.index('<end>')] = '<pad>'
except ValueError:
pass
y_test[i] = y_test[i][1:] + ['<pad>']
dec_inp_test[i] = w2v_dec.wv[temp]
x_test[i] = w2v_enc.wv[x_test[i]]
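# Encoder-decoder translation model: a bidirectional LSTM encoder plus an LSTM decoder with a
# learned concat-style attention over the encoder states; the Dense(1915) output layer appears
# to correspond to the decoder vocabulary size.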
class attention(tf.keras.Model):
def __init__(self):
super(attention, self).__init__()
self.encoder = tf.keras.layers.Bidirectional(tf.keras.layers.CuDNNLSTM(128,return_sequences=True,return_state=True))
self.decoder = tf.keras.layers.CuDNNLSTM(256,return_state=True)
self.dense = tf.keras.layers.Dense(400,activation='relu')
self.out = tf.keras.layers.Dense(1915)
self.attention_dense = tf.keras.layers.Dense(1,activation='tanh')
self.attention_softmax = tf.keras.layers.Dense(1,activation='softmax')
def encoder_func(self,inp):
values,ht1,ct1,ht2,ct2 = self.encoder(inp)
ht1 = tf.reshape(ht1[-1],shape=[1,128])
ht2 = tf.reshape(ht2[-1],shape=[1,128])
ct1 = tf.reshape(ct1[-1],shape=[1,128])
ct2 = tf.reshape(ct2[-1],shape=[1,128])
ht = tf.concat([ht1,ht2],axis=1)
ct = tf.concat([ct1,ct2],axis=1)
return values,ht,ct
def decoder_func(self,enc_inp,dec_input = None):
deco_out = tf.convert_to_tensor(w2v_dec['<start>'],dtype=tf.float32)
deco_out = tf.reshape(deco_out,shape=[1,1,50])
count = 0
value = 0
predictions = tf.zeros([1,1915])
encoder_states,h_t,c_t = self.encoder_func(enc_inp)
if dec_input is not None:
for i in range(16):
for j in range(9):
dec_inp = self.attention_func(h_t,dec_input[i][j],encoder_states[i])
value,h_t,c_t = self.decoder(dec_inp,initial_state= [h_t,c_t])
value = self.dense(value)
value = self.out(value)
predictions = tf.concat([predictions,value],axis=0)
predictions = predictions[1:]
predictions = tf.reshape(predictions,[-1,9,1915])
return predictions
else:
sentence = []
while count < 9 and int_to_vocab_dec[value] != '<end>':
dec_inp = self.attention_func(h_t,deco_out,encoder_states[0])
value,h_t,c_t = self.decoder(dec_inp,initial_state = [h_t,c_t]) # feed the attention-augmented input, matching the training branch
value = self.dense(value)
value = self.out(value)
value = tf.nn.softmax(value)
value = random.choice(np.argsort(value[0])[-3:])
sentence.append(int_to_vocab_dec[value])
count += 1
deco_out = tf.convert_to_tensor(w2v_dec[int_to_vocab_dec[value]])
deco_out = tf.reshape(deco_out,shape=[1,1,50])
return sentence[:-1]
def attention_func(self,dec_h_t,decoder_out,enc_state):
temp = tf.zeros([1,512])
for i in range(9):
enc_statee=enc_state[i]
enc_statee=tf.reshape(enc_statee,(1,-1))
temp1=tf.concat([enc_statee,dec_h_t],axis=1)
temp=tf.concat([temp,temp1],axis=0)
temp=temp[1:]
attention_weights = self.attention_dense(temp)
attention_weights = self.attention_softmax(attention_weights)
context_vector = tf.matmul(tf.transpose(enc_state),attention_weights)
decoder_out=tf.reshape(decoder_out,(-1,1))
attention_context = tf.concat([decoder_out,context_vector],axis=0)
attention_context = tf.reshape(attention_context,(1,1,-1))
return attention_context
model = attention()
optimzer = tf.train.RMSPropOptimizer(learning_rate=0.01)
def loss_fun(x,y,z):
with tf.GradientTape() as t:
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=z,logits=model.decoder_func(x,y)))
grads = t.gradient(loss,model.variables)
optimzer.apply_gradients(zip(grads,model.variables))
return loss
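# Training loop: batches of 16 sentence pairs, one-hot targets of dimension 1915,
# and a BLEU score computed on 20 randomly sampled test pairs every 128 training examples.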
for epoch in range(3):
i = 0
while i < len(x_train):
a = np.array(x_train[i:i+16])
b = np.array(dec_inp_train[i:i+16])
temp = y_train[i:i+16]
c = np.zeros([16,9,1915])
for k in range(16):
for j in range(9):
c[k][j][vocab_to_int_dec[temp[k][j]]] = 1
los = loss_fun(tf.convert_to_tensor(a,dtype=tf.float32),tf.convert_to_tensor(b,dtype=tf.float32),c)
i = i + 16
if i % 128 == 0:
score = 0
test_temp_enc,test_temp_dec = zip(*random.sample(list(zip(x_test, y_test)), 20))
for m in range(20):
prediction_sent = model.decoder_func(tf.convert_to_tensor(test_temp_enc[m].reshape([1,9,50]),dtype=tf.float32))
actual_sent = test_temp_dec[m][:test_temp_dec[m].index('<end>')]
score += nltk.translate.bleu_score.sentence_bleu([actual_sent],prediction_sent)
print("bleu score when i is: ",i, " is: ",score/20)
```
|
github_jupyter
|
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
import tensorflow as tf
tf.enable_eager_execution()
import random
import nltk
lines=open('../input/deu.txt', encoding='utf-8', errors='ignore').read().split('\n')
pairs = [line.split('\t') for line in lines]
pairs=pairs[0:-1]
questions=[]
answers=[]
for i in range(0, len(pairs)):
questions.append(pairs[i][1])
answers.append(pairs[i][0])
data=questions+answers
for i in range(0,len(data)):
data[i]=data[i].lower()
import re
for i in range(0,len(data)):
data[i]=re.sub(r'\d+','',data[i])
from nltk.tokenize import RegexpTokenizer
tokenizer=RegexpTokenizer(r'\w+')
for i in range(0,len(data)):
data[i]=tokenizer.tokenize(data[i])
ques=[]
ans=[]
for i in range(0,len(data)):
if i<len(questions):
ques.append(data[i][:13])
else:
ans.append(data[i][:13])
ques = ques[:8000]
ans = ans[:8000]
for i in range(len(ques)):
ques[i] = (9-len(ques[i])) * ['<pad>'] + ques[i]
ans[i] = ['<start>'] + ans[i] + ['<end>'] + (7 - len(ans[i])) * ['<pad>']
from gensim.models import Word2Vec
w2v_enc=Word2Vec(sentences=ques,min_count=1,size=50,iter=50,window = 3)
w2v_dec=Word2Vec(sentences=ans,min_count=1,size=50,iter=50,window=3)
vocab_dec=w2v_dec.wv.vocab
vocab_dec=list(vocab_dec)
int_to_vocab_dec={}
for i in range(0,len(vocab_dec)):
int_to_vocab_dec[i]=vocab_dec[i]
vocab_to_int_dec={}
for key,value in int_to_vocab_dec.items():
vocab_to_int_dec[value]=key
vocab_enc=w2v_enc.wv.vocab
vocab_enc=list(vocab_enc)
int_to_vocab_enc={}
for i in range(0,len(vocab_enc)):
int_to_vocab_enc[i]=vocab_enc[i]
vocab_to_int_enc={}
for key,value in int_to_vocab_enc.items():
vocab_to_int_enc[value]=key
len(vocab_to_int_dec)
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(ques,ans,test_size = 0.1,random_state = 1234,shuffle = True)
dec_inp_train = np.zeros([len(y_train),9,50])
dec_inp_test = np.zeros([len(y_test),9,50])
for i in range(len(y_train)):
temp = y_train[i].copy()
try:
temp[temp.index('<end>')] = '<pad>'
except ValueError:
pass
y_train[i] = y_train[i][1:] + ['<pad>']
dec_inp_train[i] = w2v_dec.wv[temp]
x_train[i] = w2v_enc.wv[x_train[i]]
for i in range(len(y_test)):
temp = y_test[i].copy()
try:
temp[temp.index('<end>')] = '<pad>'
except ValueError:
pass
y_test[i] = y_test[i][1:] + ['<pad>']
dec_inp_test[i] = w2v_dec.wv[temp]
x_test[i] = w2v_enc.wv[x_test[i]]
class attention(tf.keras.Model):
def __init__(self):
super(attention, self).__init__()
self.encoder = tf.keras.layers.Bidirectional(tf.keras.layers.CuDNNLSTM(128,return_sequences=True,return_state=True))
self.decoder = tf.keras.layers.CuDNNLSTM(256,return_state=True)
self.dense = tf.keras.layers.Dense(400,activation='relu')
self.out = tf.keras.layers.Dense(1915)
self.attention_dense = tf.keras.layers.Dense(1,activation='tanh')
self.attention_softmax = tf.keras.layers.Dense(1,activation='softmax')
def encoder_func(self,inp):
values,ht1,ct1,ht2,ct2 = self.encoder(inp)
ht1 = tf.reshape(ht1[-1],shape=[1,128])
ht2 = tf.reshape(ht2[-1],shape=[1,128])
ct1 = tf.reshape(ct1[-1],shape=[1,128])
ct2 = tf.reshape(ct2[-1],shape=[1,128])
ht = tf.concat([ht1,ht2],axis=1)
ct = tf.concat([ct1,ct2],axis=1)
return values,ht,ct
def decoder_func(self,enc_inp,dec_input = None):
deco_out = tf.convert_to_tensor(w2v_dec['<start>'],dtype=tf.float32)
deco_out = tf.reshape(deco_out,shape=[1,1,50])
count = 0
value = 0
predictions = tf.zeros([1,1915])
encoder_states,h_t,c_t = self.encoder_func(enc_inp)
if dec_input is not None:
for i in range(16):
for j in range(9):
dec_inp = self.attention_func(h_t,dec_input[i][j],encoder_states[i])
value,h_t,c_t = self.decoder(dec_inp,initial_state= [h_t,c_t])
value = self.dense(value)
value = self.out(value)
predictions = tf.concat([predictions,value],axis=0)
predictions = predictions[1:]
predictions = tf.reshape(predictions,[-1,9,1915])
return predictions
else:
sentence = []
while count < 9 and int_to_vocab_dec[value] != '<end>':
dec_inp = self.attention_func(h_t,deco_out,encoder_states[0])
value,h_t,c_t = self.decoder(dec_inp,initial_state = [h_t,c_t]) # feed the attention-augmented input, matching the training branch
value = self.dense(value)
value = self.out(value)
value = tf.nn.softmax(value)
value = random.choice(np.argsort(value[0])[-3:])
sentence.append(int_to_vocab_dec[value])
count += 1
deco_out = tf.convert_to_tensor(w2v_dec[int_to_vocab_dec[value]])
deco_out = tf.reshape(deco_out,shape=[1,1,50])
return sentence[:-1]
def attention_func(self,dec_h_t,decoder_out,enc_state):
temp = tf.zeros([1,512])
for i in range(9):
enc_statee=enc_state[i]
enc_statee=tf.reshape(enc_statee,(1,-1))
temp1=tf.concat([enc_statee,dec_h_t],axis=1)
temp=tf.concat([temp,temp1],axis=0)
temp=temp[1:]
attention_weights = self.attention_dense(temp)
attention_weights = self.attention_softmax(attention_weights)
context_vector = tf.matmul(tf.transpose(enc_state),attention_weights)
decoder_out=tf.reshape(decoder_out,(-1,1))
attention_context = tf.concat([decoder_out,context_vector],axis=0)
attention_context = tf.reshape(attention_context,(1,1,-1))
return attention_context
model = attention()
optimzer = tf.train.RMSPropOptimizer(learning_rate=0.01)
def loss_fun(x,y,z):
with tf.GradientTape() as t:
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=z,logits=model.decoder_func(x,y)))
grads = t.gradient(loss,model.variables)
optimzer.apply_gradients(zip(grads,model.variables))
return loss
for epoch in range(3):
i = 0
while i < len(x_train):
a = np.array(x_train[i:i+16])
b = np.array(dec_inp_train[i:i+16])
temp = y_train[i:i+16]
c = np.zeros([16,9,1915])
for k in range(16):
for j in range(9):
c[k][j][vocab_to_int_dec[temp[k][j]]] = 1
los = loss_fun(tf.convert_to_tensor(a,dtype=tf.float32),tf.convert_to_tensor(b,dtype=tf.float32),c)
i = i + 16
if i % 128 == 0:
score = 0
test_temp_enc,test_temp_dec = zip(*random.sample(list(zip(x_test, y_test)), 20))
for m in range(20):
prediction_sent = model.decoder_func(tf.convert_to_tensor(test_temp_enc[m].reshape([1,9,50]),dtype=tf.float32))
actual_sent = test_temp_dec[m][:test_temp_dec[m].index('<end>')]
score += nltk.translate.bleu_score.sentence_bleu([actual_sent],prediction_sent)
print("bleu score when i is: ",i, " is: ",score/20)
| 0.229276 | 0.249842 |